sequel-5.63.0/.ci.gemfile:

# This file is only used for CI.
source 'http://rubygems.org'
gem 'minitest-hooks'
gem 'minitest-global_expectations'
# Plugin/Extension Dependencies
gem 'tzinfo'
if RUBY_VERSION < '2.1'
  gem 'nokogiri', '<1.7'
elsif RUBY_VERSION < '2.3'
  gem 'nokogiri', '<1.10'
elsif RUBY_VERSION < '2.4'
  gem 'nokogiri', '<1.11'
elsif RUBY_VERSION < '2.5'
  gem 'nokogiri', '<1.12'
elsif RUBY_VERSION < '2.6'
  gem 'nokogiri', '<1.13'
else
  gem 'nokogiri'
end
if RUBY_VERSION < '2.3'
  gem 'i18n', '<1.5'
end

if RUBY_VERSION < '2.2.0'
  gem 'activemodel', '<5.0.0'
  gem 'concurrent-ruby', '<1.1.10'
elsif RUBY_VERSION < '2.4.0'
  gem 'activemodel', '<6.0.0'
elsif RUBY_VERSION < '2.7.0'
  gem 'activemodel', '<7.0.0'
else
  gem 'activemodel'
end
if RUBY_VERSION < '3.1.0' && RUBY_VERSION >= '3.0.0'
  gem 'json', '2.5.1'
elsif RUBY_VERSION < '2.0.0'
  gem 'json', '<1.8.5'
elsif RUBY_VERSION < '2.3.0'
  gem 'json', '<2.6'
else
  gem 'json'
end
if RUBY_VERSION < '2.0.0'
  gem 'rake', '<10'
elsif RUBY_VERSION < '2.3.0'
  gem 'rake', '<13'
else
  gem 'rake'
end
if RUBY_VERSION < '2.4.0'
  # Until minitest 5.12.0 is fixed
  gem 'minitest', '5.11.3'
else
  gem 'minitest', '>= 5.7.0'
end
# MRI Adapter Dependencies
platforms :ruby do
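  # Deterministic coin flip: the parity of the Ruby minor version combined
  # with the parity of the day of the year decides whether this run tests
  # sequel_pg (see the comment below).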
  sequel_pg = RUBY_VERSION.split('.')[1].to_i.send(Time.now.yday.even? ? :even? : :odd?)

  gem "sqlite3"

  if RUBY_VERSION < '2.0.0'
    gem "pg", '<0.19.0'
    gem "mysql2", '<0.5'
  else
    gem "pg", RUBY_VERSION < '2.2.0' ? '<1.2.0' : '>0'
    gem "mysql2"
  end

  # Test current sequel_pg on half of the MRIs, and pure-ruby on the other half
  if sequel_pg
    gem 'sequel_pg', git: 'https://github.com/jeremyevans/sequel_pg', require: 'sequel'
  end
end
# JRuby Adapter Dependencies
platforms :jruby do
  if RUBY_VERSION < '2.4'
    gem 'racc', '<1.6'
  end

  gem 'jdbc-sqlite3'
  gem 'jdbc-mysql'
  gem 'jdbc-postgres'
end

sequel-5.63.0/.github/workflows/ci.yml:

name: CI
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

permissions:
  contents: read

jobs:
  tests:
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:latest
        ports: ["5432:5432"]
        options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
        env:
          POSTGRES_PASSWORD: postgres
      mysql:
        image: mysql:latest
        env:
          MYSQL_ROOT_PASSWORD: root
          MYSQL_DATABASE: sequel_test
        ports: ["3306:3306"]
        options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=3
    strategy:
      fail-fast: false
      matrix:
        ruby: [ "1.9.3", "2.0.0", 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, "3.0", 3.1, jruby-9.1, jruby-9.2, jruby-9.3, truffleruby-head ]
    name: ${{ matrix.ruby }}
    env:
      BUNDLE_GEMFILE: .ci.gemfile
    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -yqq install libpq-dev libmysqlclient-dev
      - run: sudo apt-get -yqq install libxml2-dev libxslt-dev
        if: startsWith(matrix.ruby, 'truffleruby')
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby }}
          bundler-cache: true
      - run: bundle exec rake spec_ci
        env:
          DEFAULT_DATABASE: 1
          MYSQL_ROOT_PASSWORD: 1
        continue-on-error: ${{ startsWith(matrix.ruby, 'truffleruby') }}

sequel-5.63.0/.gitignore:

*.lock
*.rbc
*.swp
/coverage
/rdoc
/sequel-*.gem
/spec/bin-sequel-*
/spec/spec_config.rb
/www/public/*.html
/www/public/rdoc*

sequel-5.63.0/CHANGELOG:

=== 5.63.0 (2022-12-01)
* Make validates_associated plugin avoid database type errors for non-integer association keys (jeremyevans) (#1968)
* Make tactical_eager_loading plugin work better with table inheritance plugins (rolftimmermans, jeremyevans) (#1962)
* Add support for pool_class: :timed_queue on Ruby 3.2+, using a Queue for available connections (jeremyevans) (see the sketch after this section)
* Allow :pool_class Database option to be specified as a string to more easily choose a different pool type (jeremyevans)
* Use compare_by_identity hashes for Thread-keyed hashes in threaded connection pools (jeremyevans)
* Skip use of JRuby workaround on JRuby 9.3.9.0+ in named_timezones extension as JRuby fixed the related bug (jeremyevans)
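
  A minimal sketch of opting into the new pool type noted above; the
  connection URL is hypothetical:

    require 'sequel'

    # On Ruby 3.2+, use the Queue-backed connection pool. The option may
    # also be given as the string 'timed_queue'.
    DB = Sequel.connect('postgres://localhost/mydb', pool_class: :timed_queue)
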
=== 5.62.0 (2022-11-01)
* Add back the pg_auto_parameterize extension for automatically using bound variables when using postgres adapter with pg driver (jeremyevans)
* Add pg_extended_integer_support extension for customizing behavior when literalizing a Ruby integer outside PostgreSQL bigint range (jeremyevans)
* Raise Postgres::IntegerOutsideBigintRange if attempting to literalize a Ruby integer outside PostgreSQL bigint range (jeremyevans)
* Add primary_key_lookup_check_values plugin for typecasting and checking primary key values during lookup (jeremyevans)
* Setup validation of minimum and maximum values for integer columns in auto_validations (jeremyevans)
* Add validates_max_value and validates_min_value to validation_helpers (jeremyevans) (see the sketch after this section)
* Include :min_value and :max_value schema entries for integer columns on most databases (jeremyevans)
* Don't wrap multi-inserts in a transaction when it's not required (shannoncole, jeremyevans) (#1945)
* Update mock PostgreSQL adapter to default to PostgreSQL 15 instead of PostgreSQL 14 (jeremyevans)
* Support fractional seconds in the named_timezones extension (jeremyevans) (#1943)
* Cache reflection datasets in the postgres adapter to improve performance (jeremyevans)
* Handle BC dates and timestamps in bound variables when using the pg_extended_date_support extension (jeremyevans)
* Correctly format hstore[] types in bound variables on PostgreSQL (jeremyevans)
* Fix corner case in eager loading where window function eager limit strategy is used, but row number entry is not removed (jeremyevans)
* Support server/shard specific :after_connect and :connect_sqls Database options (jeremyevans) (#1935)
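
  A minimal sketch of the new validation_helpers methods noted above; the
  Account model and :level column are hypothetical:

    class Account < Sequel::Model
      plugin :validation_helpers

      def validate
        super
        validates_min_value 1, :level
        validates_max_value 100, :level
      end
    end
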
=== 5.61.0 (2022-10-01)
* Make Database#foreign_key_list on PostgreSQL return results for partitioned tables (jeremyevans)
* Add Database#check_string_typecast_bytesize for checking bytesize of strings before typecasting (jeremyevans)
* Treat negative hexadecimal strings similarly to positive hexadecimal strings when typecasting to integer (jeremyevans)
* Remove is_json and is_not_json methods from the pg_json_ops extension, as the support was removed in PostgreSQL 15 beta 4 (jeremyevans)
* Fix handling of timestamps before the date of calendar reform when using pg_extended_date_support extension on Ruby 3.2 (jeremyevans)
=== 5.60.1 (2022-09-02)
* Revert conversion of respond_to? to defined?, as it breaks with unused refinements on Ruby 2 (jeremyevans) (#1919)
=== 5.60.0 (2022-09-01)
* Support arbitrary expressions for date_arithmetic interval values on PostgreSQL 9.4+ (jeremyevans)
* Support native IS DISTINCT FROM on SQLite 3.39+ instead of emulating support in the is_distinct_from extension (jeremyevans)
* Support HAVING without GROUP BY on SQLite 3.39+ (jeremyevans)
* Convert most respond_to? calls to equivalent defined? for better performance (jeremyevans)
=== 5.59.0 (2022-08-01)
* Set :allow_eager association option to false for instance specific associations without eager loaders (jeremyevans)
* Add require_valid_schema plugin for checking that model classes have schema parsed as expected (jeremyevans)
* Model classes created from aliased expressions and literal strings no longer use the simple table optimization (jeremyevans)
* Model code that does not swallow connection errors will now also not swallow disconnect errors (jeremyevans) (#1892)
* Add is_json and is_not_json methods to the pg_json_ops extension, for the PostgreSQL 15+ IS [NOT] JSON operator (jeremyevans)
* Support :security_invoker view option on PostgreSQL 15+, for views where access uses permissions of user instead of owner (jeremyevans)
* Support :nulls_distinct index option on PostgreSQL 15+, for NULLS [NOT] DISTINCT (jeremyevans)
* Support sequel-postgres-pr driver in the postgres adapter (jeremyevans)
=== 5.58.0 (2022-07-01)
* Support :disable_split_materialized Database option on MySQL to work around optimizer bug in MariaDB 10.5+ affecting association tests (jeremyevans)
* Add Dataset#merge* methods to support MERGE statement on PostgreSQL 15+, MSSQL, Oracle, DB2, H2, HSQLDB, and Derby (jeremyevans) (see the sketch below)
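
  A sketch of the MERGE support noted above, using hypothetical m1/m2
  tables; it runs only on databases supporting MERGE:

    DB[:m1].
      merge_using(:m2, i1: :i2).                     # join condition
      merge_insert(i1: :i2, a: Sequel[:b] + 11).     # WHEN NOT MATCHED
      merge_delete { a > 30 }.                       # WHEN MATCHED, with condition
      merge_update(a: Sequel[:a] + :b + 20).         # WHEN MATCHED
      merge
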
=== 5.57.0 (2022-06-01)
* Make Database#create_function on PostgreSQL accept :parallel option (bananarne) (#1870)
* Add support for :on_update_current_timestamp column option on MySQL (jeremyevans)
* Add is_distinct_from extension with support for the IS DISTINCT FROM operator (jeremyevans)
=== 5.56.0 (2022-05-01)
* Make alter_table add_column/add_foreign_key methods support :index option to create an index on the column (jeremyevans)
* Support creation of STRICT tables on SQLite 3.37.0+ via create_table :strict option (jeremyevans)
* Add sqlite_json_ops extension for DSL support for JSON functions and operators added in SQLite 3.38.0 (jeremyevans)
* Recognize "INTEGER" type same as "integer" type in the schema dumper, helpful on SQLite 3.37.0+ (jeremyevans)
=== 5.55.0 (2022-04-01)
* Support :setup_regexp_function Database option in the sqlite adapter to allow the use of regexps when querying (jeremyevans)
* Add auto_restrict_eager_graph plugin for automatically disallowing eager_graph with associations needing but lacking graph options (jeremyevans)
* Fix placeholder literalizer optimization for dataset aggregate methods on a model dataset (belousovAV) (#1847, #1848)
=== 5.54.0 (2022-03-01)
* Add enum plugin for treating columns as enums in a model (jeremyevans) (#1839) (see the sketch below)
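
  A sketch of the enum plugin noted above; the Album model and values are
  hypothetical:

    class Album < Sequel::Model
      plugin :enum
      enum :status_id, good: 1, bad: 2
    end

    album = Album.first
    album.good?   # => true when status_id is 1
    album.bad!    # sets status_id to 2
    Album.good    # dataset method limiting to good albums (Album.not_good also provided)
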
=== 5.53.0 (2022-02-01)
* Make Dataset#_sql_comment private when using the Database sql_comments extension (jeremyevans)
* Fix prepared statements in the mysql2 adapter to reuse native prepared statements (jeremyevans) (#1832)
* Support H2 version 2+ in the jdbc/h2 adapter (jeremyevans) (#1817)
* Work around active_support breaking subclasses plugin on Ruby <3.1 (jeremyevans) (#1816)
* Fix error handling if trying to setup column_encryption plugin without keys (jeremyevans) (#1815)
=== 5.52.0 (2022-01-01)
* Use Class#subclasses if available in the subclasses plugin, instead of a custom Model.subclasses accessor (jeremyevans)
* Add Model.descendants and .freeze_descendants to subclasses plugin (jeremyevans) (see the sketch after this section)
* Avoid use of deprecated Refinement#include on Ruby 3.1+ (jeremyevans)
* Add date_parse_input_handler extension for custom handling of input to date parsing methods (jeremyevans)
* Make postgres adapter respect Database#default_string_column_size (jeremyevans)
* Make pg_interval extension work with ActiveSupport 7.0 (jeremyevans)
* Make :ruby_default schema entry for type: :datetime respect Sequel.datetime_class (jeremyevans)
* Make alter_table drop_constraint have an effect on MySQL 8.0.19+ (jeremyevans)
* Make mysql adapter support ruby-mysql 3 API (jeremyevans) (#1795)
* Make mysql adapter no longer use connection's server_version, since it isn't accurate when using the ruby-mysql driver (jeremyevans)
* Add sql_comments plugin for automatically including comments on queries generated by model class, instance, and dataset methods (jeremyevans)
* Make sql_comments Database extension support Database#with_comments, for automatically including comments for queries executed inside the block (jeremyevans)
* Fix sql_comments extension to not modify cached SQL for a dataset (jeremyevans)
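
  A sketch of the subclasses plugin additions noted above; Album and
  LiveAlbum are hypothetical model classes:

    Sequel::Model.plugin :subclasses

    class Album < Sequel::Model; end
    class LiveAlbum < Album; end

    Sequel::Model.subclasses   # direct subclasses: [Album]
    Sequel::Model.descendants  # subclasses plus their subclasses: [Album, LiveAlbum]
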
=== 5.51.0 (2021-12-01)
* Make eager loading via tactical_eager_loading no longer modify objects who already have a cached value for the association (jeremyevans)
* Make association cloning handle cases where clone association sets different :class option than cloned association (jeremyevans)
* Make column schema entries on MySQL include an :extra entry for the Extra column in DESCRIBE output (bschmeck) (#1791)
* Update mock PostgreSQL adapter to default to PostgreSQL 14 instead of PostgreSQL 9.5 (jeremyevans)
* Support Dataset#with_recursive :search and :cycle options on PostgreSQL 14+ for result ordering and cycle detection (jeremyevans)
* Avoid method redefined verbose mode warnings in lazy_attributes plugin (jeremyevans)
=== 5.50.0 (2021-11-01)
* Make Migrator :allow_missing_migration_files also allow down migrations where the current database version is greater than the last migration file version (francisconeves97) (#1789)
* Fix Model#freeze in composition, serialization, and serialization_modification_detection plugins to return self (jeremyevans) (#1788)
* Fix typecasting of lazy columns when using lazy_attributes plugin in model where dataset selects from subquery (jeremyevans)
* Add :before_preconnect Database option, for configuring extensions loaded via :preconnect_extensions (MarcPer, jeremyevans) (#1786)
* Change Dataset#columns! to use a LIMIT 0 query instead of a LIMIT 1 query (jeremyevans)
* Add sql_log_normalizer extension for normalizing logged SQL, helpful for analytics and sensitive data (jeremyevans)
* Add support for range_merge, multirange, and unnest, and PGMultiRange#op to pg_range_ops extension (jeremyevans)
* Add pg_multirange extension with support for PostgreSQL 14+ multirange types (jeremyevans)
=== 5.49.0 (2021-10-01)
* Switch block_given? usage to defined?(yield) (jeremyevans)
* Support table aliases for JOIN USING columns on PostgreSQL 14+ (jeremyevans)
* Support calling PostgreSQL procedures without arguments (jeremyevans)
* Support hstore subscripts in pg_hstore_ops on PostgreSQL 14+, for updating only part of an hstore value (jeremyevans)
* Support JSONB subscripts in pg_json_ops on PostgreSQL 14+, for updating only part of a JSONB value (jeremyevans)
* Support SQL::Expression#sequel_ast_transform for custom AST transforms on arbitrary expressions (jeremyevans)
* Add Database#create_trigger :replace option on PostgreSQL 14+ for CREATE OR REPLACE TRIGGER (jeremyevans)
* Make auto_validations plugin automatically setup no_null_byte validations (jeremyevans)
* Add Model#validates_no_null_byte to validation_helpers plugin (jeremyevans)
=== 5.48.0 (2021-09-01)
* Make the unused_associations plugin association reflection tracking work correctly when combining coverage runs (jeremyevans)
* Add Database#like_without_collate on MSSQL, to avoid using COLLATE on LIKE arguments, which can significantly improve performance (jeremyevans)
* Add Model::Errors#full_message private method for easier i18n support for errors with multiple attributes (jeremyevans) (#1779)
=== 5.47.0 (2021-08-01)
* Make the unused_associations plugin track access to association reflections to determine whether associations are used (jeremyevans)
* Support :db option for join tables in {many,one}_through_many to use a separate query for each join table (jeremyevans)
* Support :join_table_db option for many_to_many/one_through_one associations, to use a separate query for the join table (jeremyevans)
* Support :allow_eager_graph and :allow_filtering_by association options (jeremyevans)
* Add Database#rename_tables on MySQL, for renaming multiple tables in a single call (nick96) (#1774)
* Support Dataset#returning on SQLite 3.35+ (jeremyevans)
=== 5.46.0 (2021-07-01)
* Add unused_associations plugin, for determining which associations and association methods are not used (jeremyevans)
* Make nil :setter/:adder/:remover/:clearer association options not create related methods (jeremyevans)
=== 5.45.0 (2021-06-01)
* Fix handling of NULL values in boolean columns in the ODBC adapter (jeremyevans) (#1765)
* Add auto_validations_constraint_validations_presence_message plugin for auto_validations/constraint_validations presence message integration (jeremyevans)
* Support Dataset#with :materialized option on SQLite 3.35+ for [NOT] MATERIALIZED (jeremyevans)
* Use ALTER TABLE DROP COLUMN for dropping columns on SQLite 3.35+ (jeremyevans)
=== 5.44.0 (2021-05-01)
* Add concurrent_eager_loading plugin, for eager loading multiple associations concurrently using separate threads (jeremyevans)
* Support :weeks as an interval unit in the date_arithmetic extension (jeremyevans) (#1759)
* Raise an exception if an interval hash with an unsupported key is passed in the date_arithmetic extension (jeremyevans) (#1759)
* Support dropping non-composite unique constraints on SQLite (jeremyevans) (#1755)
=== 5.43.0 (2021-04-01)
* Add column_encryption plugin, for encrypting column values (jeremyevans)
=== 5.42.0 (2021-03-01)
* Make the ado timestamp conversion proc a normal conversion proc that can be overridden similar to other conversion procs (jeremyevans)
* Add :reject_nil option to the nested_attributes method, to ignore calls where nil is passed as the associated object data (jeremyevans)
* Add async_thread_pool plugin for easier async usage with model classes and support for async destroy, with_pk, and with_pk! methods (jeremyevans)
* Add async_thread_pool Database extension for executing queries asynchronously using a thread pool (jeremyevans) (see the sketch after this section)
* Fix possible thread safety issue in Database#extension that could allow Module#extended to be called twice with the same Database instance (jeremyevans)
* Support cases where validations make modifications beyond setting errors in Model#freeze (jeremyevans)
* Add Model#to_json_data to the json_serializer plugin, returning a JSON data structure (jeremyevans)
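
  A sketch of the async_thread_pool extension noted above, with hypothetical
  tables; each async call returns a proxy, and the calling thread blocks on
  the first method call against the proxy if the query has not yet finished:

    DB.extension :async_thread_pool

    foos = DB[:foos].async.where { name > 'M' }.all
    bar_names = DB[:bars].async.select_order_map(:name)
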
=== 5.41.0 (2021-02-01)
* Have explicit :text option for a String column take priority over :size option on PostgreSQL (jeremyevans) (#1750)
* Support a :skip_invalid option in auto_validations plugin for not adding errors to a column that already has an error (jeremyevans)
* Support a :skip_invalid option in validation_helpers for not adding an error to a column that already has an error (jeremyevans)
* Support :adder, :remover, and :clearer association options that use keyword arguments in Ruby 2.7+ (jeremyevans)
* Make pg_interval use the same number of seconds per year and per month as ActiveSupport::Duration when using ActiveSupport 5.1+ (jeremyevans)
=== 5.40.0 (2021-01-01)
* Support UPDATE FROM syntax in SQLite 3.33.0+ (jeremyevans)
* Have pg_interval extension work with ActiveSupport 6.1 (jeremyevans)
* Have date_arithmetic extension work with ActiveSupport 6.1 (jeremyevans)
* Avoid method redefinition warnings in verbose warning mode (jeremyevans)
=== 5.39.0 (2020-12-01)
* Support :clustered option for primary key and unique constraints on Microsoft SQL Server (jeremyevans)
* Do not modify the size of binary columns when using set_column_allow_null on Microsoft SQL Server (jeremyevans) (#1736)
* Add a fork safety guide with more detail on how to use Sequel with libraries that fork (janko) (#1733)
* Make the roots_dataset method in the tree plugin work with queries using joins (jeremyevans) (#1731)
* Make Database#tables return partitioned tables on PostgreSQL 10+ (epoberezhny) (#1729, #1730)
=== 5.38.0 (2020-11-01)
* Do not add new Database instances to Sequel::DATABASES if the test connection fails (jeremyevans) (#1727)
* Support the newer com.mysql.cj.jdbc.Driver in the jdbc/mysql adapter (jeremyevans)
* Do not swallow disconnect errors in Database#create_or_replace_view or Database#create_table* on Oracle (jeremyevans)
* Only rescue non-disconnect Sequel::DatabaseErrors in Postgres::Database#server_version (jeremyevans) (#1724)
* Make the single_table_inheritance and prepared_statements plugins work if loaded into the same class (jeremyevans) (#1721)
=== 5.37.0 (2020-10-01)
* Recognize more unsigned decimal/float types in the schema dumper (akimd, jeremyevans) (#1720)
* Add Postgres::PGRow::{Array,Hash}Row#op to the pg_row_ops extension if the pg_row extension is loaded (jeremyevans)
* Add Model#column_previously_was and #column_previously_changed? to the dirty plugin (jeremyevans)
* Raise Migrator::Error if attempting to migrate down to a version where there are necessary migration files missing (jeremyevans) (#1716)
=== 5.36.0 (2020-09-01)
* Handle passing keyword arguments through class methods defined via Plugins.def_dataset_method on Ruby 2.7+ (jeremyevans)
* Handle passing keyword arguments through when loading plugins on Ruby 2.7+ (jeremyevans)
* Handle passing keyword arguments through migrations when defining custom Database methods that accept keywords on Ruby 2.7+ (jeremyevans)
* Handle passing keyword arguments through Dataset#query when using the query extension on Ruby 2.7+ (jeremyevans)
* Handle passing keyword arguments through the association proxy when using the association_proxies plugin on Ruby 2.7+ (jeremyevans)
* Handle passing keyword arguments through the class method to a method defined in dataset_module on Ruby 2.7+ (adam12) (#1713)
* Stream result sets in the odbc adapter for better performance and lower memory usage (sparrovv) (#1711)
* Add Postgres::JSONBOp#set_lax and #path_*_tz methods to the pg_json_ops extension for new jsonb functions added in PostgreSQL 13 (jeremyevans)
* Add Dataset#with_ties on PostgreSQL 13+ and Microsoft SQL Server to include rows with same order as final row (jeremyevans)
* Add a :current_schema option to Database#view_exists? (only defined on Oracle) to look in the current schema instead of non-system schemas (jeremyevans) (#1710)
* Recognize another disconnect error in the mysql and mysql2 adapters (jeremyevans) (#1706)
=== 5.35.0 (2020-08-01)
* Recognize another disconnect error in the oracle adapter (sterlzbd) (#1705)
* Consider all associations with :dataset options as instance-specific associations (jeremyevans)
* Make Model.finalize_associations not break with instance-specific associations (jeremyevans)
* Make association placeholder loader consider block if instance_specific: false association option is used (jeremyevans)
* Copy composite unique constraints when emulating alter table operations on SQLite (jeremyevans) (#1704)
* Add instance_specific_default plugin for setting default association :instance_specific value, or warning/raising for cases where it is not specified (jeremyevans)
* Make Model.plugin issue a deprecation warning when loading a plugin with arguments or a block if the plugin does not accept arguments/block (jeremyevans)
* Make validation_class_methods consider all :if, :allow_missing, :allow_nil, and :allow_blank settings, instead of just the first (jeremyevans)
* Include hash entries with nil keys in Dataset#to_dot output in to_dot extension (jeremyevans)
* Remove unneeded conditionals from plugins and extensions (jeremyevans)
* Fix exception class in run_transaction_hooks extension if calling run_after_{commit,rollback}_hooks outside of a transaction (jeremyevans)
=== 5.34.0 (2020-07-01)
* Make eager_graph work correctly if called with no associations (jeremyevans)
* Make :ruby eager limit strategy handle cases where there is no limit or offset (jeremyevans)
* Do not keep a reference to a Sequel::Database instance that raises an exception during initialization (jeremyevans)
* Make Database#pool.all_connections not yield for a single connection pool in disconnected state (jeremyevans)
* Raise an exception if trying to disconnect a server that doesn't exist in the sharded connection pools (jeremyevans)
* Support :refresh option when calling *_pks getter method in the association_pks plugin (jeremyevans)
* Support caching of repeated calls to *_pks getter method in the association_pks plugin using :cache_pks association option (jeremyevans)
* Add *_pks_dataset methods for one_to_many and many_to_many associations when using the association_pks plugin (jeremyevans)
=== 5.33.0 (2020-06-01)
* Support custom join types on a per-association basis when using eager_graph/association_join (jeremyevans)
* Support primary_key with type: :smallserial on PostgreSQL (j-a-m-l) (#1698)
* Add Database#current_timestamp_utc accessor on SQLite to keep CURRENT_* in UTC instead of converting to localtime (jeremyevans)
=== 5.32.0 (2020-05-01)
* Allow Database#create_table? to work with :partition_of option on PostgreSQL (jeremyevans) (#1690)
* Add fiber_concurrency extension, for using Fiber.current instead of Thread.current for checking out connections (jeremyevans)
* Move most Sequel singleton methods into a module that extends Sequel for easier overriding (jeremyevans)
* Fix method visibility issues in model, plugin, extension, and adapter code (jeremyevans)
* Avoid defining conversion procs for PostgreSQL inet/cidr types in pg_inet extension when using sequel_pg 1.13.0+ (jeremyevans)
* Add run_transaction_hooks Database extension, allowing for running the transaction hooks before commit/rollback, for use with transactional testing (jeremyevans)
* Recognize timestamp(N) with time zone type (isc) (#1684)
=== 5.31.0 (2020-04-01)
* Fix alter_table drop_constraint :primary_key option on SQLite for non-integer primary keys (jeremyevans)
* Add skip_saving_columns plugin, which supports columns to skip when saving, and skips generated columns by default (joeosburn, jeremyevans) (#1681, #1682)
* Add support for creating partitioned tables in PostgreSQL 10+ using :partition_by and :partition_of options (jeremyevans)
* Dump generated columns as generated columns when using the schema_dumper with :same_db option on PostgreSQL 12+ (jeremyevans) (#1680)
* Ignore defaults for generated columns by default when using the schema dumper (jeremyevans) (#1680)
* Include generated columns in schema on SQLite 3.31+ (jeremyevans)
* Add :generated schema entry on PostgreSQL 12+ and SQLite 3.31+ for whether the column is generated (jeremyevans)
* Add association_lazy_eager_option plugin for supporting :eager option for association method (jeremyevans)
* Add forbid_lazy_load plugin for forbidding lazy loading of associations, to help find N+1 issues (jeremyevans)
=== 5.30.0 (2020-03-01)
* Remove specs and old release notes from the gem to reduce gem size by over 40% (jeremyevans)
* When using Database#transaction :retry_on, call :before_retry option if retrying even if :num_retries is nil (jcmfernandes) (#1678)
* Support generated columns on SQLite 3.31+ using :generated_always_as and :generated_type options (jeremyevans)
=== 5.29.0 (2020-02-01)
* Recognize another disconnect error in the tinytds adapter (jeremyevans)
* Fix verbose warning in postgres adapter when using prepared statements and recent versions of ruby-pg (jeremyevans)
* Work correctly on Ruby 2.8+ by supporting second argument for initialize_clone (jeremyevans)
* Add empty_failure_backtraces plugin for empty backtraces for ValidationFailed and HookFailed exceptions, much faster on JRuby (jeremyevans)
* Add Dataset#json_serializer_opts to json_serializer plugin, allowing json_serializer options to be set on a per-dataset basis (jeremyevans)
=== 5.28.0 (2020-01-01)
* Warn when calling Sequel::JDBC::Postgres::Dataset#with_fetch_size (jeremyevans) (#1665)
* Add exclude_or_null extension, for filtering datasets where the condition is false or NULL (jeremyevans)
* Add any_not_empty extension, for making Dataset#any? without a block mean !empty? (jeremyevans)
=== 5.27.0 (2019-12-01)
* Add Sequel::DEFAULT for a DEFAULT expression, useful for assigning to default values (jeremyevans) (see the sketch after this section)
* Make Postgres::ArrayOp#join in pg_array_ops extension work correctly on PostgreSQL <9.1 (jeremyevans)
* Make pg_enum extension work correctly on PostgreSQL 8.3-9.0 (jeremyevans)
* Emulate FILTER clause for aggregate functions using CASE on databases not supporting it directly (jeremyevans)
* Support ordering by NULLS FIRST/NULLS LAST without emulation on SQLite 3.30+ (jeremyevans)
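
  A sketch of Sequel::DEFAULT noted above, with a hypothetical items table:

    DB[:items].update(quantity: Sequel::DEFAULT)
    # UPDATE items SET quantity = DEFAULT
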
=== 5.26.0 (2019-11-01)
* Recognize two additional foreign key constraint violation codes on MySQL 8.0.13+ (rianmcguire) (#1657)
* Support table aliases for single-table INSERT statements on PostgreSQL 9.5+ (jeremyevans) (#1656)
* Implement Sequel::Postgres::PGRange#hash so instances work correctly in hashes (jeremyevans) (#1648)
* Make dirty plugin work correctly with typecast_on_load plugin (jeremyevans) (#1647)
* Add support for :require_modification option when setting up nested_attributes (jeremyevans)
* Add support for SQL/JSON path expressions to the pg_json_ops extension, supported by PostgreSQL 12+ (jeremyevans)
=== 5.25.0 (2019-10-01)
* Fix Sequel::SQL::NumericMethods#coerce to not raise NoMethodError if super method is not defined (jeremyevans) (#1645)
* Allow setting a default for a column that already has a default on Microsoft SQL Server (jeremyevans)
* Fix keyword argument separation warnings on Ruby master branch in csv_serializer plugin (jeremyevans)
* Add association_multi_add_remove plugin for adding/removing multiple associated objects in a single method call (AlexWayfer, jeremyevans) (#1641, #1643)
* Make sharding plugin integrate with server_block extension (jeremyevans)
=== 5.24.0 (2019-09-01)
* Add Database#skip_logging? private method designed for extensions to force query timing even if no logger is present (adam12) (#1640)
* Allow a hostname specified in a defaults_file in the mysql2 adapter, by not explicitly setting :host (sapio-bdeamer) (#1638)
* Convert all database array types to Ruby arrays in the jdbc adapter (jeremyevans)
* Add static_cache_cache plugin for caching rows for static_cache models to a file to avoid database queries during model initialization (jeremyevans)
* Add :cache_file plugin option to pg_auto_constraint_validations plugin, for caching metadata to a file for faster initialization (jeremyevans)
* Support :unique_deferrable and :primary_key_deferrable column options (jeremyevans)
* Support :generated_always_as column option on PostgreSQL 12+ (jeremyevans)
=== 5.23.0 (2019-08-01)
* Work around a bug on jdbc-sqlite3 3.27.2.1 when parsing schema for tables with columns with default values (jeremyevans)
* Work around a bug in jdbc-sqlite3 3.27.2.1 in Database#foreign_key_list in the jdbc/sqlite3 adapter (jeremyevans)
* Make Dataset#execute* private methods respect explicit servers option, fixing Dataset#paged_each in the postgres adapter when sharding (jeremyevans) (#1632)
* Handle instances of subclasses of core classes when wrapping objects in the pg_json extension (jeremyevans) (#1631)
* Support :ansi Database option in the tinytds adapter (kenaniah) (#1629)
* Support cross-database and linked servers when parsing schema on Microsoft SQL Server (kenaniah) (#1629)
* Add insert_conflict plugin for automatically handling unique constraint conflicts when saving new model instances on PostgreSQL 9.5+ and SQLite 3.24.0+ (jeremyevans)
* Avoid errors when parsing schema in the mock sqlite adapter (jeremyevans)
* Avoid possible thread-safety issue in the timezones support (jeremyevans)
* Handle offsets when typecasting an array or hash to datetime when Sequel.datetime_class = Time (jeremyevans)
* Support Sequel.datetime_class = Time when using the named_timezones extension (jeremyevans)
=== 5.22.0 (2019-07-01)
* Fix Dataset#multi_insert and #import with return: :primary_key on MSSQL when the dataset has a row_proc (jeremyevans) (#1627)
* Support Dataset#with :materialized option on PostgreSQL 12 for [NOT] MATERIALIZED (jeremyevans)
* Make Database#primary_key_sequence work on tables without serial sequences on PostgreSQL 12 (jeremyevans)
* Support ruby 2.7+ startless ranges in the pg_range extension (jeremyevans)
* Support ruby 2.7+ startless, endless ranges in filters, using an always true condition for them (jeremyevans)
* Support ruby 2.7+ startless ranges in filters, using just a <= or < operator for them (jeremyevans)
=== 5.21.0 (2019-06-01)
* Recognize additional DatabaseLockTimeout errors in mysql and mysql2 adapters (jeremyevans)
* Disallow eager_graph of ancestors and descendants associations when using the rcte_tree plugin (jeremyevans)
* Make jdbc/mysql adapter work when using JRuby with Java 11 (jeremyevans)
* Support window function options :window, :exclude, and :frame :type=>:groups, :start, and :end on SQLite 3.28.0+ (jeremyevans)
* Make the server_block extension respect the :servers_hash Database option (jeremyevans)
* Typecast string input for json/jsonb types as JSON strings instead of parsing as JSON in the pg_json extension when Database#typecast_json_strings is set to true (jeremyevans)
* Wrap JSON primitives (string, number, true, false, nil) in the pg_json extension when Database#wrap_json_primitives is set to true (jeremyevans)
* Convert the Database :timeout option to an integer in the sqlite adapter (jeremyevans) (#1620)
* Improve performance in ado adapter using more efficient inner loop (jeremyevans)
* Improve performance in ado adapter using faster callables for type conversion (jeremyevans)
* Fix handling of decimal values in the ado adapter when using locales where the decimal separator is , and not . (jeremyevans) (#1619)
=== 5.20.0 (2019-05-01)
* Fix reversing of alter_table add_foreign_key when :type option is used (jeremyevans) (#1615)
* Switch from using instance_exec to define_method for model associations and in some plugins (jeremyevans)
* Fix Database#server_version when using mysql2 adapter with mysql driver on MariaDB 10+ database (v-kolesnikov) (#1614)
* Make one_to_one setter method handle models that use joined datasets (jeremyevans) (#1612)
* Make auto_validations plugin work with the class_table_inheritance plugin (jeremyevans) (#1611)
* Avoid use of instance_exec for PlaceholderLiteralString#with_dataset (jeremyevans)
* Recognize float unsigned database types as float (keeguon, jeremyevans) (#1609)
* Support :savepoint options to Database#{after_commit,after_rollback} for making the hooks handle savepoints (jeremyevans)
* Avoid use of instance_exec in association_dependencies plugin (jeremyevans)
* Add pg_auto_constraint_validation_override to the pg_auto_constraint_validations plugin, for customizing columns and error message per constraint (jeremyevans)
* Make Database#check_constraints on PostgreSQL also include constraints where the related columns are not known (jeremyevans)
=== 5.19.0 (2019-04-02)
* Use more optimized approach to merging hashes in ruby 2.5+ (jeremyevans)
* Use SQLite extended result codes when using ruby-sqlite3 1.4.0+ (jeremyevans)
* Recognize additional SQLite extended result codes in the shared sqlite adapter (jeremyevans)
* Add Database#rename_enum_value to the pg_enum extension (AlexWayfer) (#1603)
* Make Database#drop_table delete constraint validations metadata for that table if using the constraint_validations extension (jeremyevans)
* Speed up row fetching in the sqlite adapter (jeremyevans)
* Speed up row fetching and type conversion in the sqlanywhere adapter (jeremyevans)
=== 5.18.0 (2019-03-01)
* Use singleton .call methods on plain objects instead of procs/methods for faster type conversion (jeremyevans)
* Add Sequel::SQL::Blob.call to avoid indirection when converting values from the database (jeremyevans)
* Use while instead of each for inner loops in sqlite and jdbc adapters for better performance (jeremyevans)
* Make after_initialize plugin not make the argument to Model.call optional (jeremyevans)
* Allow Dataset#paged_each to be called without a block in the postgres and mysql2 adapters (jeremyevans)
* Remove flow-control exceptions in connection_expiration and connection_validator extensions (jeremyevans)
* Add throw_failures plugin for throwing ValidationFailed and HookFailed exceptions instead of raising them, up to 10x performance increase on JRuby (jeremyevans)
* Support tzinfo 2 in addition to tzinfo 1 in the named_timezones extension (jeremyevans) (#1596)
=== 5.17.0 (2019-02-01)
* Support skip_auto_validations instance method in auto_validations plugin (oldgreen, jeremyevans) (#1592)
* Support :preconnect_extensions Database option for loading extensions before :preconnect option (jeremyevans)
* Avoid usage of Proc.new with implicit block as ruby 2.7+ deprecates this behavior (jeremyevans)
* Allow Sequel[].as to be used for constructing aliases with eager_graph (e.g. Model.eager_graph(Sequel[:a].as(:b))) (jeremyevans) (#1588)
=== 5.16.0 (2019-01-02)
* Convert integer columns to bigint columns when copying SQLite databases to other databases using bin/sequel -C (jeremyevans) (#1584)
* Use nicer error messages for missing or empty migration directories (Lavode) (#1585)
* Make alter table emulation work correctly in SQLite 3.26.0+ (jeremyevans) (#1582)
* Do not unset new one_to_one associated objects' reciprocal associations before saving associated objects in the nested_attributes plugin (jeremyevans)
* Do not validate new one_to_one associated objects twice when saving in the nested_attributes plugin (jeremyevans)
* Fix :qualify_tables option to class_table_inheritance plugin to work correctly with subclasses of subclasses (benalavi) (#1581)
* Make class_table_inheritance plugin use the schema cache instead of sending a query to get columns for tables (kenaniah) (#1580)
* Remove loading of mysqlplus in the mysql adapter (jeremyevans)
* Make mysql adapter work correctly on ruby 2.6+ (jeremyevans)
* Add Database#rollback_on_exit to rollback transactions instead of committing them when exiting the transaction block (jeremyevans)
* Enable window functions in SQLite 3.26.0+ (jeremyevans)
* Do not override existing methods when creating Sequel::Model attribute getter/setter methods (jeremyevans) (#1578)
* Use parentheses for expressions being subscripted (e.g. (array_agg(column))[1]) (jeremyevans)
=== 5.15.0 (2018-12-01)
* Add :conn_str option in the postgres adapter for PostgreSQL connection strings, if the pg driver is used (graywolf) (#1572)
* Add :qualify_tables option to class_table_inheritance plugin to automatically qualify subclass tables with superclass qualifier (benalavi) (#1571)
* Access already allocated connections in a thread safe manner when checking out connections in the sharded threaded connection pool (jeremyevans)
* Automatically support datasets using qualified tables in the class_table_inheritance plugin without having to use the :alias option (benalavi) (#1565)
* Support rename_column without emulation on SQLite 3.25+ (jeremyevans)
* Do not remove currently cached many_to_one associated objects when changing the related foreign key value from nil to non-nil (jeremyevans)
* Do not validate new *_to_many associated objects twice when saving in the nested_attributes plugin (jeremyevans)
* Add Model#skip_validation_on_next_save! for skipping validation on next save call (jeremyevans)
=== 5.14.0 (2018-11-01)
* Drop defaulting the :port option to 5432 in the postgres adapter, so that setting the :service option in :driver_options works (jeremyevans) (#1558)
* Do not cache values for columns without parseable defaults when using :cache option in defaults_setter plugin (jeremyevans)
* Emulate NULLS FIRST/LAST ordering on databases that do not natively support it (jeremyevans)
* Do not modify boolean expressions created from string or array if string or array is modified (jeremyevans)
* Make roots and roots_dataset dataset methods instead of class methods in the tree plugin (JelF) (#1554)
* Do not cache dataset SQL if dataset uses subquery that cannot cache SQL (jeremyevans)
* Make Model#=== work correctly for models with composite primary keys (jeremyevans)
* Add Model#pk_equal? as a more descriptive name for Model#=== (AlexWayfer) (#1550)
* Do not push down expression inversion in cases where it may result in incorrect behavior (e.g. ANY/SOME/ALL operators) (jeremyevans) (#1549)
=== 5.13.0 (2018-10-01)
* Support :single_value type in prepared statements (rintaun) (#1547)
* Make Model.all in static_cache plugin accept a block (AlexWayfer, jeremyevans) (#1543)
* Add constant_sql_override extension for overriding SQL used for constants such as CURRENT_TIMESTAMP (celsworth) (#1538)
* Do not cache from_self datasets if options are given (jeremyevans)
=== 5.12.0 (2018-08-31)
* Make constraint_validations extension respect Database#constraint_validations_table setting (jeremyevans)
* Make Sequel.extension load files from gems (jeremyevans)
* Map clob prepared statement argument type to OCI8::CLOB in the oracle adapter (pipistrellka) (#1534)
* Make Model.load_cache public in the static_cache plugin (AlexWayfer) (#1533)
* Enable support for NOWAIT on MariaDB 10.3+ (jeremyevans)
* Enable support for INTERSECT and EXCEPT on MariaDB 10.3+ (jeremyevans)
* Make tactical_eager_loading plugin handle automatic eager loading for associated objects created by eager_graph (jeremyevans)
* Cache eager_graph loader to speed up subsequent loads from the same dataset (jeremyevans)
* Add caller_logging database extension to log callers before queries, useful during development (jeremyevans)
* Add Database#call_procedure in the postgres adapter for calling PostgreSQL 11+ procedures (jeremyevans)
* Add eager_graph_eager plugin for chaining eager association loads after eager_graph association loads (jeremyevans)
* Support using Dataset#eager_graph in eager load callback for associations using join tables (jeremyevans)
* Make Dataset#graph handle existing selections without determinable aliases by forcing a subselect (jeremyevans)
* Freeze prepared statement arguments before returning the prepared statement (jeremyevans)
* Refactor emulated prepared statement internals to use a placeholder literalizer (jeremyevans)
=== 5.11.0 (2018-08-01)
* Fix using the jdbc/sqlserver adapter on JRuby 9.2+ (jeremyevans)
* Fix dumping schema for numeric/decimal columns with default values, broken starting in 5.9.0 (jeremyevans)
* Recognize additional check constraint violations on certain versions of SQLite (jeremyevans)
* Use cached model instances for Model.first calls without an argument or with a single integer argument in the static_cache plugin (AlexWayfer) (#1529)
* Support ON CONFLICT clause for INSERT on SQLite 3.24+ (jeremyevans)
* Support Dataset#window for WINDOW clause on MySQL 8 and SQLAnywhere (jeremyevans)
* Enable window function support on SQLAnywhere (jeremyevans)
* Support using a hash as a window function :frame option value, with support for ROWS/RANGE/GROUPS, numeric offsets, and EXCLUDE (jeremyevans)
* Allow using set_column_default with a nil value to remove the default value for a column on MySQL when the column is NOT NULL (jeremyevans)
=== 5.10.0 (2018-07-01)
* Use input type casts when using the postgres adapter with pg 0.18+ to reduce string allocations for some primitive types used as prepared statement arguments (jeremyevans)
* Assume local time if database timezone not specified when handling BC timestamps on JRuby 9.2.0.0 in the pg_extended_date_support extension (jeremyevans)
* Fix parsing of timetz types in the jdbc/postgresql adapter (jeremyevans)
* Make SQLTime.parse respect SQLTime.date and Sequel.application_timezone (jeremyevans)
* Add :top as an option in the list plugin (celsworth) (#1526)
* Fix Model#{ancestors,descendants,self_and_siblings} in the tree plugin when custom parent/children association names are used (jeremyevans) (#1525)
* Treat read-only mode error as disconnect error on mysql and mysql2 adapters, for better behavior on AWS Aurora cluster (jeremyevans)
* Don't use cached placeholder literalizers for Dataset#{first,where_all,where_each,where_single_value} if the argument is an empty array or hash (jeremyevans)
* Support :tablespace option when adding tables, indexes, and materialized views on PostgreSQL (jeremyevans)
* Support :include option for indexes on PostgreSQL 11+ (jeremyevans)
* Allow the use of IN/NOT IN operators with set returning functions for Sequel::Model datasets (jeremyevans)
* Make many_to_pg_array associations in the pg_array_associations plugin work on PostgreSQL 11 (jeremyevans)
* Only load strscan library in pg_array extension if it is needed (jeremyevans)
* Don't remove related many_to_one associations from cache when setting column value to existing value for model instances that have not been persisted (jeremyevans) (#1521)
* Support ruby 2.6+ endless ranges in the pg_range extension (jeremyevans)
* Support ruby 2.6+ endless ranges in filters, using just a >= operator for them (jeremyevans)
=== 5.9.0 (2018-06-01)
* Support generated columns on MySQL 5.7+ and MariaDB 5.2+ (wjordan, jeremyevans) (#1517)
* Add escaped_like extension for creation of LIKE expressions with placeholders in the pattern without access to a dataset (jeremyevans) (see the sketch after this section)
* Modify jdbc adapter exception handling to work around ::NativeException deprecation in JRuby 9.2 (jeremyevans)
* Work around broken BC date handling in JRuby 9.2.0.0 (jeremyevans)
* Switch use of BigDecimal.new() to BigDecimal(), since the former is deprecated (jeremyevans)
* Add Sequel::VERSION_NUMBER for easier version comparisons (jeremyevans)
* Add Model.has_dataset? to determine if the model class has a dataset (AlexWayfer) (#1508)
* Support use of LIKE with ANY function on PostgreSQL by avoiding unnecessary use of ESCAPE syntax (jeremyevans)
* Disconnect connections left allocated by dead threads instead of returning the connections to the pool (jeremyevans)
* Make both threaded connection pools avoid disconnecting connections while holding the connection pool mutex (jeremyevans)
* Don't deadlock when disconnecting connections in the sharded_threaded connection pool when using connection_validator or connection_expiration extensions (jeremyevans)
* Don't modify hash argument passed in Model.nested_attributes in the nested_attributes plugin (jeremyevans)
* Avoid unnecessary hash creation in many places (jeremyevans)
* Fix duplicate objects in nested associations when eager_graphing cascaded many_to_one=>one_to_many associations (jeremyevans)
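
  A sketch of the escaped_like extension noted above; the albums table and
  search term are hypothetical:

    Sequel.extension :escaped_like

    term = '100%'  # untrusted user input
    DB[:albums].where(Sequel[:name].escaped_like('?%', term))
    # LIKE metacharacters in term are escaped, so it is matched literally
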
=== 5.8.0 (2018-05-01)
* Don't mark SQLAnywhere as supporting WITH in INSERT statement (jeremyevans)
* Support :search_path as a shard option on PostgreSQL (jeremyevans)
* Add Dataset#nowait for raising a Sequel::DatabaseLockTimeout when a locked row is encountered, supported on PostgreSQL, MySQL 8+, MSSQL, and Oracle (jeremyevans) (see the sketch after this section)
* Support Dataset#skip_locked on MySQL 8+ (jeremyevans)
* Make schema modification methods in the pg_enum extension work on a frozen Database object (jeremyevans)
* Support common table expressions and window functions on MySQL 8+ (jeremyevans)
* Ignore Dataset#explain :extended option on MySQL 5.7+, since extended output is then the MySQL default (jeremyevans)
* Work around REGEXP BINARY not working correctly on MySQL 8+ by using REGEXP_LIKE with the 'c' match_type (jeremyevans)
* Force correct column order in Database#foreign_key_list on MySQL (jeremyevans)
* Add ConnectionPool#connection_expiration_random_delay to connection_expiration extension, to avoid thundering herd if preallocating connections (hex2a, jeremyevans) (#1503)
* Emit deprecation warning in association_proxies plugin if using #filter on an association proxy, since behavior will change on ruby 2.6+ (utilum) (#1497)
* Handle multiple add_constraint calls and a set_column_null call in the same alter_table block on SQLite (jeremyevans) (#1498)
* Add Database#rename_enum to the pg_enum extension (AlexWayfer) (#1495)
* Make tactical_eager_loading plugin respect the :allow_eager association option (jeremyevans) (#1494)
* Add pg_auto_constraint_validations plugin, for automatically converting constraint violations to validation failures on PostgreSQL (jeremyevans)
* Don't make Model#_valid? public in the error_splitter plugin (jeremyevans)
* Support Database#indexes :include_partial option on PostgreSQL for including partial indexes (jeremyevans)
* Include more diagnostic information in Database#error_info on PostgreSQL (jeremyevans)
* Support Database#foreign_key_list :reverse option on PostgreSQL for parsing foreign key constraints that reference a given table (jeremyevans)
* Add Database#check_constraints on PostgreSQL for parsing CHECK constraints (jeremyevans)
* Don't use identity columns if :serial=>true or :type=>:serial|:bigserial column options are used (#1490) (jeremyevans)
* Cache Dataset#select_all datasets if no arguments are given (jeremyevans)
* Cache Dataset#returning datasets if no arguments are given (jeremyevans)
* Cache Dataset#qualify datasets if no argument is given (jeremyevans)
* Cache Dataset#lateral datasets (jeremyevans)
* Cache Dataset#from_self datasets if no options are given (jeremyevans)
* Cache Dataset#distinct datasets if no arguments or block is given (jeremyevans)
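
  A sketch of Dataset#nowait noted above, with a hypothetical accounts
  table:

    DB.transaction do
      begin
        DB[:accounts].for_update.nowait.first(id: 1)
      rescue Sequel::DatabaseLockTimeout
        # the row is locked by another transaction; handle without blocking
      end
    end
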
=== 5.7.0 (2018-04-01)
* Add Sequel.start_timer and .elapsed_seconds_since for more accurate elapsed time calculations on ruby 2.1+ (jeremyevans)
* Run Dataset#with_sql_{all,each,first,single_value} using a cached dataset to avoid clobbering the dataset's columns (jeremyevans)
* Add Database#convert_serial_to_identity on PostgreSQL 10.2+, which requires superuser access (jeremyevans)
* Fix Database#server_version when connecting to PostgreSQL 10.1+ in certain cases (jeremyevans)
* Free temporary clobs in the jdbc/oracle adapter to prevent a memory leak (jeremyevans) (#1482)
* Treat prepared statement errors due to changing types as disconnect errors in the postgres adapter (jeremyevans) (#1481)
* Add integer64 extension for treating Integer as a 64-bit integer when used as a generic type (jeremyevans)
* Allow many_to_pg_array remove_all_* method to cast appropriately to work correctly for non-integer types (jeremyevans)
* Fix array_type for pg_array_to_many and many_to_pg_array associations in pg_array_associations plugin (jeremyevans)
* Use identity columns instead of serial columns for primary keys on PostgreSQL 10.2+ (jeremyevans)
* Support :identity option when creating columns on PostgreSQL 10+ to create identity columns (jeremyevans)
* Add Dataset#overriding_{system,user}_value on PostgreSQL for use with PostgreSQL 10+ identity columns (jeremyevans)
* Set :auto_increment schema entry correctly for PostgreSQL 10+ identity columns (jeremyevans)
=== 5.6.0 (2018-03-01)
* Dedup :db_type strings in schema hashes on Ruby 2.5+ (jeremyevans)
* Make schema_caching extension work with :callable_default schema values (jeremyevans)
* Freeze string values in hashes returned by Database#schema when using the schema_caching extension (jeremyevans)
* Protect migration file loading with a mutex to not break when multiple threads load migration files simultaneously (jeremyevans)
* Respect identifier mangling rules when renaming columns on Microsoft SQL Server (jeremyevans)
=== 5.5.0 (2018-01-31)
* Make Database#copy_table in the postgres adapter handle errors that occur while processing rows (jeremyevans) (#1470)
* Cache results of changed_columns method in local variables in many places for better performance (jeremyevans)
* Make modification_detection plugin not break column change detection for new objects (jeremyevans) (#1468)
* Make pg_range extension set :ruby_default schema value for recognized range defaults (jeremyevans)
* Make pg_interval extension set :ruby_default schema value for recognized interval defaults (jeremyevans)
* Make pg_json extension set :callable_default schema value for empty json/jsonb array/hash defaults (jeremyevans)
* Make pg_inet extension set :ruby_default schema value for recognized inet/cidr defaults (jeremyevans)
* Make pg_hstore extension set :callable_default schema value for empty hstore defaults (jeremyevans)
* Make pg_array extension set :callable_default schema value for recognized empty array defaults (jeremyevans) (#1466)
* Make defaults_setter plugin prefer :callable_default db_schema values over :ruby_default db_schema values (jeremyevans)
* Add defaults_setter plugin :cache option for caching default values returned (jeremyevans)
* Freeze string values in hashes returned by Database#schema (jeremyevans)
=== 5.4.0 (2018-01-04)
* Enable fractional seconds in timestamps on DB2 (jeremyevans) (#1463)
* Don't attempt to insert a second time if insert_select runs a query that doesn't return results, which can happen when triggers are used (jeremyevans)
* Make Dataset#insert_select on PostgreSQL and MSSQL return false instead of nil if the INSERT query is sent to the database but returns no rows (jeremyevans)
* Add index_caching extension for caching calls to Database#indexes (kenaniah, jeremyevans) (#1461)
* Allow Database#indexes on SQLite, MSSQL, SQLAnywhere, and DB2 to handle SQL::Identifier values (jeremyevans)
* Add pg_timestamptz extension for using timestamptz (timestamp with time zone) as the default timestamp type (jeremyevans)
* Support Sequel.date_{add,sub} :cast option for setting cast type in date_arithmetic extension (jeremyevans)
* Optimize Database#synchronize implementation on ruby 2.5+ (jeremyevans)
* Add class_table_inheritance plugin :ignore_subclass_columns option (brianphillips) (#1459)
* Make Dataset#to_xml in xml_serializer work with eager_graphed datasets (jeremyevans)
* Make Dataset#to_json in json_serializer work with eager_graphed datasets (jeremyevans)
* Cache Dataset#nullify dataset in the null_dataset extension (chanks) (#1456)
* Add datetime_parse_to_time extension, for parsing timestamp strings without offsets using DateTime.parse.to_time (jeremyevans) (#1455)
* Add WHERE NULL filter for Dataset#where calls with no existing filter, no argument, and where the virtual row block returns nil (jeremyevans)
=== 5.3.0 (2017-12-01)
* Add logger to Database instance before making first connection in bin/sequel (jeremyevans)
* Drop support for PostgreSQL <8.1 in Database#indexes (jeremyevans)
* Add synchronize_sql extension, for checking out a connection around SQL generation (KJTsanaktsidis, jeremyevans) (#1451)
* Deprecate Dataset#where calls with no existing filter, no argument, and where the virtual row block returns nil (jeremyevans) (#1454)
* Add DatasetModule#reverse for simpler use of descending orders (jeremyevans)
* Support WITH clauses in subqueries on SQLite, but not in UNION/INTERSECT/EXCEPT (jeremyevans)
* Hoist WITH clauses to INSERT statement level if INSERT subquery uses a CTE on MSSQL (jeremyevans)
* Respect indislive and ignore indcheckxmin index attributes when using Database#indexes on PostgreSQL (jeremyevans)
* Explicitly disallow use of server-side prepared statements when using Dataset#call in the jdbc/postgresql adapter (jeremyevans) (#1448)
* Support common table expressions, window functions, dropping CHECK constraints, and recognizing CURRENT_DATE defaults on MariaDB 10.2+ (jeremyevans)
* Make Database#reset_primary_key_sequence work on PostgreSQL 10+ (jeremyevans)
* Support :connect_sqls Database option for easily issuing sql commands on all new connections (jeremyevans)
* Support :extensions Database option for loading extensions when initializing, useful in connection strings (jeremyevans)
* Avoid warning if trying to rollback after a commit or rollback raises an exception in the postgres adapter (jeremyevans)
* Support Date::Infinity values in the pg_extended_date_support extension (jeremyevans)
=== 5.2.0 (2017-10-27)
* Fix type conversion for smallint unsigned and integer unsigned types on jdbc/mysql (jeremyevans) (#1443)
* Add pg_extended_date_support extension, for handling infinite and BC dates/timestamps (jeremyevans)
* Do not ignore existing @dataset instance variable when subclassing Sequel::Model (bjmllr) (#1435)
=== 5.1.0 (2017-10-01)
* Make jdbc/h2 and jdbc/hsqldb adapters respect :foreign_key_constraint_name option when adding new foreign key column (jeremyevans)
* Do not issue unnecessary query for macaddr type oid when loading the pg_inet extension (jeltz) (#1423)
* Make alter_table add_foreign_key with a column symbol reversible when using the :foreign_key_constraint_name option (jeremyevans) (#1422)
* Do not raise an error if calling Model.freeze on a frozen model (jeremyevans) (#1421)
* Make Database#copy_into in the jdbc/postgresql adapter handle multi-byte strings (ckoenig) (#1416)
* Remove deprecated Model use_after_commit_rollback class and instance methods (jeremyevans)
* Remove deprecated Model.allowed_columns method in the base model support (jeremyevans)
* Remove deprecated Model.plugin_module_defined? private method (jeremyevans)
* Remove deprecated support for Model#_before_validation private method (jeremyevans)
=== 5.0.0 (2017-09-01)
* Make bin/sequel -M option always use base 10 (jeremyevans)
* Don't use savepoints when creating indexes inside a transaction on databases that don't support transactional schema modifications (jeremyevans) (#1407)
* Support :if_not_exists option when creating indexes on PostgreSQL 9.5+ (DyegoCosta) (#1405)
* Make threaded connection pools not block while connections are being made (jeremyevans)
* SQL::Expression#clone and #dup now return self, since all expressions should be frozen value objects (jeremyevans)
* Don't create empty arrays for unused association callbacks (jeremyevans)
* Cache association method name symbols instead of recomputing them every time (jeremyevans)
* Raise an exception if attempting to create a prepared statement using a dataset with a delayed evaluation (jeremyevans)
* Make ConnectionPool#size thread safe by using the pool mutex (jeremyevans)
* Use instance_exec instead of instance_eval when passing a block, to work with lambdas that accept no arguments (jeremyevans)
* Freeze SQL::StringAgg instances in string_agg extension (jeremyevans)
* Freeze SQL::DateAdd instances in date_arithmetic extension (jeremyevans)
* Freeze SQL::Expression.comparison_attrs (jeremyevans)
* Rename SQL::Subscript#f to #expression, keeping #f as an alias (jeremyevans)
* Require the :pool_class Database option be a class to use a custom connection pool (jeremyevans)
* Make the class_table_inheritance plugin raise an Error during update if any UPDATE query does not affect a single row (jeremyevans)
* Change most send calls to public_send unless calling private methods is expected (jeremyevans)
* Database schema and schema generator methods now return nil (jeremyevans)
* Model#validates_unique in the validation helpers plugin now defaults to only checking on new or modified values (jeremyevans)
* Deprecate Model#_before_validation (private method), use Model#before_validation now (jeremyevans)
* Always run before/after/around validation hooks when saving, even when not validating the object (jeremyevans)
* Deprecate Model use_after_commit_rollback class and instance accessors (jeremyevans)
* Deprecate Model.allowed_columns reader (jeremyevans)
* Freeze internal constants that shouldn't be modified at runtime (jeremyevans)
* Attempt to connect to the database immediately when creating the Database instance (jeremyevans)
* Make association_pks plugin delay the setting of associated objects until the current object is saved by default (jeremyevans)
* Joined datasets used as model datasets are now automatically wrapped in a subquery (jeremyevans)
* Setting an invalid dataset for a model class now raises an exception by default (jeremyevans)
* Getting all values for newly created models now happens before calling after_create, instead of after (jeremyevans)
* Remove use of @was_new/@columns_updated instance variables when saving model objects (jeremyevans)
* Disable symbol splitting by default (jeremyevans)
* Make datasets frozen by default (jeremyevans)
* Drop support for ruby 1.8.7, minimum now is 1.9.2 (jeremyevans)
* Remove deprecated adapters, extensions, plugins, constants, and features (jeremyevans)
=== Older
See doc/CHANGELOG.old
sequel-5.63.0/CONTRIBUTING

Issue Guidelines
----------------
1) Issues should only be created for things that are definitely bugs.
If you are not sure that the behavior is a bug, ask about it on
GitHub Discussions or the sequel-talk Google Group. GitHub Issues
should not be used as a help forum.
2) If you are sure it is a bug, then post a complete description of
the issue, the simplest possible self-contained example showing
the problem, the full backtrace of any exception, and for issues
involving database queries, an SQL log.
3) Issues are generally closed as soon as the problem is considered
fixed. However, discussion can still happen after the issue is
closed, and the issue will be reopened if additional evidence is
provided showing the issue still exists.
Pull Request Guidelines
-----------------------
1) Try to include tests for all new features and substantial bug
fixes. See the testing guide for details about testing Sequel.
2) Try to include documentation for all new features. In most cases
this should include RDoc method documentation, but updates to the
guides are also appropriate in some cases.
3) Follow the style conventions of the surrounding code. In most
cases, this is standard ruby style.
4) Do not submit whitespace changes with code changes. Sequel is not
pedantic about trailing whitespace, so if you have an editor that
automatically strips trailing whitespace, you may want to turn
that feature off.
5) All code in pull requests is assumed to be MIT licensed. Do not
submit a pull request if that isn't the case.
6) Please do not submit pull requests for code that is not ready to
be merged. Pull requests should not be used to "start a
conversation" about a possible code change. If the pull
request requires a conversation, that conversation should take
place on GitHub Discussions or the sequel-talk Google Group.
7) Pull requests are generally closed as soon as it appears that the
branch will not be merged. However, discussion about the code can
still happen after the pull request is closed, and the pull request
can be reopened if additional commits to the branch or other
changes make it likely that it will be merged.
Code of Conduct
---------------
This code of conduct applies to all of the project's "collaborative
space", which is defined as community communications channels,
including the Google Group, GitHub project, and source code repository.
1) Participants must ensure that their language and actions are free
of personal attacks and remarks disparaging to people or groups.
2) Behaviour which can be reasonably considered harassment will not
be tolerated.
3) Discussion should be limited to the project and related
technologies.
You can report a violation of this code of conduct to the project
maintainer, who will take appropriate action.
sequel-5.63.0/Gemfile

source 'https://rubygems.org'
gemspec
sequel-5.63.0/ISSUE_TEMPLATE.md

Note: If you have a question about Sequel, would like help using
Sequel, want to request a feature, or do anything else other than
submit a bug report, please use GitHub Discussions or the
sequel-talk Google Group.
### Complete Description of Issue
### Simplest Possible Self-Contained Example Showing the Bug
### Full Backtrace of Exception (if any)
### SQL Log (if any)
sequel-5.63.0/MIT-LICENSE

Copyright (c) 2007-2008 Sharon Rosner
Copyright (c) 2008-2022 Jeremy Evans
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
sequel-5.63.0/README.rdoc

== Sequel: The Database Toolkit for Ruby
Sequel is a simple, flexible, and powerful SQL database access
toolkit for Ruby.
* Sequel provides thread safety, connection pooling and a concise
DSL for constructing SQL queries and table schemas.
* Sequel includes a comprehensive ORM layer for mapping
records to Ruby objects and handling associated records.
* Sequel supports advanced database features such as prepared
statements, bound variables, savepoints, two-phase commit,
transaction isolation, primary/replica configurations, and
database sharding.
* Sequel currently has adapters for ADO, Amalgalite,
IBM_DB, JDBC, MySQL, Mysql2, ODBC, Oracle,
PostgreSQL, SQLAnywhere, SQLite3, and TinyTDS.
== Resources
Website :: https://sequel.jeremyevans.net
RDoc Documentation :: https://sequel.jeremyevans.net/rdoc
Source Code :: https://github.com/jeremyevans/sequel
Bug tracking (GitHub Issues) :: https://github.com/jeremyevans/sequel/issues
Discussion Forum (GitHub Discussions) :: https://github.com/jeremyevans/sequel/discussions
Alternate Discussion Forum (sequel-talk Google Group) :: http://groups.google.com/group/sequel-talk
If you have questions about how to use Sequel, please ask on
GitHub Discussions or the sequel-talk Google Group.
Only use the bug tracker to report
bugs in Sequel, not to ask for help on using Sequel.
To check out the source code:
git clone git://github.com/jeremyevans/sequel.git
=== Contact
If you have any comments or suggestions please post to the Google group.
== Installation
gem install sequel
== A Short Example
require 'sequel'
DB = Sequel.sqlite # memory database, requires sqlite3
DB.create_table :items do
primary_key :id
String :name
Float :price
end
items = DB[:items] # Create a dataset
# Populate the table
items.insert(name: 'abc', price: rand * 100)
items.insert(name: 'def', price: rand * 100)
items.insert(name: 'ghi', price: rand * 100)
# Print out the number of records
puts "Item count: #{items.count}"
# Print out the average price
puts "The average price is: #{items.avg(:price)}"
== The Sequel Console
Sequel includes an IRB console for quick access to databases (usually referred to as bin/sequel). You can use it like this:
sequel sqlite://test.db # test.db in current directory
You get an IRB session with the Sequel::Database object stored in DB.
In addition to providing an IRB shell (the default behavior), bin/sequel also has support for migrating databases, dumping schema migrations, and copying databases. See the {bin/sequel guide}[rdoc-ref:doc/bin_sequel.rdoc] for more details.
== An Introduction
Sequel is designed to take the hassle away from connecting to databases and manipulating them. Sequel deals with all the boring stuff like maintaining connections, formatting SQL correctly and fetching records so you can concentrate on your application.
Sequel uses the concept of datasets to retrieve data. A Dataset object encapsulates an SQL query and supports chainability, letting you fetch data using a convenient Ruby DSL that is both concise and flexible.
For example, the following one-liner returns the average GDP for countries in the middle east region:
DB[:countries].where(region: 'Middle East').avg(:GDP)
Which is equivalent to:
SELECT avg(GDP) FROM countries WHERE region = 'Middle East'
Since datasets retrieve records only when needed, they can be stored and later reused. Records are fetched as hashes, and are accessed using an +Enumerable+ interface:
middle_east = DB[:countries].where(region: 'Middle East')
middle_east.order(:name).each{|r| puts r[:name]}
Sequel also offers convenience methods for extracting data from Datasets, such as an extended +map+ method:
middle_east.map(:name) # => ['Egypt', 'Turkey', 'Israel', ...]
middle_east.map([:id, :name]) # => [[1, 'Egypt'], [3, 'Turkey'], [2, 'Israel'], ...]
Or getting results as a hash via +as_hash+, with one column as key and another as value:
middle_east.as_hash(:name, :area) # => {'Israel' => 20000, 'Turkey' => 120000, ...}
== Getting Started
=== Connecting to a database
To connect to a database you simply provide Sequel.connect with a URL:
require 'sequel'
DB = Sequel.connect('sqlite://blog.db') # requires sqlite3
The connection URL can also include such stuff as the user name, password, and port:
DB = Sequel.connect('postgres://user:password@host:port/database_name') # requires pg
You can also specify optional parameters, such as the connection pool size, or loggers for logging SQL queries:
DB = Sequel.connect("postgres://user:password@host:port/database_name",
max_connections: 10, logger: Logger.new('log/db.log'))
It is also possible to use a hash instead of a connection URL, but make sure to include the :adapter option in this case:
DB = Sequel.connect(adapter: :postgres, user: 'user', password: 'password', host: 'host', port: port,
database: 'database_name', max_connections: 10, logger: Logger.new('log/db.log'))
You can specify a block to connect, which will disconnect from the database after it completes:
Sequel.connect('postgres://user:password@host:port/database_name'){|db| db[:posts].delete}
=== The DB convention
Throughout Sequel's documentation, you will see the +DB+ constant used to refer to the Sequel::Database instance you create.
This reflects the recommendation that for an app with a single Sequel::Database instance, the Sequel convention is to store
the instance in the +DB+ constant. This is a convention, not a requirement, but it is recommended.
Note that some frameworks that use Sequel may create the Sequel::Database instance for you, and you might not know
how to access it. In most cases, you can access the Sequel::Database instance through Sequel::Model.db.
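For example, assuming a framework has already set up the model layer for you, this sketch stores that instance in the conventional constant:

  DB = Sequel::Model.db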
=== Arbitrary SQL queries
You can execute arbitrary SQL code using Database#run:
DB.run("create table t (a text, b text)")
DB.run("insert into t values ('a', 'b')")
You can also create datasets based on raw SQL:
dataset = DB['select id from items']
dataset.count # will return the number of records in the result set
dataset.map(:id) # will return an array containing all values of the id column in the result set
You can also fetch records with raw SQL through the dataset:
DB['select * from items'].each do |row|
p row
end
You can use placeholders in your SQL string as well:
name = 'Jim'
DB['select * from items where name = ?', name].each do |row|
p row
end
=== Getting Dataset Instances
Datasets are the primary way records are retrieved and manipulated. They are generally created via the Database#from or Database#[] methods:
posts = DB.from(:posts)
posts = DB[:posts] # same
Datasets will only fetch records when you tell them to. They can be manipulated to filter records, change ordering, join tables, etc. Datasets are always frozen, and they are safe to use by multiple threads concurrently.
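For example (a sketch assuming a posts table), each method call returns a new frozen dataset and leaves the original unchanged:

  posts = DB[:posts]
  recent = posts.where{stamp > Date.today - 7}.order(:stamp)
  posts.frozen?  # => true
  recent.frozen? # => true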
=== Retrieving Records
You can retrieve all records by using the +all+ method:
posts.all
# SELECT * FROM posts
The +all+ method returns an array of hashes, where each hash corresponds to a record.
You can also iterate through records one at a time using +each+:
posts.each{|row| p row}
Or perform more advanced stuff:
names_and_dates = posts.map([:name, :date])
old_posts, recent_posts = posts.partition{|r| r[:date] < Date.today - 7}
You can also retrieve the first record in a dataset:
posts.order(:id).first
# SELECT * FROM posts ORDER BY id LIMIT 1
Note that you can get the first record in a dataset even if it isn't ordered:
posts.first
# SELECT * FROM posts LIMIT 1
If the dataset is ordered, you can also ask for the last record:
posts.order(:stamp).last
# SELECT * FROM posts ORDER BY stamp DESC LIMIT 1
You can also provide a filter when asking for a single record:
posts.first(id: 1)
# SELECT * FROM posts WHERE id = 1 LIMIT 1
Or retrieve a single value for a specific record:
posts.where(id: 1).get(:name)
# SELECT name FROM posts WHERE id = 1 LIMIT 1
=== Filtering Records
The most common way to filter records is to provide a hash of values to match to +where+:
my_posts = posts.where(category: 'ruby', author: 'david')
# WHERE ((category = 'ruby') AND (author = 'david'))
You can also specify ranges:
my_posts = posts.where(stamp: (Date.today - 14)..(Date.today - 7))
# WHERE ((stamp >= '2010-06-30') AND (stamp <= '2010-07-07'))
Or arrays of values:
my_posts = posts.where(category: ['ruby', 'postgres', 'linux'])
# WHERE (category IN ('ruby', 'postgres', 'linux'))
By passing a block to where, you can use expressions (this is fairly "magical"):
my_posts = posts.where{stamp > Date.today << 1}
# WHERE (stamp > '2010-06-14')
my_posts = posts.where{stamp =~ Date.today}
# WHERE (stamp = '2010-07-14')
If you want to wrap the objects yourself, you can use expressions without the "magic":
my_posts = posts.where(Sequel[:stamp] > Date.today << 1)
# WHERE (stamp > '2010-06-14')
my_posts = posts.where(Sequel[:stamp] =~ Date.today)
# WHERE (stamp = '2010-07-14')
Some databases such as PostgreSQL and MySQL also support filtering via Regexps:
my_posts = posts.where(category: /ruby/i)
# WHERE (category ~* 'ruby')
You can also use an inverse filter via +exclude+:
my_posts = posts.exclude(category: ['ruby', 'postgres', 'linux'])
# WHERE (category NOT IN ('ruby', 'postgres', 'linux'))
But note that this does a full inversion of the filter:
my_posts = posts.exclude(category: ['ruby', 'postgres', 'linux'], id: 1)
# WHERE ((category NOT IN ('ruby', 'postgres', 'linux')) OR (id != 1))
If at any point you want to use a custom SQL fragment for part of a query,
you can do so via +Sequel.lit+:
posts.where(Sequel.lit('stamp IS NOT NULL'))
# WHERE (stamp IS NOT NULL)
You can safely interpolate parameters into the custom SQL fragment by
providing them as additional arguments:
author_name = 'JKR'
posts.where(Sequel.lit('(stamp < ?) AND (author != ?)', Date.today - 3, author_name))
# WHERE ((stamp < '2010-07-11') AND (author != 'JKR'))
Datasets can also be used as subqueries:
DB[:items].where(Sequel[:price] > DB[:items].select{avg(price) + 100})
# WHERE (price > (SELECT avg(price) + 100 FROM items))
After filtering, you can retrieve the matching records by using any of the retrieval methods:
my_posts.each{|row| p row}
See the {Dataset Filtering}[rdoc-ref:doc/dataset_filtering.rdoc] file for more details.
=== Security
Designing apps with security in mind is a best practice.
Please read the {Security Guide}[rdoc-ref:doc/security.rdoc] for details on security
issues that you should be aware of when using Sequel.
=== Summarizing Records
Counting records is easy using +count+:
posts.where(Sequel.like(:category, '%ruby%')).count
# SELECT COUNT(*) FROM posts WHERE (category LIKE '%ruby%' ESCAPE '\')
And you can also query maximum/minimum values via +max+ and +min+:
max = DB[:history].max(:value)
# SELECT max(value) FROM history
min = DB[:history].min(:value)
# SELECT min(value) FROM history
Or calculate a sum or average via +sum+ and +avg+:
sum = DB[:items].sum(:price)
# SELECT sum(price) FROM items
avg = DB[:items].avg(:price)
# SELECT avg(price) FROM items
=== Ordering Records
Ordering datasets is simple using +order+:
posts.order(:stamp)
# ORDER BY stamp
posts.order(:stamp, :name)
# ORDER BY stamp, name
+order+ always overrides the existing order:
posts.order(:stamp).order(:name)
# ORDER BY name
If you would like to add to the existing order, use +order_append+ or +order_prepend+:
posts.order(:stamp).order_append(:name)
# ORDER BY stamp, name
posts.order(:stamp).order_prepend(:name)
# ORDER BY name, stamp
You can also specify descending order:
posts.reverse_order(:stamp)
# ORDER BY stamp DESC
posts.order(Sequel.desc(:stamp))
# ORDER BY stamp DESC
=== Core Extensions
Note the use of Sequel.desc(:stamp) in the above example. Much of Sequel's DSL uses this style, calling methods on the Sequel module that return SQL expression objects. Sequel also ships with a {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc] that integrates Sequel's DSL better into the Ruby language, allowing you to write:
:stamp.desc
instead of:
Sequel.desc(:stamp)
=== Selecting Columns
Selecting specific columns to be returned is also simple using +select+:
posts.select(:stamp)
# SELECT stamp FROM posts
posts.select(:stamp, :name)
# SELECT stamp, name FROM posts
Like +order+, +select+ overrides an existing selection:
posts.select(:stamp).select(:name)
# SELECT name FROM posts
As you might expect, there is an +order_append+ equivalent for +select+ called +select_append+:
posts.select(:stamp).select_append(:name)
# SELECT stamp, name FROM posts
=== Deleting Records
Deleting records from the table is done with +delete+:
posts.where(Sequel[:stamp] < Date.today - 3).delete
# DELETE FROM posts WHERE (stamp < '2010-07-11')
Be very careful when deleting, as +delete+ affects all rows in the dataset.
Call +where+ first and +delete+ second:
# DO THIS:
posts.where(Sequel[:stamp] < Date.today - 7).delete
# NOT THIS:
posts.delete.where(Sequel[:stamp] < Date.today - 7)
=== Inserting Records
Inserting records into the table is done with +insert+:
posts.insert(category: 'ruby', author: 'david')
# INSERT INTO posts (category, author) VALUES ('ruby', 'david')
=== Updating Records
Updating records in the table is done with +update+:
posts.where(Sequel[:stamp] < Date.today - 7).update(state: 'archived')
# UPDATE posts SET state = 'archived' WHERE (stamp < '2010-07-07')
You can provide arbitrary expressions when choosing what values to set:
posts.where(Sequel[:stamp] < Date.today - 7).update(backup_number: Sequel[:backup_number] + 1)
# UPDATE posts SET backup_number = (backup_number + 1) WHERE (stamp < '2010-07-07')
As with +delete+, +update+ affects all rows in the dataset, so +where+ first,
+update+ second:
# DO THIS:
posts.where(Sequel[:stamp] < Date.today - 7).update(state: 'archived')
# NOT THIS:
posts.update(state: 'archived').where(Sequel[:stamp] < Date.today - 7)
=== Merging records
Merging records using the SQL MERGE statement is done using merge* methods.
You use +merge_using+ to specify the merge source and join conditions.
You can use +merge_insert+, +merge_delete+, and/or +merge_update+ to set the
INSERT, DELETE, and UPDATE clauses for the merge. +merge_insert+ takes the same
arguments as +insert+, and +merge_update+ takes the same arguments as +update+.
+merge_insert+, +merge_delete+, and +merge_update+ can all be called with blocks,
to set the conditions for the related INSERT, DELETE, or UPDATE.
Finally, after calling all of the other merge_* methods, you call +merge+
to run the MERGE statement on the database.
ds = DB[:m1].
merge_using(:m2, i1: :i2).
merge_insert(i1: :i2, a: Sequel[:b]+11).
merge_delete{a > 30}.
merge_update(i1: Sequel[:i1]+:i2+10, a: Sequel[:a]+:b+20)
ds.merge
# MERGE INTO m1 USING m2 ON (i1 = i2)
# WHEN NOT MATCHED THEN INSERT (i1, a) VALUES (i2, (b + 11))
# WHEN MATCHED AND (a > 30) THEN DELETE
# WHEN MATCHED THEN UPDATE SET i1 = (i1 + i2 + 10), a = (a + b + 20)
=== Transactions
You can wrap a block of code in a database transaction using the Database#transaction method:
DB.transaction do
# BEGIN
posts.insert(category: 'ruby', author: 'david')
# INSERT
posts.where(Sequel[:stamp] < Date.today - 7).update(state: 'archived')
# UPDATE
end
# COMMIT
If the block does not raise an exception, the transaction will be committed.
If the block does raise an exception, the transaction will be rolled back,
and the exception will be reraised. If you want to rollback the transaction
and not raise an exception outside the block, you can raise the
Sequel::Rollback exception inside the block:
DB.transaction do
# BEGIN
posts.insert(category: 'ruby', author: 'david')
# INSERT
if posts.where('stamp < ?', Date.today - 7).update(state: 'archived') == 0
# UPDATE
raise Sequel::Rollback
end
end
# ROLLBACK
=== Joining Tables
Sequel makes it easy to join tables:
order_items = DB[:items].join(:order_items, item_id: :id).where(order_id: 1234)
# SELECT * FROM items
# INNER JOIN order_items ON (order_items.item_id = items.id)
# WHERE (order_id = 1234)
The important thing to note here is that item_id is automatically qualified with
the table being joined, and id is automatically qualified with the last table
joined.
You can then do anything you like with the dataset:
order_total = order_items.sum(:price)
# SELECT sum(price) FROM items
# INNER JOIN order_items ON (order_items.item_id = items.id)
# WHERE (order_id = 1234)
Note that the default selection in Sequel is *, which includes all columns
in all joined tables. Because Sequel returns results as a hash keyed by column name
symbols, if any tables have columns with the same name, this will clobber the columns
in the returned hash. So when joining you are usually going to want to change the
selection using +select+, +select_all+, and/or +select_append+.
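For example, here is a sketch that keeps only the columns from items and appends one column from the joined table (the +quantity+ column is assumed for illustration):

  order_items.select_all(:items).select_append(Sequel[:order_items][:quantity])
  # SELECT items.*, order_items.quantity FROM items
  # INNER JOIN order_items ON (order_items.item_id = items.id)
  # WHERE (order_id = 1234)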
== Column references in Sequel
Sequel expects column names to be specified using symbols. In addition, returned hashes always use symbols as their keys. This allows you to freely mix literal values and column references in many cases. For example, the two following lines produce equivalent SQL:
items.where(x: 1)
# SELECT * FROM items WHERE (x = 1)
items.where(1 => :x)
# SELECT * FROM items WHERE (1 = x)
Ruby strings are generally treated as SQL strings:
items.where(x: 'x')
# SELECT * FROM items WHERE (x = 'x')
=== Qualifying identifiers (column/table names)
An identifier in SQL is a name that represents a column, table, or schema.
The recommended way to qualify columns is to use Sequel[][] or +Sequel.qualify+
Sequel[:table][:column]
Sequel.qualify(:table, :column)
# table.column
You can also qualify tables with schemas:
Sequel[:schema][:table]
# schema.table
or use multi-level qualification:
Sequel[:schema][:table][:column]
# schema.table.column
=== Expression aliases
You can alias identifiers using Sequel[].as or +Sequel.as+:
Sequel[:column].as(:alias)
Sequel.as(:column, :alias)
# column AS alias
You can use the Sequel.as method to alias arbitrary expressions, not just identifiers:
Sequel.as(DB[:posts].select{max(id)}, :p)
# (SELECT max(id) FROM posts) AS p
And most Sequel expression objects support an +as+ method for aliasing:
(Sequel[:column] + 2).as(:c_plus_2)
# (column + 2) AS c_plus_2
== Sequel Models
A model class wraps a dataset, and an instance of that class wraps a single record in the dataset.
Model classes are defined as regular Ruby classes inheriting from Sequel::Model:
DB = Sequel.connect('sqlite://blog.db')
class Post < Sequel::Model
end
When a model class is created, it parses the schema in the table from the database, and
automatically sets up accessor methods for all of the columns in the table (Sequel::Model
implements the active record pattern).
Sequel model classes assume that the table name is an underscored plural of the class name:
Post.table_name # => :posts
You can explicitly set the table name or even the dataset used:
class Post < Sequel::Model(:my_posts); end
# or:
class Post < Sequel::Model(DB[:my_posts]); end
If you pass a symbol to the Sequel::Model method, it assumes you are referring to the table with the same name. You can also call it with a dataset, which will set the defaults for all retrievals for that model:
class Post < Sequel::Model(DB[:my_posts].where(category: 'ruby')); end
class Post < Sequel::Model(DB[:my_posts].select(:id, :name).order(:date)); end
=== Model instances
Model instances are identified by a primary key. Sequel queries the database to determine the primary key for each model. The Model.[] method can be used to fetch records by their primary key:
post = Post[123]
The +pk+ method is used to retrieve the record's primary key value:
post.pk # => 123
If you want to override which column(s) to use as the primary key, you can use +set_primary_key+:
class Post < Sequel::Model
set_primary_key [:category, :title]
end
post = Post['ruby', 'hello world']
post.pk # => ['ruby', 'hello world']
You can also define a model class that does not have a primary key via +no_primary_key+, but then you lose the ability to easily update and delete records:
Post.no_primary_key
A single model instance can also be fetched by specifying a condition:
post = Post.first(title: 'hello world')
post = Post.first{num_comments < 10}
The dataset for a model class returns rows of model instances instead of plain hashes:
DB[:posts].first.class # => Hash
Post.first.class # => Post
=== Acts like a dataset
A model class forwards many methods to the underlying dataset. This means that you can use most of the +Dataset+ API to create customized queries that return model instances, e.g.:
Post.where(category: 'ruby').each{|post| p post}
You can also manipulate the records in the dataset:
Post.where{num_comments < 7}.delete
Post.where(Sequel.like(:title, /ruby/)).update(category: 'ruby')
=== Accessing record values
A model instance stores its values as a hash with column symbol keys, which you can access directly via the +values+ method:
post.values # => {:id => 123, :category => 'ruby', :title => 'hello world'}
You can read the record values as object attributes, assuming the attribute names are valid columns in the model's dataset:
post.id # => 123
post.title # => 'hello world'
If the record's attribute names are not valid columns in the model's dataset (maybe because you used +select_append+ to add a computed value column), you can use Model#[] to access the values:
post[:id] # => 123
post[:title] # => 'hello world'
You can also modify record values using attribute setters or the []= method.
post.title = 'hey there'
post[:title] = 'hey there'
That will just change the value for the object, it will not update the row in the database. To update the database row, call the +save+ method:
post.save
=== Mass assignment
You can also set the values for multiple columns in a single method call, using one of the mass-assignment methods. See the {mass assignment guide}[rdoc-ref:doc/mass_assignment.rdoc] for details. For example +set+ updates the model's column values without saving:
post.set(title: 'hey there', updated_by: 'foo')
and +update+ updates the model's column values and then saves the changes to the database:
post.update(title: 'hey there', updated_by: 'foo')
=== Creating new records
New model instances can be created by calling Model.new, which returns a new model instance without updating the database:
post = Post.new(title: 'hello world')
You can save the record to the database later by calling +save+ on the model instance:
post.save
If you want to create a new record and save it to the database at the same time, you can use Model.create:
post = Post.create(title: 'hello world')
You can also supply a block to Model.new and Model.create:
post = Post.new do |p|
p.title = 'hello world'
end
post = Post.create{|p| p.title = 'hello world'}
=== Hooks
You can execute custom code when creating, updating, or deleting records by defining hook methods. The +before_create+ and +after_create+ hook methods wrap record creation. The +before_update+ and +after_update+ hook methods wrap record updating. The +before_save+ and +after_save+ hook methods wrap record creation and updating. The +before_destroy+ and +after_destroy+ hook methods wrap destruction. The +before_validation+ and +after_validation+ hook methods wrap validation. Example:
class Post < Sequel::Model
def after_create
super
author.increase_post_count
end
def after_destroy
super
author.decrease_post_count
end
end
Note the use of +super+ if you define your own hook methods. Almost all Sequel::Model class and instance methods (not just hook methods) can be overridden safely, but you have to make sure to call +super+ when doing so, otherwise you risk breaking things.
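For example, here is a sketch of overriding a generated column setter while preserving the default behavior via +super+:

  class Post < Sequel::Model
    def title=(value)
      super(value.to_s.strip)
    end
  end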
For the example above, you should probably use a database trigger if you can. Hooks can be used for data integrity, but they will only enforce that integrity when you are modifying the database through model instances, and even then they are often subject to race conditions. It's best to use database triggers and database constraints to enforce data integrity.
=== Deleting records
You can delete individual records by calling +delete+ or +destroy+. The only difference between the two methods is that +destroy+ invokes +before_destroy+ and +after_destroy+ hook methods, while +delete+ does not:
post.delete # => bypasses hooks
post.destroy # => runs hooks
Records can also be deleted en-masse by calling delete and destroy on the model's dataset. As stated above, you can specify filters for the deleted records:
Post.where(category: 32).delete # => bypasses hooks
Post.where(category: 32).destroy # => runs hooks
Please note that if destroy is called, each record is deleted
separately, but delete deletes all matching records with a single
SQL query.
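Roughly, the SQL looks like this (a sketch, assuming two matching rows with ids 1 and 2):

  Post.where(category: 32).delete
  # DELETE FROM posts WHERE (category = 32)

  Post.where(category: 32).destroy
  # SELECT * FROM posts WHERE (category = 32)
  # DELETE FROM posts WHERE (id = 1)
  # DELETE FROM posts WHERE (id = 2)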
=== Associations
Associations are used in order to specify relationships between model classes that reflect relationships between tables in the database, which are usually specified using foreign keys. You specify model associations via class methods:
class Post < Sequel::Model
many_to_one :author
one_to_many :comments
one_to_one :first_comment, class: :Comment, order: :id
many_to_many :tags
one_through_one :first_tag, class: :Tag, order: :name, right_key: :tag_id
end
+many_to_one+ and +one_to_one+ create a getter and setter for each model object:
post = Post.create(name: 'hi!')
post.author = Author.first(name: 'Sharon')
post.author
+one_to_many+ and +many_to_many+ create a getter method, a method for adding an object to the association, a method for removing an object from the association, and a method for removing all associated objects from the association:
post = Post.create(name: 'hi!')
post.comments
comment = Comment.create(text: 'hi')
post.add_comment(comment)
post.remove_comment(comment)
post.remove_all_comments
tag = Tag.create(tag: 'interesting')
post.add_tag(tag)
post.remove_tag(tag)
post.remove_all_tags
Note that the remove_* and remove_all_* methods do not delete the object from the database, they merely disassociate the associated object from the receiver.
All associations add a dataset method that can be used to further filter or reorder the returned objects, or modify all of them:
# Delete all of this post's comments from the database
post.comments_dataset.destroy
# Return all tags related to this post with no subscribers, ordered by the tag's name
post.tags_dataset.where(subscribers: 0).order(:name).all
=== Eager Loading
Associations can be eagerly loaded via +eager+ and the :eager association option. Eager loading is used when loading a group of objects. It loads all associated objects for all of the current objects in one query, instead of using a separate query to get the associated objects for each current object. Eager loading requires that you retrieve all model objects at once via +all+ (instead of individually by +each+). Eager loading can be cascaded, loading association's associated objects.
class Person < Sequel::Model
one_to_many :posts, eager: [:tags]
end
class Post < Sequel::Model
many_to_one :person
one_to_many :replies
many_to_many :tags
end
class Tag < Sequel::Model
many_to_many :posts
many_to_many :replies
end
class Reply < Sequel::Model
many_to_one :person
many_to_one :post
many_to_many :tags
end
# Eager loading via .eager
Post.eager(:person).all
# eager is a dataset method, so it works with filters/orders/limits/etc.
Post.where{topic > 'M'}.order(:date).limit(5).eager(:person).all
person = Person.first
# Eager loading via :eager (will eagerly load the tags for this person's posts)
person.posts
# These are equivalent
Post.eager(:person, :tags).all
Post.eager(:person).eager(:tags).all
# Cascading via .eager
Tag.eager(posts: :replies).all
# Will also grab all associated posts' tags (because of :eager)
Reply.eager(person: :posts).all
# No depth limit (other than memory/stack), and will also grab posts' tags
# Loads all people, their posts, their posts' tags, replies to those posts,
# the person for each reply, the tag for each reply, and all posts and
# replies that have that tag. Uses a total of 8 queries.
Person.eager(posts: {replies: [:person, {tags: [:posts, :replies]}]}).all
In addition to using +eager+, you can also use +eager_graph+, which will use a single query to get the object and all associated objects. This may be necessary if you want to filter or order the result set based on columns in associated tables. It works with cascading as well, and the API is similar. Note that using +eager_graph+ to eagerly load multiple *_to_many associations will cause the result set to be a cartesian product, so you should be very careful with your filters when using it in that case.
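For example, filtering on a column in the associated table requires +eager_graph+. A minimal sketch, assuming the associations defined above:

  Post.eager_graph(:person).where(Sequel[:person][:name] => 'Sharon').all
  # Single query joining posts and people, with the joined table
  # aliased to person (the association name)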
You can dynamically customize the eagerly loaded dataset by using a proc. This proc is passed the dataset used for eager loading, and should return a modified copy of that dataset:
# Eagerly load only replies containing 'foo'
Post.eager(replies: proc{|ds| ds.where(Sequel.like(:text, '%foo%'))}).all
This also works when using +eager_graph+, in which case the proc is called with the dataset to graph into the current dataset:
Post.eager_graph(replies: proc{|ds| ds.where(Sequel.like(:text, '%foo%'))}).all
You can dynamically customize eager loads for both +eager+ and +eager_graph+ while also cascading, by making the value a single entry hash with the proc as a key, and the cascaded associations as the value:
# Eagerly load only replies containing 'foo', and the person and tags for those replies
Post.eager(replies: {proc{|ds| ds.where(Sequel.like(:text, '%foo%'))} => [:person, :tags]}).all
=== Joining with Associations
You can use the +association_join+ method to add a join to the model's dataset based on the association:
Post.association_join(:author)
# SELECT * FROM posts
# INNER JOIN authors AS author ON (author.id = posts.author_id)
This comes with variants for different join types:
Post.association_left_join(:replies)
# SELECT * FROM posts
# LEFT JOIN replies ON (replies.post_id = posts.id)
Similar to the eager loading methods, you can use multiple associations and nested associations:
Post.association_join(:author, replies: :person).all
# SELECT * FROM posts
# INNER JOIN authors AS author ON (author.id = posts.author_id)
# INNER JOIN replies ON (replies.post_id = posts.id)
# INNER JOIN people AS person ON (person.id = replies.person_id)
=== Extending the underlying dataset
The recommended way to implement table-wide logic is to define methods on the dataset using +dataset_module+:
class Post < Sequel::Model
dataset_module do
def with_few_comments
where{num_comments < 30}
end
def clean_boring
with_few_comments.delete
end
end
end
This allows you to have access to your model API from filtered datasets as well:
Post.where(category: 'ruby').clean_boring
# DELETE FROM posts WHERE ((category = 'ruby') AND (num_comments < 30))
Inside +dataset_module+ blocks, there are numerous methods that support easy creation of dataset methods.
Most of these methods are named after the dataset methods themselves, such as +select+, +order+, and
+group+:
class Post < Sequel::Model
dataset_module do
where(:with_few_comments, Sequel[:num_comments] < 30)
select :with_title_and_date, :id, :title, :post_date
order :by_post_date, :post_date
limit :top10, 10
end
end
Post.with_few_comments.with_title_and_date.by_post_date.top10
# SELECT id, title, post_date
# FROM posts
# ORDER BY post_date
# LIMIT 10
One advantage of using these methods inside dataset_module blocks, instead of
defining methods manually, is that the created methods will generally cache
the resulting values and result in better performance.
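For comparison, the +order+ call above is roughly equivalent to the following manually defined method (a sketch), except that the generated version can also cache the resulting dataset:

  class Post < Sequel::Model
    dataset_module do
      def by_post_date
        order(:post_date)
      end
    end
  end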
=== Model Validations
You can define a +validate+ method for your model, which +save+
will check before attempting to save the model in the database.
If an attribute of the model isn't valid, you should add an error
message for that attribute to the model object's +errors+. If an
object has any errors added by the validate method, +save+ will
raise an error by default:
class Post < Sequel::Model
def validate
super
errors.add(:name, "can't be empty") if name.empty?
errors.add(:written_on, "should be in the past") if written_on >= Time.now
end
end
== Testing Sequel
Please see the {testing guide}[rdoc-ref:doc/testing.rdoc] for recommendations on testing
applications that use Sequel, as well as how to run the tests for Sequel itself.
== Sequel Release Policy
New major versions of Sequel do not have a defined release policy, but historically have
occurred once every few years.
New minor versions of Sequel are released around once a month near the start of the month.
New tiny versions of Sequel are only released to address security issues or regressions
in the most current release.
== Ruby Support Policy
Sequel fully supports the currently supported versions of Ruby (MRI) and JRuby. It may
support unsupported versions of Ruby or JRuby, but such support may be dropped in any
minor version if keeping it becomes a support issue. The minimum Ruby version
required to run the current version of Sequel is 1.9.2, and the minimum JRuby version is
9.0.0.0.
== Maintainer
Jeremy Evans
sequel-5.63.0/Rakefile

require "rake"
require "rake/clean"
NAME = 'sequel'
VERS = lambda do
require File.expand_path("../lib/sequel/version", __FILE__)
Sequel.version
end
CLEAN.include ["sequel-*.gem", "rdoc", "coverage", "www/public/*.html", "www/public/rdoc*", "spec/bin-sequel-*"]
# Gem Packaging
desc "Build sequel gem"
task :package=>[:clean] do |p|
sh %{#{FileUtils::RUBY} -S gem build sequel.gemspec}
end
### Website
desc "Make local version of website"
task :website do
sh %{#{FileUtils::RUBY} www/make_www.rb}
end
### RDoc
RDOC_DEFAULT_OPTS = ["--line-numbers", '--title', 'Sequel: The Database Toolkit for Ruby']
begin
# Sequel uses hanna-nouveau for the website RDoc.
gem 'hanna-nouveau'
RDOC_DEFAULT_OPTS.concat(['-f', 'hanna'])
rescue Gem::LoadError
end
require "rdoc/task"
RDOC_OPTS = RDOC_DEFAULT_OPTS + ['--main', 'README.rdoc']
RDoc::Task.new do |rdoc|
rdoc.rdoc_dir = "rdoc"
rdoc.options += RDOC_OPTS
rdoc.rdoc_files.add %w"README.rdoc CHANGELOG MIT-LICENSE lib/**/*.rb doc/*.rdoc doc/release_notes/*.txt"
end
desc "Make rdoc for website"
task :website_rdoc=>[:website_rdoc_main, :website_rdoc_adapters, :website_rdoc_plugins]
RDoc::Task.new(:website_rdoc_main) do |rdoc|
rdoc.rdoc_dir = "www/public/rdoc"
rdoc.options += RDOC_OPTS + %w'--no-ignore-invalid'
rdoc.rdoc_files.add %w"README.rdoc CHANGELOG doc/CHANGELOG.old MIT-LICENSE lib/*.rb lib/sequel/*.rb lib/sequel/{connection_pool,dataset,database,model}/*.rb doc/*.rdoc doc/release_notes/*.txt lib/sequel/extensions/migration.rb"
end
RDoc::Task.new(:website_rdoc_adapters) do |rdoc|
rdoc.rdoc_dir = "www/public/rdoc-adapters"
rdoc.options += RDOC_DEFAULT_OPTS + %w'--main Sequel --no-ignore-invalid'
rdoc.rdoc_files.add %w"lib/sequel/adapters/**/*.rb"
end
RDoc::Task.new(:website_rdoc_plugins) do |rdoc|
rdoc.rdoc_dir = "www/public/rdoc-plugins"
rdoc.options += RDOC_DEFAULT_OPTS + %w'--main Sequel --no-ignore-invalid'
rdoc.rdoc_files.add %w"lib/sequel/{extensions,plugins}/**/*.rb doc/core_*"
end
### Specs
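# Run a single spec file with this checkout's lib directory added to
# RUBYLIB, restoring the original RUBYLIB value afterward.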
run_spec = proc do |file|
lib_dir = File.join(File.dirname(File.expand_path(__FILE__)), 'lib')
rubylib = ENV['RUBYLIB']
ENV['RUBYLIB'] ? (ENV['RUBYLIB'] += ":#{lib_dir}") : (ENV['RUBYLIB'] = lib_dir)
sh "#{FileUtils::RUBY} #{"-w" if RUBY_VERSION >= '3'} #{file}"
ENV['RUBYLIB'] = rubylib
end
spec_task = proc do |description, name, file, coverage, visibility|
desc description
task name do
run_spec.call(file)
end
if coverage
desc "#{description} with coverage"
task :"#{name}_cov" do
ENV['COVERAGE'] = coverage == true ? '1' : coverage
run_spec.call(file)
ENV.delete('COVERAGE')
end
end
if visibility
desc "Run specs with method visibility checking"
task :"#{name}_vis" do
ENV['CHECK_METHOD_VISIBILITY'] = '1'
run_spec.call(file)
ENV.delete('CHECK_METHOD_VISIBILITY')
end
end
end
desc "Run the core, model, and extension/plugin specs"
task :default => :spec
desc "Run the core, model, and extension/plugin specs"
task :spec => [:spec_core, :spec_model, :spec_plugin]
desc "Run the core, model, and extension/plugin specs with warnings"
task :spec_w => [:spec_core_w, :spec_model_w, :spec_plugin_w]
spec_task.call("Run core and model specs together", :spec_core_model, 'spec/core_model_spec.rb', "core-model", false)
spec_task.call("Run core specs", :spec_core, 'spec/core_spec.rb', false, false)
spec_task.call("Run model specs", :spec_model, 'spec/model_spec.rb', false, false)
spec_task.call("Run plugin/extension specs", :spec_plugin, 'spec/plugin_spec.rb', "plugin-extension", true)
spec_task.call("Run bin/sequel specs", :spec_bin, 'spec/bin_spec.rb', 'bin', false)
spec_task.call("Run core extensions specs", :spec_core_ext, 'spec/core_extensions_spec.rb', 'core-ext', true)
spec_task.call("Run integration tests", :spec_integration, 'spec/adapter_spec.rb none', '1', true)
%w'postgres sqlite mysql oracle mssql db2 sqlanywhere'.each do |adapter|
spec_task.call("Run #{adapter} tests", :"spec_#{adapter}", "spec/adapter_spec.rb #{adapter}", adapter, true)
end
spec_task.call("Run model specs without the associations code", :_spec_model_no_assoc, 'spec/model_no_assoc_spec.rb', false, false)
desc "Run model specs without the associations code"
task :spec_model_no_assoc do
ENV['SEQUEL_NO_ASSOCIATIONS'] = '1'
Rake::Task['_spec_model_no_assoc'].invoke
end
desc "Run core/model/extension/plugin specs with coverage"
task :spec_cov do
Rake::Cleaner.cleanup_files(::Rake::FileList["coverage"])
ENV['SEQUEL_MERGE_COVERAGE'] = '1'
Rake::Task['spec_bin_cov'].invoke
Rake::Task['spec_core_model_cov'].invoke
Rake::Task['spec_plugin_cov'].invoke
Rake::Task['spec_core_ext_cov'].invoke
ENV['NO_SEQUEL_PG'] = '1'
Rake::Task['spec_postgres_cov'].invoke
end
task :spec_ci=>[:spec_core, :spec_model, :spec_plugin, :spec_core_ext] do
mysql_host = "localhost"
pg_database = "sequel_test" unless ENV["DEFAULT_DATABASE"]
if ENV["MYSQL_ROOT_PASSWORD"]
mysql_password = "&password=root"
mysql_host= "127.0.0.1:3306"
end
if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'jruby'
ENV['SEQUEL_SQLITE_URL'] = "jdbc:sqlite::memory:"
ENV['SEQUEL_POSTGRES_URL'] = "jdbc:postgresql://localhost/#{pg_database}?user=postgres&password=postgres"
ENV['SEQUEL_MYSQL_URL'] = "jdbc:mysql://#{mysql_host}/sequel_test?user=root#{mysql_password}&useSSL=false&allowPublicKeyRetrieval=true"
else
ENV['SEQUEL_SQLITE_URL'] = "sqlite:/"
ENV['SEQUEL_POSTGRES_URL'] = "postgres://localhost/#{pg_database}?user=postgres&password=postgres"
ENV['SEQUEL_MYSQL_URL'] = "mysql2://#{mysql_host}/sequel_test?user=root#{mysql_password}&useSSL=false"
end
if RUBY_VERSION >= '2.3'
Rake::Task['spec_postgres'].invoke
end
if RUBY_VERSION >= '2.4'
Rake::Task['spec_sqlite'].invoke
Rake::Task['spec_mysql'].invoke
end
end
desc "Print Sequel version"
task :version do
puts VERS.call
end
desc "Check syntax of all .rb files"
task :check_syntax do
Dir['**/*.rb'].each{|file| print `#{FileUtils::RUBY} -c #{file} | fgrep -v "Syntax OK"`}
end
desc "Check documentation for plugin/extension files"
task :check_plugin_doc do
text = File.binread('www/pages/plugins.html.erb')
skip = %w'before_after_save freeze_datasets from_block no_auto_literal_strings auto_validations_constraint_validations_presence_message'
Dir['lib/sequel/{plugins,extensions}/*.rb'].map{|f| File.basename(f).sub('.rb', '') if File.size(f)}.sort.each do |f|
puts f if !f.start_with?('_') && !skip.include?(f) && !text.include?(f)
end
end
sequel-5.63.0/bin/sequel

#!/usr/bin/env ruby
# frozen-string-literal: true
require 'optparse'
code = nil
copy_databases = nil
dump_migration = nil
dump_schema = nil
dump_indexes = nil
env = nil
migrate_dir = nil
migrate_ver = nil
backtrace = nil
show_version = false
test = true
load_dirs = []
exclusive_options = []
loggers = []
options = OptionParser.new do |opts|
opts.banner = "Sequel: The Database Toolkit for Ruby"
opts.define_head "Usage: sequel [options] [file]"
opts.separator ""
opts.separator "Examples:"
opts.separator " sequel sqlite://blog.db"
opts.separator " sequel postgres://localhost/my_blog"
opts.separator " sequel config/database.yml"
opts.separator ""
opts.separator "For more information see http://sequel.jeremyevans.net"
opts.separator ""
opts.separator "Options:"
opts.on_tail("-h", "-?", "--help", "Show this message") do
puts opts
exit
end
opts.on("-c", "--code CODE", "run the given code and exit") do |v|
code = v
exclusive_options << :c
end
opts.on("-C", "--copy-databases", "copy one database to another") do
copy_databases = true
exclusive_options << :C
end
opts.on("-d", "--dump-migration", "print database migration to STDOUT") do
dump_migration = true
exclusive_options << :d
end
opts.on("-D", "--dump-migration-same-db", "print database migration to STDOUT without type translation") do
dump_migration = :same_db
exclusive_options << :D
end
opts.on("-e", "--env ENV", "use environment config for database") do |v|
env = v
end
opts.on("-E", "--echo", "echo SQL statements") do
require 'logger'
loggers << Logger.new($stdout)
end
opts.on("-I", "--include dir", "specify $LOAD_PATH directory") do |v|
$: << v
end
opts.on("-l", "--log logfile", "log SQL statements to log file") do |v|
require 'logger'
loggers << Logger.new(v)
end
opts.on("-L", "--load-dir DIR", "loads all *.rb under specifed directory") do |v|
load_dirs << v
end
opts.on("-m", "--migrate-directory DIR", "run the migrations in directory") do |v|
migrate_dir = v
exclusive_options << :m
end
opts.on("-M", "--migrate-version VER", "migrate the database to version given") do |v|
migrate_ver = Integer(v, 10)
end
opts.on("-N", "--no-test-connection", "do not test the connection") do
test = false
end
opts.on("-r", "--require LIB", "require the library, before executing your script") do |v|
load_dirs << [v]
end
opts.on("-S", "--dump-schema filename", "dump the schema for all tables to the file") do |v|
dump_schema = v
exclusive_options << :S
end
opts.on("-t", "--trace", "Output the full backtrace if an exception is raised") do
backtrace = true
end
opts.on_tail("-v", "--version", "Show version") do
show_version = true
end
opts.on("-X", "--dump-indexes filename", "dump the index cache for all tables to the file") do |v|
dump_indexes = v
exclusive_options << :X
end
end
opts = options
opts.parse!
db = ARGV.shift
error_proc = lambda do |msg|
$stderr.puts(msg)
exit 1
end
extra_proc = lambda do
$stderr.puts("Warning: last #{ARGV.length} arguments ignored") unless ARGV.empty?
end
error_proc["Error: Must specify -m if using -M"] if migrate_ver && !migrate_dir
error_proc["Error: Cannot specify #{exclusive_options.map{|v| "-#{v}"}.join(' and ')} together"] if exclusive_options.length > 1
connect_proc = lambda do |database|
db_opts = {:test=>test, :loggers=>loggers}
if database.nil? || database.empty?
Sequel.connect('mock:///', db_opts)
elsif File.exist?(database)
require 'yaml'
env ||= "development"
db_config = YAML.load_file(database)
db_config = db_config[env] || db_config[env.to_sym] || db_config
db_config.keys.each{|k| db_config[k.to_sym] = db_config.delete(k)}
Sequel.connect(db_config, db_opts)
else
Sequel.connect(database, db_opts)
end
end
begin
$:.unshift(File.expand_path(File.join(File.dirname(__FILE__), '..', 'lib')))
require 'sequel'
if show_version
puts "sequel #{Sequel.version}"
unless db || code
exit
end
end
DB = connect_proc[db]
load_dirs.each{|d| d.is_a?(Array) ? require(d.first) : Dir["#{d}/**/*.rb"].each{|f| load(f)}}
if migrate_dir
extra_proc.call
Sequel.extension :migration, :core_extensions
Sequel::Migrator.apply(DB, migrate_dir, migrate_ver)
exit
end
if dump_migration
extra_proc.call
DB.extension :schema_dumper
puts DB.dump_schema_migration(:same_db=>dump_migration==:same_db)
exit
end
if dump_schema
extra_proc.call
DB.extension :schema_caching
DB.tables.each{|t| DB.schema(Sequel::SQL::Identifier.new(t))}
DB.dump_schema_cache(dump_schema)
exit
end
if dump_indexes
extra_proc.call
DB.extension :index_caching
DB.tables.each{|t| DB.indexes(Sequel::SQL::Identifier.new(t))}
DB.dump_index_cache(dump_indexes)
exit
end
if copy_databases
Sequel.extension :migration
DB.extension :schema_dumper
db2 = ARGV.shift
error_proc["Error: Must specify database connection string or path to yaml file as second argument for database you want to copy to"] if db2.nil? || db2.empty?
extra_proc.call
start_time = Time.now
TO_DB = connect_proc[db2]
same_db = DB.database_type==TO_DB.database_type
index_opts = {:same_db=>same_db}
# :nocov:
index_opts[:index_names] = :namespace if !DB.global_index_namespace? && TO_DB.global_index_namespace?
# :nocov:
if DB.database_type == :sqlite && !same_db
# SQLite integer types allows 64-bit integers
TO_DB.extension :integer64
end
puts "Databases connections successful"
schema_migration = eval(DB.dump_schema_migration(:indexes=>false, :same_db=>same_db))
index_migration = eval(DB.dump_indexes_migration(index_opts))
fk_migration = eval(DB.dump_foreign_key_migration(:same_db=>same_db))
puts "Migrations dumped successfully"
schema_migration.apply(TO_DB, :up)
puts "Tables created"
puts "Begin copying data"
DB.transaction do
TO_DB.transaction do
all_status_lines = ENV['SEQUEL_BIN_STATUS_ALL_LINES']
DB.tables.each do |table|
puts "Begin copying records for table: #{table}"
time = Time.now
to_ds = TO_DB.from(table)
j = 0
DB.from(table).each do |record|
to_ds.insert(record)
j += 1
if Time.now - time > 5 || all_status_lines
puts "Status: #{j} records copied"
time = Time.now
end
end
puts "Finished copying #{j} records for table: #{table}"
end
end
end
puts "Finished copying data"
puts "Begin creating indexes"
index_migration.apply(TO_DB, :up)
puts "Finished creating indexes"
puts "Begin adding foreign key constraints"
fk_migration.apply(TO_DB, :up)
puts "Finished adding foreign key constraints"
if TO_DB.database_type == :postgres
TO_DB.tables.each{|t| TO_DB.reset_primary_key_sequence(t)}
puts "Primary key sequences reset successfully"
end
puts "Database copy finished in #{Time.now - start_time} seconds"
exit
end
if code
extra_proc.call
eval(code)
exit
end
rescue => e
raise e if backtrace
error_proc["Error: #{e.class}: #{e.message}\n#{e.backtrace.first}"]
end
if !ARGV.empty?
ARGV.each{|v| load(v)}
elsif !$stdin.isatty
eval($stdin.read)
# :nocov:
else
require 'irb'
puts "Your database is stored in DB..."
IRB.start
end
# :nocov:
sequel-5.63.0/doc/ 0000775 0000000 0000000 00000000000 14342141206 0013645 5 ustar 00root root 0000000 0000000 sequel-5.63.0/doc/CHANGELOG.old 0000664 0000000 0000000 00001150065 14342141206 0015644 0 ustar 00root root 0000000 0000000 === 4.49.0 (2017-08-01)
* Make dataset_associations plugin automatically alias tables when using many_through_many associations that join the same table multiple times (jeremyevans)
* Deprecate using a :pool_class Database that is not a class or a symbol for a supported pool class (jeremyevans)
* Deprecate :eager_loading_predicate_key association option and association reflection method (jeremyevans)
* Deprecate Model.serialized_columns in the serialization plugin (jeremyevans)
* Deprecate Model.cti_columns in the class_table_inheritance plugin (jeremyevans)
* Deprecate SQL::AliasedExpression#aliaz, use #alias instead (jeremyevans)
* Deprecate SQL::Function#f, use #name instead (jeremyevans)
* Deprecate treating cross join with conditions as inner join on MySQL (jeremyevans)
* Deprecate ConnectionPool#created_count, use #size instead (jeremyevans)
* Deprecate ConnectionPool::CONNECTION_POOL_MAP, use the :pool_class option to specify a non-default connection pool (jeremyevans)
* Deprecate Sequel::IBMDB::Connection#prepared_statements= in the ibmdb adapter (jeremyevans)
* Deprecate DEFAULT_OPTIONS in validation_helpers, override default_validation_helpers_options private method instead (jeremyevans)
* Deprecate model association before callbacks returning false to cancel the action (jeremyevans)
* Support native offset syntax on Oracle 12 (timon) (#1397)
* Deprecate Dataset#nullify! in the null_dataset extension (jeremyevans)
* Deprecate Dataset#autoid=, #_fetch=, and #numrows= in the mock adapter (jeremyevans)
* Deprecate loading plugins by requiring sequel_#{plugin} (jeremyevans)
* Add Model.sti_class_from_sti_key in the single_table_inheritance plugin to get the appropriate class to use (Aryk) (#1396)
* Make Sequel::Error#cause use #wrapped_exception if it exists on ruby 2.1+ (jeremyevans)
* Make Dataset#where_all, #where_each, #where_single_value core dataset methods instead of just model dataset methods (jeremyevans)
* Make Database#extend_datasets and Dataset#with_extend now use a Dataset::DatasetModule instance if given a block (jeremyevans)
* Add Sequel::Dataset::DatasetModule, now a superclass of Sequel::Model::DatasetModule (jeremyevans)
* Make composition plugin with :mapping option work correctly if Model#get_column_value is overridden (jeremyevans)
* Support Dataset#paged_each :stream => false option on mysql2 to disable streaming (Aryk) (#1395)
* Make datetimeoffset handling in the jdbc/sqlserver adapter work on more drivers (jeremyevans)
* Make alter_table add_primary_key work correctly on H2 1.4+ (jeremyevans)
* Support :sslrootcert Database option in the postgres adapter (dleavitt) (#1391)
=== 4.48.0 (2017-07-01)
* Deprecate Model.<< (jeremyevans)
* Deprecate Dataset#{and,exclude_where,range,interval}, move to sequel_4_dataset_methods extension (jeremyevans)
* Make Database#indexes not include partial indexes on SQLite 3.8.8+ (jeremyevans)
* Make Database#indexes include indexes created automatically from unique constraints on SQLite 3.8.8+ (jeremyevans)
* Deprecate Sequel::Postgres::PG_TYPES, conversion procs should not be registered per-Database (jeremyevans)
* Add Database#add_conversion_proc method on PostgreSQL for registering conversion procs (jeremyevans) (see the sketch after this list)
* Deprecate unexpected values passed to Dataset#insert_conflict on SQLite (jeremyevans)
* Deprecate Sequel::SqlAnywhere::Dataset#convert_smallint_to_bool= method (jeremyevans)
* Deprecate Sequel::SqlAnywhere.convert_smallint_to_bool accessor (jeremyevans)
* Use savepoints around index creation if creating table inside transaction if ignore_index_errors is used (jeremyevans)
* Deprecate treating :natural_inner join type on MySQL as NATURAL LEFT JOIN (jeremyevans)
* Deprecate Dataset#mssql_unicode_strings= on Microsoft SQL Server (jeremyevans)
* Preserve encoding when parsing PostgreSQL arrays (jeltz) (#1387)
* Deprecate external modification of Sequel::JDBC::TypeConvertor (jeremyevans)
* Deprecate Sequel::DB2.use_clob_as_blob accessor (jeremyevans)
* Add Database#use_clob_as_blob accessor on DB2 (jeremyevans)
* Deprecate SEQUEL_POSTGRES_USES_PG constant (jeremyevans)
* Do not swallow original exception if exception is raised inside Database#copy_table on PostgreSQL (jeremyevans)
* Deprecate Sequel::Postgres.client_min_messages and force_standard_strings accessors (jeremyevans)
* Deprecate Sequel::Postgres.use_iso_date_format accessor (jeremyevans)
* Do not allow connection in postgres adapter if postgres-pr driver is used and force_standard_strings is false (jeremyevans)
* Drop support for ancient postgres driver in postgres adapter, now only pg and postgres-pr drivers are supported (jeremyevans)
* Deprecate Sequel::MySQL.convert_invalid_date_time accessor (jeremyevans)
* Deprecate Sequel::MySQL.convert_tinyint_to_bool accessor (jeremyevans)
* Deprecate Sequel::MySQL.default_{charset,collate,engine} accessors (jeremyevans)
* Add Database#default_{charset,collate,engine} accessors on MySQL (jeremyevans)
* Make mock adapter thread safe (jeremyevans)
* Deprecate Sequel::JDBC::Dataset#convert_types accessor (jeremyevans)
* Add Dataset#with_convert_types in jdbc adapter (jeremyevans)
* Deprecate Sequel::IBMDB::Dataset#convert_smallint_to_bool= method (jeremyevans)
* Deprecate Sequel::IBMDB.convert_smallint_to_bool accessor (jeremyevans)
* Add Database#convert_smallint_to_bool accessor in the ibmdb adapter (jeremyevans)
* Deprecate sequel_3_dataset_methods extension (jeremyevans)
* Deprecate query_literals extension (jeremyevans)
* Deprecate using subtype conversion procs added after registering composite type in the pg_row extension (jeremyevans)
* Don't try canceling copy in Database#copy_into if copier is not created yet (aakashAu) (#1384)
* Deprecate global conversion procs added by pg_* extensions, when extension isn't loaded into Database instance (jeremyevans)
* Deprecate Sequel::Postgres::PGRange.register in the pg_range extension (jeremyevans)
* Deprecate Sequel::Postgres::PGArray.register in the pg_array extension (jeremyevans)
* Deprecate Database#copy_conversion_procs (private method) on PostgreSQL (jeremyevans)
* Deprecate Database#reset_conversion_procs on PostgreSQL (jeremyevans)
* Deprecate meta_def extension (jeremyevans)
* Make class_table_inheritance plugin with :alias option not use subquery for datasets that don't join (jeremyevans)
* Deprecate hash_aliases extension (jeremyevans)
* Deprecate filter_having extension (jeremyevans)
* Deprecate empty_array_ignore_nulls extension (jeremyevans)
* Deprecate Array#sql_array in the core_extensions extension (jeremyevans)
* Make validation_helpers plugin :allow_blank option work correctly when the blank extension is not loaded (jeremyevans)
* Make validation_class_methods plugin no longer require the blank extension (jeremyevans)
* Clear cached associations when touching associations in the touch plugin (jeremyevans)
* Make pg_array_associations model plugin load pg_array extension into database (jeremyevans)
* Remove support for :strict option in nested_attributes plugin, use :unmatched_pk option instead (jeremyevans)
* Make to_json class/dataset method in json_serializer plugin accept :instance_block option to pass block to Model#to_json (jeremyevans)
* Make to_json methods in json_serializer plugin accept blocks that are used to transform values before serializing to JSON (jeremyevans)
* Make Sequel.object_to_json pass block to #to_json (jeremyevans)
* Deprecate identifier_columns plugin, not needed with Sequel.split_symbols = false (jeremyevans)
* Make reloading column_conflicts plugin not remove existing conflict markings (jeremyevans)
* Deprecate cti_base_model, cti_key, and cti_model_map class methods in class_table_inheritance plugin (jeremyevans)
* Make Model.skip_auto_validations(:not_null) in the auto_validations plugin skip not null checks for columns with default values (jeremyevans)
* Make Database#copy_into in jdbc/postgresql adapter respect :server option (jeremyevans)
* Make #to_hash and #to_hash_groups handle options in the static_cache plugin, and rename #to_hash to #as_hash (jeremyevans)
* Rename Dataset#to_hash to #as_hash, and add #to_hash as an alias, to allow undefing #to_hash to fix ruby calling it implicitly (jeremyevans) (#1375)
* Handle PG* constants deprecated in pg 0.21.0 in the postgres adapter (jeremyevans) (#1377, #1378)
* Support :association_pks_use_associated_table association option in association_pks plugin (jeremyevans)
* Make pg_hstore extension reset hstore conversion proc when running Database#reset_conversion_procs (jeremyevans)
* Fix incorrect SQL used for inserting into a CTI subclass sharing the primary table when using the :alias option (jeremyevans)
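
An illustrative sketch of the Database#add_conversion_proc entry above; the OID 123456 stands in for whatever OID your custom PostgreSQL type actually has:

  # Assuming a custom PostgreSQL type with OID 123456:
  DB.add_conversion_proc(123456) do |string|
    string.to_s.upcase
  end
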
=== 4.47.0 (2017-06-01)
* Deprecate pg_typecast_on_load plugin, only useful on deprecated do and swift adapters (jeremyevans)
* Deprecate association_autoreloading and many_to_one_pk_lookup plugins, which were made the default model behavior in Sequel 4 (jeremyevans)
* Deprecate setting invalid datasets for models unless required_valid_table = false (jeremyevans)
* Make Model.require_valid_table = true not raise for datasets where Database#schema raises an error but Dataset#columns works (jeremyevans)
* Make Database#with_server in the server_block extension accept a second argument for a different read_only shard (jeremyevans) (#1355)
* Make schema_dumper extension handle Oracle 11g XE inclusion of not null in the db_type (StevenCregan, jeremyevans) (#1351)
* Add Model.default_association_type_options for changing default association options per association type (jeremyevans)
* Add :materialized option to Database#views on PostgreSQL to return materialized views (Blargel) (#1348)
* Make defaults_setter plugin inherit custom default values when subclassing (jeremyevans)
=== 4.46.0 (2017-05-01)
* Recognize additional disconnect error on MySQL (jeremyevans)
* Deconstantize dataset SQL generation, speeding up ruby 2.3+, slowing down earlier versions (jeremyevans)
* Deprecate calling Dataset#set_graph_aliases before Dataset#graph (jeremyevans)
* Don't swallow exception if there is an exception when rolling back a transaction when using :rollback=>:always option (jeremyevans)
* Deprecate passing 2 arguments to Database#alter_table (jeremyevans)
* Deprecate passing Schema::CreateTableGenerator instance as second argument to Database#create_table (jeremyevans)
* Deprecate Database::DatasetClass as a way for getting default dataset classes for datasets (jeremyevans)
* Deprecate SQLite pragma getting and setting methods (jeremyevans)
* Remove handling of EMULATED_FUNCTION_MAP from adapter dataset classes, override Dataset#native_function_name instead (jeremyevans)
* Deprecate {Integer,Timestamp}Migrator::DEFAULT_SCHEMA_{COLUMN,TABLE} (jeremyevans)
* Deprecate Database#jdbc_* methods for jdbc/db2 adapter Database instances (jeremyevans)
* Remove addition of Database#jdbc_* to JDBC::Database in jdbc/db2 adapter (jeremyevans)
* Deprecate many internal Database and Dataset string/regexp constants in core and included adapters (jeremyevans)
* Remove use of Fixnum in sqlanywhere shared adapter (jeremyevans)
* Deprecate Sequel::Schema::Generator constant, use Sequel::Schema::CreateTableGenerator instead (jeremyevans)
* Deprecate Database#log_yield (jeremyevans)
* Deprecate the set_overrides extension (jeremyevans)
* If passing an empty array or hash and a block to a filtering method, ignore the array or hash and just use the block (jeremyevans)
* Deprecate ignoring explicit nil argument when there is no existing filter (jeremyevans)
* Deprecate ignoring explicit nil argument to filtering methods when passing a block (jeremyevans)
* Deprecate ignoring empty strings and other empty? arguments passed to the filtering methods without a block (jeremyevans)
* Deprecate calling filtering methods without an argument or a block (jeremyevans)
* Deprecate Sequel::VirtualRow#` to create literal SQL, use Sequel.lit instead (jeremyevans)
* Add auto_literal_strings extensions for treating plain strings passed to filtering/update methods as literal SQL (jeremyevans)
* Deprecate automatically treating plain strings passed to filtering/update methods as literal SQL (jeremyevans) (see the sketch after this list)
* Passing a PlaceholderLiteralString to a filtering method now uses parentheses around the expression (jeremyevans)
* Make Dataset#full_text_search work on Microsoft SQL Server when no_auto_literal_strings extension is used (jeremyevans)
* Fix Database#disconnect when using the single connection pool without an active connection (jeremyevans) (#1339)
* Handle conversion of datetimeoffset values when using the jdbc/sqlserver adapter in some configurations (iaddict, jeremyevans) (#1338)
* Fix conversion of some time values when using the jdbc/sqlserver adapter in some configurations (iaddict, jeremyevans) (#1337)
* Use microsecond precision for time values on Microsoft SQL Server, instead of millisecond precision (jeremyevans)
* Add Dataset#sqltime_precision private method for adapters to use different precision for Sequel::SQLTime than Time and Date (jeremyevans)
* Use utc timezone in Sequel::SQLTime.create if Sequel.application_timezone is :utc (jeremyevans) (#1336)
* Include migration filename in message about migration file without a single migration (jmettraux) (#1334)
* Deprecate conversion of - to _ in adapter schemes (jeremyevans)
* Don't quote function names that are SQL::Identifiers, unless SQL::Function#quoted is used (jeremyevans)
* Deprecate splitting virtual row method names (jeremyevans)
* Deprecate passing blocks to virtual row methods, move to virtual_row_method_block extension (jeremyevans)
* Deprecate Sequel::SQL::Expression#sql_literal and #lit (jeremyevans)
* Don't issue deprecation warnings on ruby 1.8.7, as Sequel 5 is dropping support for it (jeremyevans)
* Deprecate Sequel::BasicObject#remove_methods! (jeremyevans)
* Deprecate sequel/no_core_ext file (jeremyevans)
* Deprecate model dataset #insert_sql accepting model instances (jeremyevans)
* Deprecate model dataset #join_table and #graph accepting model classes (jeremyevans)
* Support :alias option to class_table_inheritance plugin, wrapping subclass datasets in a subquery to fix ambiguous column issues (jeremyevans)
* Deprecate Model.set_allowed_columns and Model#{set_all,set_only,update_all,update_only}, move to whitelist security plugin (jeremyevans)
* Do not raise MassAssignmentRestriction when setting nested attributes and using the :fields option, only check for fields given (jeremyevans)
* Do not add class methods for private methods defined in dataset_module (jeremyevans)
* Deprecate Model.def_dataset_method and Model.subset, move to def_dataset_method plugin (jeremyevans)
* Deprecate Model.finder and Model.prepared_finder, move to finder plugin (jeremyevans)
* Deprecate calling Model.db= on a model with a dataset (jeremyevans)
* Deprecate splitting symbols to look for qualified/aliased identifiers (e.g. :table__column) (jeremyevans)
* Allow optimized lookups and deletes for models using SQL::Identifier and SQL::QualifiedIdentifier values as the FROM table (jeremyevans)
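
A sketch of the plain-string deprecation above: implicit literal SQL strings should become Sequel.lit calls or expression DSL usage (the items table and price column are hypothetical):

  # Deprecated implicit literal SQL:
  DB[:items].where("price > 10")

  # Explicit literal SQL, with a placeholder:
  DB[:items].where(Sequel.lit("price > ?", 10))

  # Or the expression DSL:
  DB[:items].where{price > 10}
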
=== 4.45.0 (2017-04-01)
* Correctly handle datasets with offsets but no limits used in compound datasets on MSSQL <2012 (jeremyevans)
* Correctly handle false values in the split_values plugin (bananarne) (#1333)
* Deprecate Dataset#dup/clone and Model.dup/clone (jeremyevans)
* Deprecate the schema and scissors plugins (jeremyevans)
* Deprecate Model.{lazy_attributes,nested_attributes,composition,serialization}_module accessors (jeremyevans)
* Deprecate Database#database_name on MySQL (jeremyevans)
* Deprecate Database#use on MySQL (jeremyevans)
* Make pg_hstore extension no longer update PG_NAMED_TYPES (jeremyevans)
* Deprecate Sequel::PG_NAMED_TYPES (jeremyevans)
* Add columns_updated plugin for making updated columns hash available in after_update and after_save hooks (jeremyevans) (see the sketch after this list)
* Deprecate accessing @columns_updated directly in model after_update and after_save hooks (jeremyevans)
* Deprecate Database#{add,remove}_servers when not using a sharded connection pool (jeremyevans)
* Deprecate Database#each_server (jeremyevans)
* Make Model#_valid? private method accept only an options hash (jeremyevans)
* Deprecate returning false from model before hooks to cancel the action, use Model#cancel_action (jeremyevans)
* Handle Model#cancel_action correctly in before hooks when Model#valid? is called (jeremyevans)
* Deprecate Sequel::BeforeHookFailed (jeremyevans)
* Deprecate passing multiple arguments as filter arguments when not using a conditions specifier (jeremyevans)
* Deprecate passing Procs as filter arguments, require they be passed as blocks (jeremyevans)
* Deprecate Sequel::Error::* exception class aliases (jeremyevans)
* Deprecate prepared_statements_associations and prepared_statements_with_pk plugins (jeremyevans)
* Deprecate Sequel::Unbinder, Sequel::UnbindDuplicate, and Dataset#unbind (jeremyevans)
* Deprecate calling Sequel::Qualifier with two arguments (jeremyevans)
* Add validation_contexts plugin for supporting custom contexts when validating (jeremyevans)
* Deprecate Sequel::Database.single_threaded singleton accessor (jeremyevans)
* Deprecate treating unrecognized prepared statement type as :select (jeremyevans)
* Deprecate Sequel.identifier_{in,out}put_method= and .quote_identifiers= singleton setters (jeremyevans)
* Deprecate Sequel::Database.identifier_{in,out}put_method and .quote_identifiers singleton accessors (jeremyevans)
* Deprecate loading the identifier_mangling by default, require it be loaded explicitly if needed (jeremyevans)
* Make Database#dump_{table_schema,schema_migration} in schema_dumper extension support :schema option (dadario) (#1328)
* Make Dataset#delete respect an existing limit on Microsoft SQL Server (jeremyevans)
* Add Dataset#skip_limit_check to mark a dataset as skipping the limit/offset check for updates and deletes (jeremyevans)
* Deprecate calling Dataset#{update/delete/truncate} on datasets with limits or offsets unless the database supports it (jeremyevans)
* Add deprecation message for using association_pks setter method with :delay_pks=>true association option (jeremyevans)
* Add deprecation message for using association_pks setter method without :delay_pks association option (jeremyevans)
* Deprecate having duplicate column names in subclass tables when using the class_table_inheritance plugin (jeremyevans)
* Deprecate do (DataObjects), swift, and jdbc/as400 adapters (jeremyevans)
* Deprecate support for Cubrid, Firebird, Informix, and Progress databases (jeremyevans)
* The :proxy_argument option passed to association_proxies plugin block is now an empty hash if no arguments are passed to the association method (jeremyevans)
* Deprecate passing non-hash arguments to association methods (jeremyevans)
* Deprecate passing multiple arguments to association methods (jeremyevans)
* Deprecate model transaction hook methods (jeremyevans)
* Drop support for pg <0.8.0 in the postgres adapter (jeremyevans)
* Deprecate passing a block to Database#from (jeremyevans)
* Deprecate Sequel::Model::ANONYMOUS_MODEL_CLASSES{,_MUTEX} (jeremyevans)
* Deprecate Sequel.cache_anonymous_models and Sequel.cache_anonymous_models= (jeremyevans)
* Automatically use from_self when using a dataset as part of a compound if it has an offset but no limit (jeremyevans)
* Drop order on existing datasets when using Dataset#union/intersect/except on Microsoft SQL Server unless a limit or offset is used (jeremyevans)
* Deprecate dataset mutation (jeremyevans)
* Handle dumping of autoincrementing 64-bit integer primary key columns correctly when using :same_db option in the schema dumper (jeremyevans) (#1324)
* Add Model.dataset_module_class accessor, allowing plugins to support custom behavior in dataset_module blocks (jeremyevans)
* Make ORDER BY come after UNION/INTERSECT/EXCEPT on Microsoft SQL Server and SQLAnywhere (jeremyevans)
* Make Database#indexes on MySQL handle qualified identifiers (jeremyevans) (#1316)
* Add oracle support to the odbc adapter (samuel02) (#1315)
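
An illustrative sketch of the columns_updated plugin entry above; the Album model and log_price_change method are hypothetical:

  class Album < Sequel::Model
    plugin :columns_updated

    def after_update
      super
      # columns_updated is the hash of columns used in the UPDATE statement
      log_price_change if columns_updated.has_key?(:price)
    end
  end
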
=== 4.44.0 (2017-03-01)
* Add where_all, where_each, where_single_value model dataset methods, optimized for frozen datasets (jeremyevans)
* Add eager method to dataset_module (jeremyevans)
* Add implicit_subquery extension, for implicitly using a subquery for datasets using raw SQL when calling dataset methods that modify SQL (jeremyevans)
* Make Dataset#from_self keep the columns from the current dataset if present (jeremyevans)
* Make SQL::ValueList#inspect show that it is a value list (jeremyevans)
* Make LiteralString#inspect show that it is a literal string (jeremyevans)
* Make Model::Associations::AssociationReflection#inspect show reflection class and guess at association definition line (jeremyevans)
* Make SQLTime#inspect show it is an SQLTime instance, and only the time component (jeremyevans)
* Make SQL::Blob#inspect show that it is a blob, the number of bytes, and some or all of the content (jeremyevans)
* Make plugins not modify the constant namespace for the model class that uses them (jeremyevans)
* Do not modify encoding of SQL::Blob instances in force_encoding plugin (jeremyevans)
* Add Model.freeze_descendents to subclasses plugin, for easier finalizing associations/freezing of descendent classes (jeremyevans)
* Add Model.finalize_associations method for finalizing associations, speeding up some association reflections methods almost 10x (jeremyevans)
* Implement Model.freeze such that it can be used in production (jeremyevans)
* Recognize another disconnect error in the jdbc/as400 adapter (perlun) (#1300)
* Correctly handle conversion of false values when typecasting PostgreSQL arrays (mistoo) (#1299)
* Raise error if the postgres adapter attempts to load an incompatible version of sequel_pg (mahlonsmith) (#1298)
* Fix jdbc adapter so basic_type_convertor_map is not shared between instances, work with Database#freeze (jeremyevans)
=== 4.43.0 (2017-02-01)
* Make jdbc/postgresql adapter work if pg_hstore extension is loaded first (jeremyevans) (#1296)
* Make prepared_statements_associations plugin work correctly on some instance specific associations (jeremyevans)
* Make prepared_statements plugin not use prepared statements in cases where it is probably slower (jeremyevans)
* Optimize Model#refresh similar to Model.with_pk (jeremyevans)
* Make Database#extension not attempt to load the same extension more than once (jeremyevans)
* Implement Database#freeze such that it can be used in production (jeremyevans)
* Freeze enum_labels in the pg_enum extension (jeremyevans)
* Handle Database#type_supported? thread-safely on PostgreSQL (jeremyevans)
* Handle primary_key_sequences thread-safely on Oracle (jeremyevans)
* Handle sharding better when using mysql2 native prepared statements (jeremyevans)
* Use thread-safe incrementor for mock adapter autoid handling (jeremyevans)
* Make Model#freeze not freeze associations hash until after validating the model instance (jeremyevans)
* Make prepared_statements_associations plugin work correctly when model object explicitly specifies server to use when also using sharding plugin (jeremyevans)
* Make prepared_statements_with_pk plugin work correctly when dataset explicitly specifies server to use (jeremyevans)
* Make prepared_statements plugin work correctly when model object explicitly specifies server to use (jeremyevans)
* Make dataset_module inherited to subclasses when using the single_table_inheritance plugin (jeremyevans) (#1284)
* Support use of SQLite result codes in the jdbc-sqlite adapter, if the jdbc sqlite driver supports them (flash-gordon, jeremyevans) (#1283)
* Make timestamp migrator handle key length limitations when using MySQL with InnoDB engine and utf8mb4 charset default (jeremyevans) (#1282)
=== 4.42.0 (2017-01-01)
* Handle eager load callbacks correctly for one_to_one associations with orders or offsets when window functions are not supported (jeremyevans)
* Raise Sequel::Error if using an :eager_limit dataset option when eager loading a singular association (jeremyevans)
* Replace internal uses of Dataset#select_more with #select_append to save a method call (jeremyevans)
* Make Dataset#order_append the primary method, and #order_more the alias, for similarity to #select_append and #select_more (jeremyevans)
* Replace internal uses of Dataset#filter with #where to save a method call (jeremyevans)
* Do not set :auto_increment in the schema information for integer columns that are part of a composite primary key on SQLite (jeremyevans)
* Use autoincrement setting on integer primary key columns when emulating table modification methods on SQLite (thenrio, jeremyevans) (#1277, #1278)
* Make the pagination extension work on frozen datasets (jeremyevans)
* Make Dataset#server work for frozen model datasets using the sharding plugin (jeremyevans)
* Make Dataset#nullify in the null_dataset extension work on frozen datasets (jeremyevans)
* Make Model#set_server work when using a frozen model dataset (jeremyevans)
* Make Dataset#ungraphed work on a frozen model dataset (jeremyevans)
* Add Dataset#with_{autoid,fetch,numrows} to the mock adapter, returning cloned datasets with the setting changed (jeremyevans)
* Make looser_typecasting extension handle the strict BigDecimal parsing introduced in ruby 2.4rc1 (jeremyevans)
* Make Database#{db,opts}= in the sequel_3_dataset_methods extension raise for frozen datasets (jeremyevans)
* Speed up repeated calls to Dataset#{interval,range} for frozen datasets using a cached placeholder literalizer (jeremyevans)
* Speed up repeated calls to Dataset#get with a single argument for frozen datasets using a cached placeholder literalizer (jeremyevans)
* Speed up repeated calls to Dataset#{first,last} with arguments/blocks for frozen datasets using a cached placeholder literalizer (jeremyevans)
* Speed up repeated calls to Dataset#{avg,min,max,sum} for frozen datasets using a cached placeholder literalizer (jeremyevans)
* Cache dataset returned by Dataset#skip_locked for frozen datasets (jeremyevans)
* Cache dataset returned by Dataset#for_update for frozen datasets (jeremyevans)
* Cache dataset returned by Dataset#un{filtered,grouped,limited,ordered} for frozen datasets (jeremyevans)
* Cache dataset returned by Dataset#reverse (no args) for frozen datasets (jeremyevans)
* Cache dataset returned by Dataset#invert for frozen datasets (jeremyevans)
* Speed up repeated calls to Dataset#count with an argument or block for frozen datasets using a cached placeholder literalizer (jeremyevans)
* Using :on_duplicate_columns=>:warn Database option with duplicate_columns_handler now prepends file/line to the warning message (jeremyevans)
* Move identifier mangling code to identifier_mangling extension, load by default unless using :identifier_mangling=>false Database option (jeremyevans)
* Allow Dataset#with_extend to accept a block and create a module with that block that the object is extended with (jeremyevans)
* Speed up repeated calls to with_pk on the same frozen model dataset using a cached placeholder literalizer (jeremyevans)
* Add dataset_module methods such as select and order that define dataset methods which support caching for frozen datasets (jeremyevans) (see the sketch after this list)
* Cache subset datasets if they don't use blocks or procs for frozen model datasets (jeremyevans)
* Cache intermediate dataset used in Dataset#{last,paged_each} for frozen model datasets without an order (jeremyevans)
* Cache dataset returned by Dataset#naked for frozen datasets (jeremyevans)
* Cache intermediate dataset used in Dataset#last (no args) for frozen datasets (jeremyevans)
* Cache intermediate dataset used in Dataset#first (no args) and #single_record for frozen datasets (jeremyevans)
* Cache intermediate dataset used in Dataset#empty? for frozen datasets (jeremyevans)
* Cache intermediate dataset used in Dataset#count (no args) for frozen datasets (jeremyevans)
* Warn if :conditions option may be unexpectedly ignored during eager_graph/association_join (jeremyevans) (#1272)
* Cache SELECT and DELETE SQL for most frozen datasets (jeremyevans)
* Freeze most SQL::Expression objects and internal state by default (jeremyevans)
* Freeze Dataset::PlaceholderLiteralizer and Dataset::PlaceholderLiteralizer::Argument instances (jeremyevans)
* Freeze most dataset opts values to avoid unintentional modification (jeremyevans)
* Add Dataset#with_convert_smallint_to_bool on DB2, returning a clone with convert_smallint_to_bool set (jeremyevans)
* Make Dataset#freeze actually freeze the dataset on ruby 2.4+ (jeremyevans)
* Avoid using instance variables other than @opts for dataset data storage (jeremyevans)
* Add freeze_datasets extension, making all datasets for a given Database frozen (jeremyevans)
* Refactor prepared statement internals, using opts instead of instance variables (jeremyevans)
* Model.set_dataset now operates on a clone of the dataset given instead of modifying it, so it works with frozen datasets (jeremyevans)
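
A minimal sketch of the dataset_module entry above; the Album model and its columns are hypothetical:

  class Album < Sequel::Model
    dataset_module do
      where :published, :published=>true   # adds Album.published
      order :by_name, :name                # adds Album.by_name
      select :essentials, :id, :name       # adds Album.essentials
    end
  end

  Album.published.by_name.essentials.all
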
=== 4.41.0 (2016-12-01)
* Add Dataset#with_mssql_unicode_strings on Microsoft SQL Server, returning a clone with mssql_unicode_strings set (jeremyevans)
* Add Dataset#with_identifier_output_method, returning a clone with identifier_output_method set (jeremyevans)
* Add Dataset#with_identifier_input_method, returning a clone with identifier_input_method set (jeremyevans)
* Add Dataset#with_quote_identifiers, returning a clone with quote_identifiers set (jeremyevans)
* Add Dataset#with_extend, returning a clone extended with given modules (jeremyevans) (see the sketch after this list)
* Add Dataset#with_row_proc, returning a clone with row_proc set (jeremyevans)
* Support use of SQL::AliasedExpressions as Model#to_json :include option keys in the json_serializer plugin (sensadrome) (#1269)
* Major improvements to type conversion in the ado adapter (vais, jeremyevans) (#1265)
* Avoid memory leak in ado adapter by closing result sets after yielding them (vais, jeremyevans) (#1259)
* Fix hook_class_methods plugin handling of commit hooks (jeremyevans)
* Make association dataset method correctly handle cases where key fields are nil (jeremyevans)
* Handle pure java exceptions that don't support message= when reraising the exception in the jdbc adapter (jeremyevans)
* Add support for :offset_strategy Database option on DB2, with :limit_offset and :offset_fetch values, to disable OFFSET emulation (#1254) (jeremyevans)
* Remove deprecated support for using Bignum class as a generic type (jeremyevans)
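
An illustrative sketch of the Dataset#with_extend entry above; the PaginateDataset module is hypothetical:

  module PaginateDataset
    def page(num, per_page=25)
      limit(per_page, (num - 1) * per_page)
    end
  end

  ds = DB[:albums].with_extend(PaginateDataset)
  ds.page(2).all
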
=== 4.40.0 (2016-10-28)
* Make column_select plugin not raise an exception if the model's table does not exist (jeremyevans)
* Make dataset_associations plugin correctly handle (many|one)_through_many associations with single join table (jeremyevans) (#1253)
* Add s extension, which adds Sequel::S module that includes private #S method for calling Sequel.expr, including use as refinement (jeremyevans)
* Add symbol_as and symbol_as_refinement extensions so that :column.as(:alias) is treated as Sequel.as(:column, :alias) (jeremyevans)
* Add symbol_aref and symbol_aref_refinement extensions so that :table[:column] is treated as Sequel.qualify(:table, :column) (jeremyevans)
* Add Sequel.split_symbols=, to support the disabling of splitting symbols with double/triple underscores (jeremyevans) (see the sketch after this list)
* Make SQL::QualifiedIdentifier convert SQL::Identifier arguments to strings, fixing Sequel[:schema][:table] usage in schema methods (jeremyevans)
* Do not attempt to combine non-associative operators (jeremyevans) (#1246)
* Automatically add NOT NULL to columns when adding primary keys if the database doesn't handle it (jeremyevans)
* Make prepared_statements plugin correctly handle lookup on joined datasets (jeremyevans) (#1244)
* Make Database#tables with :qualify=>true option handle table names with double underscores correctly (jeremyevans) (#1241)
* Add SQL::Identifier#[] and SQL::QualifiedIdentifier#[] for creating qualified identifiers (jeremyevans)
* Add support for Dataset#insert_conflict :conflict_where option, for a predicate to use in ON CONFLICT clauses (chanks) (#1240)
* Freeze Dataset::NON_SQL_OPTIONS, add private Dataset#non_sql_options, fixing thread safety issues during require (jeremyevans)
* Make the callable returned by Database#rollback_checker thread safe (jeremyevans)
* Make lazy_attributes and dataset_associations plugins work if insert_returning_select plugin is loaded before on model with no dataset (jeremyevans)
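
A sketch tying together the symbol splitting entries above; table/column names are hypothetical:

  # Disable splitting symbols on double/triple underscores:
  Sequel.split_symbols = false

  # Build qualified identifiers without symbol splitting:
  Sequel[:schema][:table]           # "schema"."table"
  Sequel.qualify(:table, :column)   # "table"."column"

  # Or load the symbol_aref extension:
  Sequel.extension :symbol_aref
  :table[:column]                   # "table"."column"
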
=== 4.39.0 (2016-10-01)
* Make active_model plugin use rollback_checker instead of after_rollback hook (jeremyevans)
* Add Database#rollback_checker, which returns a proc that returns whether the in progress transaction is rolled back (jeremyevans)
* Add Sequel::Database.set_shared_adapter_scheme to allow external adapters to support the mock adapter (jeremyevans)
* Make hook_class_methods plugin not use after commit/rollback model hooks (jeremyevans)
* Support add_column :after and :first options on MySQL (AnthonyBobsin, jeremyevans) (#1234)
* Support ActiveSupport 5 in pg_interval extension when weeks/hours are used in ActiveSupport::Duration objects (chanks) (#1233)
* Support IntegerMigrator :relative option, for running only the specified number of migrations up or down (jeremyevans)
* Make the touch plugin also touch associations on create in addition to update and delete (jeremyevans)
* Add :allow_manual_update timestamps plugin option for not overriding a manually set update timestamp (jeremyevans)
* Add Sequel.[] as an alias to Sequel.expr, for easier expression creation (jeremyevans)
* Add PostgreSQL full_text_search :to_tsquery=>:phrase option, for using PostgreSQL 9.6+ full text search phrase searching (jeremyevans) (see the sketch after this list)
* Add JSONBOp#insert in pg_json_ops extension, for jsonb_insert support on PostgreSQL 9.6+ (jeremyevans)
* Support add_column :if_not_exists option on PostgreSQL 9.6+ (jeremyevans)
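
An illustrative sketch of the full text search phrase entry above; the posts table is hypothetical:

  # On PostgreSQL 9.6+, phrase searching via phraseto_tsquery:
  DB[:posts].full_text_search(:body, 'impossible dream', :to_tsquery=>:phrase)
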
=== 4.38.0 (2016-09-01)
* Support :driver_options option when using the postgres adapter with pg driver (jeremyevans)
* Don't use after commit/rollback database hooks if the model instance methods are not overridden (jeremyevans)
* Add SQL::NumericMethods#coerce, allowing code such as Sequel.expr{1 - x} (jeremyevans)
* Support ** operator for exponentiation on expressions, similar to +, -, *, and / (jeremyevans) (see the sketch after this list)
* Add Sequel::SQLTime.date= to set the date used for SQLTime instances (jeremyevans)
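
A minimal sketch of the exponentiation and coerce entries above; the points table is hypothetical:

  DB[:points].select{((x ** 2) + (y ** 2)).as(:dist_squared)}

  # NumericMethods#coerce allows the numeric operand to come first:
  Sequel.expr{1 - x}
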
=== 4.37.0 (2016-08-01)
* Add support for regular expression matching on Oracle 10g+ using REGEXP_LIKE (johndcaldwell) (#1221)
* Recognize an additional disconnect error in the postgres adapter (jeremyevans)
* Make connection pool remove connections for disconnect errors not raised as DatabaseDisconnectError (jeremyevans)
* Support mysql2 0.4+ native prepared statements and bound variables (jeremyevans)
* Add Database#values for VALUES support on SQLite 3.8.3+ (jeremyevans)
* Support create_view :columns option on SQLite 3.9.0+ (jeremyevans)
* Make migration reverser handle alter_table add_constraint using a hash as the first argument (soupmatt) (#1215)
* Make ASTTransformer handle Sequel.extract (jeremyevans) (#1213)
=== 4.36.0 (2016-07-01)
* Deprecate use of Bignum class as generic type, since the behavior will change in ruby 2.4 (jeremyevans)
* Don't hold connection pool mutex while disconnecting connections (jeremyevans)
* Don't hold references to disconnected connections in the connection_validator extension (jeremyevans)
* Don't overwrite existing connection_validation_timeout when loading connection_validator extension multiple times (jeremyevans)
* Add connection_expiration extension, for automatically removing connections open for too long (pdrakeweb) (#1208, #1209)
* Handle disconnection errors raised during string literalization in mysql2 and postgres adapters (jeremyevans)
* Add string_agg extension for aggregate string concatenation support on many databases (jeremyevans)
* Add SQL::Function#order for ordered aggregate functions (jeremyevans)
* Support operator validation in constraint_validations for <, <=, >, and >= operators with string and integer arguments (jeremyevans)
* Make validates_operator validation consider nil values invalid unless :allow_nil or similar option is used (jeremyevans)
* Close cursors for non-SELECT queries in the oracle adapter after execution, instead of waiting until GC (jeremyevans) (#1203)
* Add :class_namespace association option for setting default namespace for :class option given as symbol/string (jeremyevans)
* Add Sequel::Model.cache_anonymous_models accessor for changing caching on a per-model basis (jeremyevans)
* Add Sequel::Model.def_Model for adding a Model() method to a module, for easier use of namespaced models (jeremyevans)
* Add Sequel::Model::Model() for creating subclasses of Sequel::Model subclasses, instead of just Sequel::Model itself (jeremyevans)
=== 4.35.0 (2016-06-01)
* Add :headline option to PostgreSQL Dataset#full_text_search for adding an extract of the matched text to the SELECT list (jeremyevans)
* Make :rollback=>:always inside a transaction use a savepoint automatically if supported (jeremyevans) (#1193)
* Recognize bool type as boolean in the schema dumper (jeremyevans) (#1192)
* Make Dataset#to_hash and #to_hash_groups work correctly for model datasets doing eager loading (jeremyevans)
* Make delay_add_association plugin handle hashes and primary keys passed to add_* association methods (jeremyevans) (#1187)
* Treat :Bignum as a generic type, to support 64-bit integers on ruby 2.4+, where Bignum == Integer (jeremyevans)
* Add server_logging extension for including server/shard information when logging queries (jeremyevans)
* Add Database#log_connection_info, for including connection information when logging queries (jeremyevans)
* Add Dataset#skip_locked for skipping locked rows on PostgreSQL 9.5+, MSSQL, and Oracle (jeremyevans) (see the sketch after this list)
* Allow Sequel::Model#lock! to accept an optional lock style (petedmarsh) (#1183)
* Add sql_comments extension for setting SQL comments on queries (jeremyevans)
* Make Postgres::PGRange#cover? handle empty, unbounded, and exclusive beginning ranges (jeremyevans)
* Fix frozen string literal issues on JRuby 9.1.0.0 (jeremyevans)
* Allow json_serializer :include option with cascaded values to work correctly when used with association_proxies (jeremyevans)
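
An illustrative sketch of the Dataset#skip_locked entry above; the jobs table is hypothetical:

  # Grab a pending job, skipping rows locked by other transactions:
  DB.transaction do
    job = DB[:jobs].where(:state=>'pending').for_update.skip_locked.first
    # ... process job ...
  end
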
=== 4.34.0 (2016-05-01)
* Add support for :dataset_associations_join association option to dataset_associations plugin, for making resulting datasets have appropriate joins (jeremyevans)
* Log the server a connection was attempted to in PoolTimeout exception messages in the sharded connection pool (jeremyevans)
* Log Database :name option in PoolTimeout exception messages (bigkevmcd, jeremyevans) (#1176)
* Add duplicate_columns_handler extension, for raising or warning if a dataset returns multiple columns with the same name (TSMMark, jeremyevans) (#1175)
* Support registering per-Database custom range types in the pg_range extension (steveh) (#1174)
* Support :preconnect=>:concurrently Database option for preconnecting in separate threads (kch, jeremyevans) (#1172)
* Make prepared_statements_safe plugin work correctly with CURRENT_DATE/CURRENT_TIMESTAMP defaults (jeremyevans) (#1168)
* Add validates_operator validation helper (petedmarsh) (#1170) (see the sketch after this list)
* Recognize additional unique constraint violation on Microsoft SQL Server (jeremyevans)
* Add :hash option to Dataset#(select|to)_hash(_groups)? methods for choosing object to populate (mwpastore) (#1167)
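
A minimal sketch of the validates_operator entry above; the Item model and its columns are hypothetical:

  class Item < Sequel::Model
    def validate
      super
      validates_operator(:>, 0, :quantity)            # quantity > 0
      validates_operator(:<=, 100, :discount_percent) # discount_percent <= 100
    end
  end
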
=== 4.33.0 (2016-04-01)
* Handle arbitrary objects passed as arguments to the association method (jeremyevans) (#1166)
* Handle array with multiple columns as Dataset#insert_conflict :target value on PostgreSQL (chanks) (#1165)
* Add Database#transaction :savepoint=>:only option, for only creating a savepoint if already inside a transaction (jeremyevans)
* Make Database#sequence_for_table on Oracle handle cases where the schema for a table cannot be determined (jeremyevans)
* The boolean_readers, boolean_subsets, and class_table_inheritance plugins no longer do blind rescues (jeremyevans) (#1162)
* Add Model.require_valid_table setting, if set to true doesn't swallow any errors for invalid tables (jeremyevans)
* Creating model classes inside a transaction when the table doesn't exist no longer rolls back the transaction on PostgreSQL (jeremyevans) (#1160)
* Sequel::Model no longer swallows many errors when subclassing or setting datasets (jeremyevans) (#1160)
* Handle altering column NULL settings for varchar(max) and text columns on MSSQL (Ilja Resch)
* Remove Sequel.firebird and Sequel.informix adapter methods (jeremyevans)
* Make graph_each extension handle result set splitting when using Dataset#first (jeremyevans)
* Allow raising Sequel::ValidationFailed and Sequel::HookFailed without an argument (jeremyevans)
* Allow schema_dumper to handle :qualify=>true option on PostgreSQL (jeremyevans)
* Allow foreign_key schema method to handle SQL::Identifier and SQL::QualifiedIdentifier as 2nd argument (jeremyevans)
=== 4.32.0 (2016-03-01)
* Use mutex for synchronizing access to association reflection cache on MRI (jeremyevans)
* Add Dataset#delete_from on MySQL, allowing deletions from multiple tables in a single query (jeremyevans) (#1146)
* Add no_auto_literal_strings extension, which makes SQL injection vulnerabilities less likely (jeremyevans)
* Add Model.default_association_options, for setting option defaults for all future associations (jeremyevans)
* Support :association_pks_nil association option in association_pks setter for determining how to handle nil (jeremyevans)
* Make association_pks setter handle empty array correctly when :delay_pks is set (jeremyevans)
* Add a setter method for one_through_one associations (jeremyevans)
* Include :remarks entry in JDBC schema parsing output, containing comments on the column (olleolleolle) (#1143)
* Support :eager_reload and :eager options to associations in tactical_eager_loading plugin (jeremyevans)
* Make tactical_eager_loading not eager load if passing proc or block to association method (jeremyevans)
* Make eager_each plugin handle eager loading for Dataset#first and similar methods (jeremyevans)
=== 4.31.0 (2016-02-01)
* Convert types in association_pks setters before saving them, instead of just before running queries (jeremyevans)
* Use getField and getOID instead of field and oid in the jdbc/postgresql adapter to work around JRuby 9.0.5.0 regression (jeremyevans) (#1137)
* Support using PostgreSQL-specific types in bound variables in the jdbc/postgresql adapter (jeremyevans)
* Add support for running with --enable-frozen-string-literal on ruby 2.3 (jeremyevans)
* Make Database#disconnect in the oracle adapter work correctly on newer versions of oci8 (jeremyevans)
* Support parsing PostgreSQL arrays with explicit bounds (jeremyevans) (#1131)
* Raise an error if attempting to use a migration file not containing a single migration (jeremyevans) (#1127)
* Automatically set referenced key for self referential foreign key constraint for simple non-autoincrementing primary key on MySQL (jeremyevans) (#1126)
=== 4.30.0 (2016-01-04)
* Add Dataset#insert_conflict and #insert_ignore on SQLite for handling uniqueness violations (Sharpie) (#1121) (see the sketch after this list)
* Make Database#row_type in pg_row extension handle different formats of schema-qualified types (jeremyevans) (#1119)
* Add identifier_columns plugin for handling column names containing 2 or more consecutive underscores when saving (jeremyevans) (#1117)
* Support :eager_limit and :eager_limit_strategy dataset options in model eager loaders for per-call limits and strategies (chanks) (#1115)
* Allow IPv6 addresses in database URLs on ruby 1.9+ (hellvinz, jeremyevans) (#1113)
* Make Database#schema :db_type entries include sizes for string types on DB2 (jeremyevans)
* Make Database#schema :db_type entries include sizes for string and decimal types in the jdbc adapter's schema parsing (jeremyevans)
* Recognize another disconnect error in the tinytds adapter (jeremyevans)
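
An illustrative sketch of the SQLite insert_conflict/insert_ignore entry above; the items table is hypothetical:

  DB[:items].insert_ignore.insert(:id=>1, :name=>'abc')
  # INSERT OR IGNORE INTO items (id, name) VALUES (1, 'abc')

  DB[:items].insert_conflict(:replace).insert(:id=>1, :name=>'def')
  # INSERT OR REPLACE INTO items (id, name) VALUES (1, 'def')
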
=== 4.29.0 (2015-12-01)
* Add Model#json_serializer_opts method to json_serializer plugin, allowing for setting to_json defaults on per-instance basis (jeremyevans)
* Add uuid plugin for automatically setting UUID column when creating a model object (pdrakeweb, jeremyevans) (#1106)
* Allow the sqlanywhere adapter to work with sharding (jeremyevans)
* Support blobs as bound variables in the oracle adapter (jeremyevans) (#1104)
* Order by best results first when using the Database#full_text_search :rank option on PostgreSQL (chanks) (#1101)
* Run Database#table_exists? inside a savepoint if currently in a transaction and the database supports savepoints (jeremyevans) (#1100)
* Allow Database#transaction :retry_on option to work when using savepoints (jeremyevans)
* Allow for external adapters to implement Dataset#date_add_sql_append to integrate with the date_arithmetic extension (jeremyevans)
* Add Dataset#insert_empty_columns_values private method for easy overriding for databases that don't support INSERT with DEFAULT VALUES (jeremyevans)
=== 4.28.0 (2015-11-02)
* Add boolean_subsets plugin, which adds a subset for each boolean column (jeremyevans)
* Add subset_conditions plugin, which adds a method for each subset returning the filter conditions for the subset (jeremyevans)
* Make the list plugin work better with the auto_validations plugin when there is a validation on the position column (jeremyevans)
* Make to_csv for model datasets call instance methods, just like Model#to_csv, in the csv_serializer plugin (skrobul) (#1088)
* Raise Sequel::NoExistingObject instead of generic error if Model#refresh can't find the related row (jeremyevans)
=== 4.27.0 (2015-10-01)
* Don't stub Sequel.synchronize on MRI (YorickPeterse) (#1083)
* Make bin/sequel warn if given arguments that it doesn't use (jeremyevans)
* Fix the order of referenced composite keys returned by Database#foreign_key_list on PostgreSQL (jeremyevans) (#1081)
* Recognize another disconnect error in the jdbc/postgresql adapter (jeremyevans)
* In the active model plugin, make Model#persisted? return false if the transaction used for creation is rolled back (jeremyevans) (#1076)
* Use primary_key :keep_order option in the schema dumper if the auto incrementing column is not the first column in the table (jeremyevans)
* Set :auto_increment option correctly in the schema parser when the auto incrementing column is not the first column in the table (jeremyevans)
* Support :keep_order option to primary_key in schema generator, to not automatically make the primary key the first column (jeremyevans)
* Add new jsonb/json functions and operators supported in PostgreSQL 9.5+ (jeremyevans)
* Add before_after_save plugin, for refreshing created objects and resetting modified flag before calling after_create/update/save hooks (jeremyevans)
* Add Dataset#single_record! and #single_value! which don't require cloning the receiver (jeremyevans)
* Dataset#with_sql_single_value now works correctly for model datasets (jeremyevans)
* Optimize Dataset#single_value and #with_sql_single_value to not create an unnecessary array (jeremyevans)
* Make postgres adapter work with postgres-pr 0.7.0 (jeremyevans) (#1074)
=== 4.26.0 (2015-09-01)
* Make Dataset#== not consider frozen status in determining equality (jeremyevans)
* Support :if_exists option to drop_column on PostgreSQL (jeremyevans)
* Add Dataset#grouping_sets to support GROUP BY GROUPING SETS on PostgreSQL 9.5+, MSSQL 2008+, Oracle, DB2, and SQLAnywhere (jeremyevans)
* Fix handling of Class.new(ModelClass){set_dataset :table} on ruby 1.8 (jeremyevans)
* Use range function constructors instead of casts for known range types in pg_range (jeremyevans) (#1066)
* Make class_table_inheritance plugin work without sti_key (jeremyevans)
* Detect additional disconnect errors when using the tinytds adapter (jeremyevans)
* Make offset emulation without order but with explicit selection handle ambiguous column names (jeremyevans)
* Allow preparing already prepared statements when emulating limits and/or offsets (jeremyevans)
* Have Sequel::NoMatchingRow exceptions record the dataset related to the exception (pedro, jeremyevans) (#1060)
=== 4.25.0 (2015-08-01)
* Add Dataset#insert_conflict on PostgreSQL 9.5+, for upsert/insert ignore support using INSERT ON CONFLICT (jeremyevans) (see the sketch after this list)
* Support Dataset#group_rollup and #group_cube on PostgreSQL 9.5+ (jeremyevans)
* Automatically REORG tables when altering them when using the jdbc/db2 adapter (karlhe) (#1054)
* Recognize constraint violation exceptions on swift/sqlite (jeremyevans)
* Recognize another check constraint violation exception message on SQLite (jeremyevans)
* Allow =~ and !~ to be used on ComplexExpressions (janko-m) (#1050)
* Support case sensitive SQL Server 2012 in MSSQL metadata queries (knut2) (#1049)
* Add Dataset#group_append, for appending to the existing GROUP BY clause (YorickPeterse) (#1047)
* Add inverted_subsets plugin, for creating an inverted subset method for each subset (celsworth) (#1042)
* Make Dataset#for_update not use the :read_only database when the dataset is executed (jeremyevans) (#1041)
* Add singular_table_names plugin, for changing Sequel to not pluralize table names by default (jeremyevans)
* PreparedStatement#prepare now raises an Error (jeremyevans)
* Clear delayed association pks when refreshing an object (jeremyevans)
* Add empty_array_consider_nulls extension to make Sequel consider NULL values when using IN/NOT IN with an empty array (jeremyevans)
* Make Sequel default to ignoring NULL values when using IN/NOT IN with an empty array (jeremyevans)
* Remove the deprecated firebird and informix adapters (jeremyevans)
* Make :collate option when creating columns literalize non-String values on PostgreSQL (jeremyevans) (#1040)
* Make dirty plugin notice when serialized column is changed (celsworth) (#1039)
* Allow prepared statements to use RETURNING (jeremyevans) (#1036)
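
An illustrative sketch of the PostgreSQL 9.5+ insert_conflict entry above; the items table is hypothetical:

  DB[:items].insert_conflict.insert(:id=>1, :name=>'abc')
  # INSERT INTO items (id, name) VALUES (1, 'abc') ON CONFLICT DO NOTHING

  DB[:items].insert_conflict(
    :target=>:id,
    :update=>{:name=>Sequel.qualify(:excluded, :name)}
  ).insert(:id=>1, :name=>'abc')
  # ... ON CONFLICT (id) DO UPDATE SET name = excluded.name
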
=== 4.24.0 (2015-07-01)
* Allow class_table_inheritance plugin to support subclasses that don't add additional columns (QuinnHarris, jeremyevans) (#1030)
* Add :columns option to update_refresh plugin, specifying the columns to include in the RETURNING clause (celsworth) (#1029)
* Use column symbol key for auto validation unique errors if the unique index is on a single column (jeremyevans)
* Allow :timeout option to Database#listen in the postgres adapter to be a callable object (celsworth) (#1028)
* Add pg_inet_ops extension, for DSL support for PostgreSQL inet/cidr operators and functions (celsworth, jeremyevans) (#1024)
* Support :*_opts options in auto_validations plugin, for setting options for the underlying validation methods (celsworth, jeremyevans) (#1026)
* Support :delay_pks association option in association_pks to delay setting of associated_pks until after saving (jeremyevans)
* Make jdbc subadapters work if they issue queries while the subadapter is being loaded (jeremyevans) (#1022)
* Handle 64-bit auto incrementing primary keys in jdbc subadapters (DougEverly) (#1018, #1019)
* Remove the deprecated db2 and dbi adapters (jeremyevans)
* Make auto_validations plugin use :from=>:values option to set up validations on the underlying columns (jeremyevans)
* Add :from=>:values option to validation_helpers methods, for getting values from the values hash instead of a method call (jeremyevans)
=== 4.23.0 (2015-06-01)
* Make dataset.call_sproc(:insert) work in the jdbc adapter (flash-gordon) (#1013)
* Add update_refresh plugin, for refreshing a model instance when updating (jeremyevans)
* Add delay_add_association plugin, for delaying add_* method calls on new objects until after saving the object (jeremyevans)
* Add validate_associated plugin, for validating associated objects when validating the current object (jeremyevans)
* Make Postgres::JSONBOp#[] and #get_text return JSONBOp instances (jeremyevans) (#1005)
* Remove the fdbsql, jdbc/fdbsql, and openbase adapters (jeremyevans)
* Database#transaction now returns block return value if :rollback=>:always is used (jeremyevans)
* Allow postgresql:// connection strings as aliases to postgres://, for compatibility with libpq (jeremyevans) (#1004)
* Make Model#move_to in the list plugin handle out-of-range targets without raising an exception (jeremyevans) (#1003)
* Make Database#add_named_conversion_proc on PostgreSQL handle conversion procs for enum types (celsworth) (#1002)
=== 4.22.0 (2015-05-01)
* Deprecate the db2, dbi, fdbsql, firebird, jdbc/fdbsql, informix, and openbase adapters (jeremyevans)
* Avoid hash allocations and rehashes (jeremyevans)
* Don't silently ignore :jdbc_properties Database option in jdbc adapter (jeremyevans)
* Make tree plugin set reciprocal association for children association correctly (lpil, jeremyevans) (#995)
* Add Sequel::MassAssignmentRestriction exception, raised for mass assignment errors in strict mode (jeremyevans) (#994)
* Handle ODBC::SQL_BIT type as boolean in the odbc adapter, fixing boolean handling on odbc/mssql (jrgns) (#993)
* Make auto_validations plugin check :default entry instead of :ruby_default entry for checking existence of default value (jeremyevans) (#990)
* Adapters should now set :default schema option to nil when adapter can determine that the value is nil (jeremyevans)
* Do not add a schema :max_length entry for a varchar(max) column on MSSQL (jeremyevans)
* Allow :default value for PostgreSQL array columns to be a ruby array when using the pg_array extension (jeremyevans) (#989)
* Add csv_serializer plugin for serializing model objects to and from csv (bjmllr, jeremyevans) (#988)
* Make Dataset#to_hash and #to_hash_groups handle single array argument for model datasets (jeremyevans)
* Handle Model#cancel_action in association before hooks (jeremyevans)
* Use a condition variable instead of busy waiting in the threaded connection pools on ruby 1.9+ (jeremyevans)
* Use Symbol#to_proc instead of explicit blocks (jeremyevans)
=== 4.21.0 (2015-04-01)
* Support :tsquery and :tsvector options in Dataset#full_text_search on PostgreSQL, for using existing tsquery/tsvector expressions (jeremyevans)
* Fix TinyTds::Error being raised when trying to cancel a query on a closed connection in the tinytds adapter (jeremyevans)
* Add GenericExpression#!~ for inverting =~ on ruby 1.9 (similar to inverting a hash) (jeremyevans) (#979)
* Add GenericExpression#=~ for equality, inclusion, and pattern matching (similar to using a hash) (jeremyevans) (#979) (see the sketch after this list)
* Add Database#add_named_conversion_proc on PostgreSQL to make it easier to add conversion procs for types by name (jeremyevans)
* Make Sequel.pg_jsonb return JSONBOp instances instead of JSONOp instances when passed other than Array or Hash (jeremyevans) (#977)
* Demodulize default root name in json_serializer plugin (janko-m) (#968)
* Make Database#transaction work in after_commit/after_rollback blocks (jeremyevans)
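
A minimal sketch of the GenericExpression#=~/#!~ entries above; the items table is hypothetical:

  DB[:items].where{id =~ 1}           # id = 1
  DB[:items].where{id =~ [1, 2, 3]}   # id IN (1, 2, 3)
  DB[:items].where{name =~ /abc/i}    # case-insensitive regexp (PostgreSQL/MySQL)
  DB[:items].where{id !~ 1}           # NOT (id = 1), ruby 1.9+ only
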
=== 4.20.0 (2015-03-03)
* Restore the use of AUTOINCREMENT on SQLite (jeremyevans) (#965)
* Duplicate the associations hash when duplicating a model object (jeremyevans)
* Correctly apply association limit when eager loading with an eager block using default limit strategy on some databases (jeremyevans)
* Fix eager loading when using the :window_function limit strategy with an eager block and cascaded associations (jeremyevans)
* Add support for set_column_type :auto_increment=>true to add AUTO_INCREMENT to existing column on MySQL (jeremyevans) (#959)
* Add support for overridding the :instance_specific association option (jeremyevans)
* Recognize MSSQL bit type as boolean in the schema_dumper (jeremyevans)
* Skip eager loading queries if there are no matching keys (jeremyevans) (#952)
* Dataset#paged_each now returns an enumerator if not passed a block (jeremyevans)
* Use to_json :root option with string value as the JSON object key in the json_serializer plugin (jeremyevans)
* Allow create_enum in the pg_enum extension be reversible in migrations (celsworth) (#951)
* Have swift adapter respect database and application timezone settings (asppsa, jeremyevans) (#946)
* Don't have the static cache plugin attempt to validate objects (jeremyevans)
* Make freeze not validate objects if their errors are already frozen (jeremyevans)
* Only use prepared statements for associations if caching association metadata (jeremyevans)
* Set parent association when loading descendants in the rcte_tree plugin (jeremyevans)
* Add Database#transaction :before_retry option, specifying a proc to call before retrying (uhoh-itsmaciek) (#941)
=== 4.19.0 (2015-02-01)
* Make jdbc/sqlanywhere correctly set :auto_increment entry in schema hashes (jeremyevans)
* Add Model#cancel_action for canceling actions in before hooks, instead of having the hooks return false (jeremyevans) (see the sketch after this list)
* Support not setting @@wait_timeout on MySQL via :timeout=>nil Database option (jeremyevans)
* Add accessed_columns plugin, recording which columns have been accessed for a model instance (jeremyevans)
* Use correct migration version when using IntegerMigrator with :allow_missing_migration_files (blerins) (#938)
* Make Dataset#union, #intersect, and #except automatically handle datasets with raw SQL (jeremyevans) (#934)
* Add column_conflicts plugin to automatically handle columns that conflict with method names (jeremyevans) (#929)
* Add Model#get_column_value and #set_column_value to get/set column values (jeremyevans) (#929)
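
An illustrative sketch of the Model#cancel_action entry above; the Album model and published? method are hypothetical:

  class Album < Sequel::Model
    def before_destroy
      cancel_action("cannot destroy a published album") if published?
      super
    end
  end
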
=== 4.18.0 (2015-01-02)
* Make Dataset#empty? work when the dataset is ordered by a non-column expression (pete) (#923)
* Fix passing a hash value to :eager association option (jeremyevans)
* Treat all PG::ConnectionBad exceptions as disconnect errors in the postgres adapter (jeremyevans)
* Add :auto_increment key to schema information for primary key columns (jeremyevans) (#919)
* Fix handling of schema qualified tables in many_through_many associations (jeremyevans)
=== 4.17.0 (2014-12-01)
* Fix handling of Sequel::SQL::Blob instances in bound variables in the postgres adapter (jeremyevans) (#917)
* Add :preconnect Database option for immediately creating the maximum number of connections (jeremyevans)
* Support DB.pool.max_size for the single connection pools (jeremyevans)
* Work around regression in jdbc-sqlite3 3.8.7 where empty blobs are returned as nil (jeremyevans)
* Work around regression in jdbc-sqlite3 3.8.7 when using JDBC getDate method for date parsing (jeremyevans)
* Make Model#update_or_create return object if existing object exists but updates are not necessary (contentfree) (#916)
* Add Dataset#server? for conditionally setting a default server to use if no default is present (jeremyevans)
* Add Database#sharded? for determining if database uses sharding (jeremyevans)
* Fix server used by Dataset#insert_select on PostgreSQL (jeremyevans)
* Fix server used for deleting model instances when using sharding (jeremyevans)
=== 4.16.0 (2014-11-01)
* Make Database#create_table? and #create_join_table? not use IF NOT EXISTS if indexes are being added (jeremyevans) (#904)
* Dataset#distinct now accepts virtual row blocks (chanks) (#901)
* Recognize disconnect errors in the postgres adapter when SSL is used (jeremyevans) (#900)
* Stop converting '' default values to nil default values on MySQL (jeremyevans)
* Add Model#qualified_pk_hash, for returning a hash with qualified pk keys (jeremyevans)
* Make validates_unique use a qualified primary key if the dataset is joined (jeremyevans) (#895)
* Make Sequel::Model.cache_associations = false skip the database's schema cache when loading the schema (jeremyevans)
* Make Database#foreign_key_list work on Microsoft SQL Server 2005 (jeremyevans)
* Make create_table with :foreign option reversible on PostgreSQL (jeremyevans)
* Make drop_table with :foreign option on PostgreSQL drop a foreign table (johnnyt) (#892)
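
  A sketch of Dataset#distinct with a virtual row block, as noted above;
  on PostgreSQL the expressions become a DISTINCT ON clause (table and
  column names are hypothetical, SQL shown approximately):

    DB[:albums].distinct{date_trunc('month', created_at)}
    # SELECT DISTINCT ON (date_trunc('month', created_at)) * FROM albums
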
=== 4.15.0 (2014-10-01)
* Make AssociationReflection#reciprocal not raise error if associated class contains association with invalid associated class (jeremyevans)
* Make create_view(:view_name, dataset, :materialized=>true) reversible on PostgreSQL (jeremyevans)
* Add support for creating foreign tables on PostgreSQL using :foreign and :options create_table options (jeremyevans) (see sketch below)
* Raise Error if a primary key is necessary to use an association, but the model doesn't have a primary key (jeremyevans)
* Make tactical_eager_loading plugin work for limited associations (jeremyevans)
* Add PlaceholderLiteralizer#with_dataset, for returning a new literalizer using a modified dataset (jeremyevans)
* Support active_model 4.2.0beta1 in the active_model plugin (jeremyevans)
* Make Dataset#insert in the informix adapter return last inserted id (jihwans) (#887)
* Support :nolog option in the informix adapter to disable transactions (jihwans) (#887)
* Remove optional argument for Postgres::{JSON,JSONB}Op#to_record and #to_recordset (jeremyevans)
* Add support for FoundationDB SQL Layer, via fdbsql and jdbc/fdbsql adapters (ScottDugas, jeremyevans) (#884)
* Work around bug in old versions of MySQL when schema dumping a table with multiple timestamp columns (jeremyevans) (#882)
* Support more array types by default in the pg_array extension, such as xml[] and uuid[] (jeremyevans)
* Add Sequel::Model.cache_associations accessor, which can be set to false to not cache association metadata (jeremyevans)
* Add split_values plugin, for moving noncolumn entries from the values hash into a separate hash (jeremyevans) (#868)
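
  A sketch of the foreign table support noted above (the foreign server
  name and file_fdw options are hypothetical):

    DB.create_table(:remote_items,
                    foreign: :file_server,
                    options: {filename: '/tmp/items.csv', format: 'csv'}) do
      Integer :id
      String :name
    end
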
=== 4.14.0 (2014-09-01)
* Raise original exception if there is an exception raised when rolling back transaction/savepoint (jeremyevans) (#875)
* Allow delayed evaluation blocks to take dataset as an argument (jeremyevans)
* Allow more types as filter expressions, only specifically disallow Numeric/String expressions (jeremyevans)
* Remove objects from cached association array at time of nested_attributes call instead of waiting until save (jeremyevans)
* Handle composite primary keys when working around validation issues for one_to_(one|many) associations in nested_attributes plugin (jeremyevans) (#870)
* Recognize additional disconnect error in jdbc/jtds adapter (jeremyevans)
* Have association_join work with existing model selections (jeremyevans)
* Fix regression in class_table_inheritance plugin when lazily loading column in middle table (jeremyevans) (#862)
* Add cache_key_prefix method to caching plugin, which can be overridden for custom handling (pete) (#861)
* Add :when option to PostgreSQL create_trigger method, for adding a filter to the trigger (aschrab) (#860)
* Recognize an additional serialization failure on PostgreSQL (tmtm) (#857)
=== 4.13.0 (2014-08-01)
* Use copy constructors instead of overriding Model#dup and #clone (ged, jeremyevans) (#852)
* Fix handling of MySQL create_table foreign_key calls using :key option (mimperatore, jeremyevans) (#850)
* Handle another disconnection error in the postgres adapter (lbosque) (#848)
* Make list plugin update remaining positions after destroying an instance (ehq, jeremyevans) (#847)
* Unalias aliased tables in Dataset#insert (jeremyevans)
* Add insert_returning_select plugin, for setting up RETURNING for inserts for models selecting explicit columns (jeremyevans)
* Make Model#save use insert_select if the dataset used for inserting already uses returning (jeremyevans)
* Add Dataset#unqualified_column_for helper method, returning unqualified version of possibly qualified column (jeremyevans)
* Calling Dataset#returning when the Database does not support or emulate RETURNING now raises an Error (jeremyevans)
* Emulate RETURNING on Microsoft SQL Server using OUTPUT, as long as only simple column references are used (jeremyevans)
* Switch class_table_inheritance plugin to use JOIN ON instead of JOIN USING (jeremyevans)
* Qualify primary keys for models with joined datasets when looking up model instances by primary key (jeremyevans)
* Fix qualification of columns when Dataset#graph automatically wraps the initially graphed dataset in a subselect (jeremyevans)
* Make Dataset#joined_dataset? a public method (jeremyevans)
* Allow external jdbc, odbc, and do subadapters to be loaded automatically (jeremyevans)
* Recognize another disconnect error in the jdbc/mysql adapter (jeremyevans)
* Set primary keys correctly for models even if datasets select specific columns (jeremyevans)
* Add dataset_source_alias extension, for automatically aliasing datasets to their first source (jeremyevans) (see sketch below)
* Use qualified columns in the lazy_attributes plugin (jeremyevans)
* Add column_select plugin, for using explicit column selections in model datasets (jeremyevans)
* Use associated model's existing selection for join associations if it consists solely of explicitly qualified columns (jeremyevans)
* Add round_timestamps extension for automatically rounding timestamp values to database precision before literalizing (jeremyevans)
* Make rake default task run plugin specs as well as core/model specs (jeremyevans)
* Use all_tables and all_views for Database#tables and #views on Oracle (jeremyevans)
* Use all_tab_cols instead of user_tab_cols for defaults parsing in the oracle adapter (jeremyevans)
* Fix recursive mutex locking issue on JRuby when using Sequel::Model(dataset) (jeremyevans) (#841)
* Make composition and serialization plugins support validations on underlying columns (jeremyevans)
* Fix regression in timestamps and table inheritance plugin where column values would not be saved if validation is skipped (jeremyevans) (#839)
* Add pg_enum extension, for dealing with PostgreSQL enums (jeremyevans)
* Add modification_detection plugin, for automatic detection of in-place column value modifications (jeremyevans)
* Speed up using plain strings, numbers, true, false, and nil in json columns if underlying json library supports them (jeremyevans) (#834)
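
  A sketch of the dataset_source_alias extension noted above (SQL shown
  approximately):

    DB.extension :dataset_source_alias
    DB.from(DB[:albums].where(released: true))
    # SELECT * FROM (SELECT * FROM albums WHERE (released IS TRUE)) AS albums
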
=== 4.12.0 (2014-07-01)
* Support :readonly Database option in sqlite adapter (ippeiukai, jeremyevans) (#832)
* Automatically setup max_length validations for string columns in the auto_validations plugin (jeremyevans)
* Add :max_length entry to column schema hashes for string types (jeremyevans)
* Add :before_thread_exit option to Database#listen_for_static_cache_updates in pg_static_cache_updater extension (jeremyevans)
* Add Database#values on PostgreSQL to create a dataset that uses VALUES instead of SELECT (jeremyevans) (see sketch below)
* Add Model#set_nested_attributes to nested_attributes, allowing setting nested attributes options per-call (jeremyevans)
* Use explicit columns when using automatically prepared SELECT statements in the prepared statement plugins (jeremyevans)
* Make Dataset#insert_select on PostgreSQL respect existing RETURNING clause (jeremyevans)
* Fix eager loading limited associations via a UNION when an association block is used (jeremyevans)
* Associate reciprocal object before saving associated object when creating new objects in nested_attributes (chanks, jeremyevans) (#831)
* Handle intervals containing more than 100 hours in the pg_interval extension's parser (will) (#827)
* Remove methods/class deprecated in 4.11.0 (jeremyevans)
* Allow Dataset#natural_join/cross_join and related methods to take an options hash passed to join_table (jeremyevans)
* Add :reset_implicit_qualifier option to Dataset#join_table, to set false to not reset the implicit qualifier (jeremyevans)
* Support :notice_receiver Database option when postgres adapter is used with pg driver (jeltz, jeremyevans) (#825)
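
  A sketch of Database#values on PostgreSQL, as noted above:

    DB.values([[1, 'a'], [2, 'b']])
    # VALUES (1, 'a'), (2, 'b')
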
=== 4.11.0 (2014-06-03)
* Add :model_map option to class_table_inheritance plugin so class names don't need to be stored in the database (jeremyevans)
* Set version when using MySQL/SQLite emulation in the mock adapter (jeremyevans)
* Add support for CUBRID/SQLAnywhere emulation to the mock adapter (jeremyevans)
* Add support for the jsonb operators added in PostgreSQL 9.4 to the pg_json_ops extension (jeremyevans)
* Add support for new json/jsonb functions added in PostgreSQL 9.4 to the pg_json_ops extension (jeremyevans)
* Add support for the PostgreSQL 9.4+ jsonb type to the pg_json_ops extension (jeremyevans)
* Add support for derived column lists to Sequel.as and SQL::AliasMethods#as (jeremyevans)
* Support connecting to a DB2 catalog name in the ibmdb adapter (calh) (#821)
* Fix warnings in some cases in the ibmdb adapter (calh) (#820)
* Add SQL::Function#with_ordinality for creating set returning functions WITH ORDINALITY (jeremyevans)
* Add SQL::Function#filter for creating filtered aggregate function calls (jeremyevans) (see sketch below)
* Add SQL::Function#within_group for creating ordered-set and hypothetical-set aggregate functions (jeremyevans)
* Add SQL::Function#lateral, for creating set returning functions that will be preceded by LATERAL (jeremyevans)
* Add SQL::Function#quoted and #unquoted methods, to enable/disable quoting of function names (jeremyevans)
* Deprecate Dataset#{window,emulated,}_function_sql_append (jeremyevans)
* Deprecate SQL::WindowFunction and SQL::EmulatedFunction classes, switch to using options on SQL::Function (jeremyevans)
* Only modify changed_columns if deserialized value changes in the serialization plugin (jeremyevans) (#818)
* Support PostgreSQL 9.4+ jsonb type in the pg_json extension (jeremyevans)
* Allow Postgres::ArrayOp#unnest to accept arguments in the pg_array_ops extension (jeremyevans)
* Add Postgres::ArrayOp#cardinality to the pg_array_ops extension (jeremyevans)
* Add :check option to Database#create_view for WITH [LOCAL] CHECK OPTION support (jeremyevans)
* Add :concurrently option to Database#refresh_view on PostgreSQL to support concurrent refresh of materialized views (jeremyevans)
* Call the :after_connect Database option proc with both the connection and server/shard if it accepts 2 arguments (pedro, jeremyevans) (#813)
* Make multiple plugins set values before validation instead of before create, works better with auto_validations (jeremyevans)
* Support a default Dataset#import slice size, set to 500 on SQLite (jeremyevans) (#810)
* Make :read_only transaction option be per-savepoint on PostgreSQL (jeremyevans) (#807)
* Add :rank option to Dataset#full_text_search on PostgreSQL, to order by the ranking (jeremyevans) (#809)
* Remove methods deprecated in 4.10.0 (jeremyevans)
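
  Sketches of the SQL::Function#filter and #within_group modifiers noted
  above (SQL shown approximately):

    Sequel.function(:count, :id).filter(active: true)
    # count(id) FILTER (WHERE (active IS TRUE))

    Sequel.function(:percentile_cont, 0.5).within_group(:score)
    # percentile_cont(0.5) WITHIN GROUP (ORDER BY score)
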
=== 4.10.0 (2014-05-01)
* Make Model.include API same as Module.include (ged) (#803)
* Dataset::PlaceholderLiteralizer now handles DelayedEvaluations correctly (jeremyevans)
* Refactor type conversion in the jdbc adapter, for up to a 20% speedup (jeremyevans)
* Add Dataset#with_fetch_size to jdbc adapter, for setting fetch size for JDBC ResultSets (jeremyevans)
* Default to a fetch_size of 100 in the jdbc/oracle adapter, similar to the oci8-based oracle adapter (jeremyevans)
* Add Database#fetch_size accessor and :fetch_size option to jdbc adapter, for setting JDBC Statement fetch size (jeremyevans)
* Automatically determine array type in pg_array_associations plugin, explicitly cast arrays in more places (jeremyevans, maccman) (#800)
* Speed up Dataset#literal for symbols 60% by caching results, speeding up dataset literalization up to 40% or more (jeremyevans)
* Speed up Sequel.split_symbol 10-20x by caching results, speeding up dataset literalization up to 80% or more (jeremyevans)
* Speed up dataset literalization for simple datasets by up to 100% (jeremyevans)
* Support :fractional_seconds Database option on MySQL 5.6.5+ to support fractional seconds by default (jeremyevans) (#797)
* Work around MySQL 5.6+ bug when combining DROP FOREIGN KEY and DROP INDEX in same ALTER TABLE statement (jeremyevans)
* Make auto_validations plugin handle models that select from subqueries (jeremyevans)
* Recognize additional disconnect errors in the postgres adapter (jeremyevans)
* Make import/multi_insert insert multiple rows in a single query using a UNION on Oracle, DB2, and Firebird (jeremyevans)
* Speed up association_pks many_to_many setter method by using Dataset#import (jeremyevans)
* Add Model.prepared_finder, similar to .finder but using a prepared statement (jeremyevans)
* Model.def_{add_method,association_dataset_methods,remove_methods} are now deprecated (jeremyevans)
* Model.eager_loading_dataset and Model.apply_association_dataset_opts are now deprecated (jeremyevans)
* Make prepared_statement_associations plugin handle one_through_one and one_through_many associations (jeremyevans)
* Use placeholder literalizer for regular association loading for up to 85% speedup (jeremyevans)
* Use placeholder literalizer for eager association loading for up to 20% speedup (jeremyevans)
* Make Model#marshallable! work correctly when using the tactical_eager_loading plugin (jeremyevans)
* Respect :foreign_key_constraint_name option when adding columns to existing table on MySQL (noah256) (#795)
* AssociationReflection#association_dataset now handles joining tables if necessary (jeremyevans)
* Support drop_view :if_exists option on SQLite, MySQL, H2, and HSQLDB (jeremyevans) (#793)
* Support drop_table :if_exists option on HSQLDB (jeremyevans)
* Add Database#transaction :auto_savepoint option, for automatically using a savepoint in nested transactions (jeremyevans) (see sketch below)
* Add :server_version Database option on Microsoft SQL Server, instead of querying the database for it (jeremyevans)
* Support :correlated_subquery as an eager_graph and filter by associations limit strategy for one_to_* associations (jeremyevans)
* Support named parameters in call_mssql_sproc on Microsoft SQL Server (y.zemlyanukhin, jeremyevans) (#792)
* Handle placeholder literalizer arguments when emulating offsets (jeremyevans)
* Don't attempt to emulate offsets if the dataset uses literal SQL (jeremyevans)
* Use a UNION-based strategy by default to eagerly load limited associations (jeremyevans)
* Support offsets without limits on MySQL, SQLite, H2, SQLAnywhere and CUBRID (jeremyevans)
* Remove the install/uninstall rake tasks (jeremyevans)
* Use INSERT VALUES with multiple rows for Dataset#import and #multi_insert on more databases (jeremyevans)
* Support common table expressions (WITH clause) on SQLite >=3.8.3 (jeremyevans)
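
  A sketch of the :auto_savepoint transaction option noted above:

    DB.transaction(auto_savepoint: true) do
      DB.transaction do               # implicitly becomes a savepoint
        DB[:items].insert(name: 'a')  # :items is a hypothetical table
      end
    end
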
=== 4.9.0 (2014-04-01)
* Recognize CHECK constraint violations on newer versions of SQLite (jeremyevans)
* Do not attempt to eager load when calling Dataset#columns in the eager_each plugin (jeremyevans)
* Support :driver option for jdbc adapter, for specifying driver class for cases where getConnection doesn't work (jeremyevans) (#785)
* Massive speedup for PostgreSQL array parser (jeremyevans) (#788)
* Add current_datetime_timestamp extension, for current Time/DateTime instances that are literalized as CURRENT_TIMESTAMP (jeremyevans)
* Recognize additional unique constraint violations on SQLite (jeremyevans) (#782)
* Don't remove column value when validating nested attributes for one_to_* association where association foreign key is the model's primary key (jeremyevans)
* Add Dataset#disable_insert_returning on PostgreSQL for skipping implicit use of RETURNING (jeremyevans)
* Automatically optimize Model.[], .with_pk, and .with_pk! for models with composite keys (jeremyevans)
* Automatically optimize Model.[] when called with a hash (jeremyevans)
* Automatically optimize Model.find, .first, and .first! when called with a single argument (jeremyevans)
* Add Model.finder for creating optimized finder methods using Dataset::PlaceholderLiteralizer (jeremyevans) (see sketch below)
* Add Dataset::PlaceholderLiteralizer optimization framework (jeremyevans)
* Add Dataset#with_sql_{each,all,first,single_value,insert,update} optimized methods (jeremyevans)
* Make pg_array extension use correct type when typecasting column values for smallint, oid, real, character, and varchar arrays (jeremyevans)
* Make Database#column_schema_to_ruby_default a public method in the schema_dumper extension (jeremyevans) (#776)
* Fix multiple corner cases in the eager_graph support (jeremyevans) (#771)
* Use streaming to implement paging for Dataset#paged_each in the mysql2 adapter (jeremyevans)
* Use a cursor to implement paging for Dataset#paged_each in the postgres adapter (jeremyevans)
* Add Database#create_join_table? and #create_join_table! for consistency (jeremyevans)
* Add Dataset#where_current_of to the postgres adapter for supporting updating rows based on a cursor's current position (jeremyevans)
* Add Dataset#use_cursor :hold option in the postgres adapter for supporting cursor use outside of a transaction (jeremyevans)
* Add Dataset#paged_each :strategy=>:filter option for increased performance (jeremyevans)
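
  A sketch of Model.finder noted above (the Album model and by_name
  dataset method are hypothetical):

    class Album < Sequel::Model
      dataset_module do
        def by_name(name)
          where(name: name)
        end
      end
      finder :by_name  # defines an optimized Album.first_by_name
    end

    Album.first_by_name('Blue')
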
=== 4.8.0 (2014-03-01)
* Add SQL::AliasedExpression#alias alias for #aliaz (jeremyevans)
* Handle SQL::Identifier, SQL::QualifiedIdentifier, and SQL::AliasedExpression objects as first argument to Dataset#graph (jeremyevans)
* Respect qualification and aliases in symbols passed as first argument to Dataset#graph (dividedmind) (#769)
* Recognize new constraint violation error messages in SQLite 3.8.2+ (itswindtw) (#766)
* Use limit strategy to correctly handle limited associations in the dataset_associations plugin (jeremyevans)
* Handle issues in dataset_associations plugin when dataset uses unqualified identifiers for associations requiring joins (jeremyevans)
* Handle fractional seconds in input timestamps in the odbc/mssql adapter (Ross Attrill, jeremyevans)
* Return fractional seconds in timestamps in the odbc adapter (jeremyevans)
* Support :plain and :phrase options to Dataset#full_text_search on PostgreSQL (jeremyevans)
* Use limit strategy to correctly handle filtering by limited associations (jeremyevans)
* Simplify queries used for filtering by associations with conditions (jeremyevans)
* Use an eager limit strategy by default for *_one associations with orders (jeremyevans)
* Support :limit_strategy eager_graph option, for specifying strategy used for limited associations in that eager graph (jeremyevans)
* Add eager_graph_with_options to model datasets, for specifying options specific to the eager_graph call (jeremyevans)
* Handle offsets on *_many associations when eager graphing when there are no associated results (jeremyevans)
* Make Database#register_array_type work without existing scalar conversion proc in the pg_array extension (jeremyevans)
* Handle presence validations on foreign keys in associated objects when creating new associated objects in the nested_attributes plugin (jeremyevans)
* Respect offsets when eager graphing *_one associations (jeremyevans)
* Add association_join to model datasets, for setting up joins based on associations (jeremyevans)
* Add one_through_many association to many_through_many plugin, for only returning a single record (jeremyevans)
* Add :graph_order association option, useful when :order needs to contain qualified identifiers (jeremyevans)
* Add one_through_one association, similar to many_to_many but only returning a single record (jeremyevans) (see sketch below)
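
  A sketch of a one_through_one association, as noted above (the models
  are hypothetical; defaults mirror many_to_many):

    class Album < Sequel::Model
      one_through_one :artist  # joins through the albums_artists table
    end

    Album.first.artist  # at most one associated record
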
=== 4.7.0 (2014-02-01)
* Don't swallow underlying exception if there is an exception closing the cursor on PostgreSQL (jeremyevans) (#761)
* Recognize primary key unique constraint violations on MSSQL and SQLAnywhere (jeremyevans)
* Recognize composite unique constraint violations on SQLite (timcraft) (#758)
* Make #* method without arguments on SQL::Function return a Function with * prepended to the arguments (jeremyevans)
* Add #function to SQL::Identifier and SQL::QualifiedIdentifier, allowing for easy use of schema qualified functions or functions names that need quoting (jeremyevans)
* Add SQL::Function#distinct for easier creation of aggregate functions using DISTINCT (jeremyevans)
* Add SQL::Function#over for easier creation of window functions (jeremyevans) (see sketch below)
* Don't clear validation instance_hooks until after a successful save (jeremyevans)
* Support :raise_on_save_failure option for one_to_many, pg_array_to_many, and many_to_pg_array associations (jeremyevans)
* Make SQLTime#to_s return a string in HH:MM:SS format, since it shouldn't include date information (jeremyevans)
* Support the Database#tables :schema option in the jdbc adapter (robbiegill, jeremyevans) (#755)
* Automatically rollback transactions in killed threads in ruby 2.0+ (chanks) (#752)
* Add update_or_create plugin, for updating an object if it exists, or creating such an object if it does not (jeremyevans)
* Make auto_validations uniqueness validations work correctly for STI subclasses (jeremyevans)
* Support :dataset option to validates_unique validation (jeremyevans)
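
  Sketches of SQL::Function#distinct and #over, as noted above (SQL shown
  approximately):

    Sequel.function(:count, :id).distinct
    # count(DISTINCT id)

    Sequel.function(:row_number).over(partition: :artist_id, order: :id)
    # row_number() OVER (PARTITION BY artist_id ORDER BY id)
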
=== 4.6.0 (2014-01-02)
* Add Database#call_mssql_sproc on MSSQL for calling stored procedures and handling output parameters (jrgns, jeremyevans) (#748)
* Handle RuntimeErrors raised by oci8 in the oracle adapter (jeremyevans)
* Support OFFSET/FETCH on Microsoft SQL Server 2012 (jeremyevans)
* Support :server option for Database#{commit,rollback}_prepared_transaction on PostgreSQL, MySQL, and H2 (jeremyevans) (#743)
* Do not attempt to eager load and raise an exception when doing Model.eager(...).naked.all (jeremyevans)
* Recognize a couple additional disconnect errors in the jdbc/postgresql adapter (jeremyevans) (#742)
=== 4.5.0 (2013-12-02)
* Support :on_commit=>(:drop|:delete_rows|:preserve_rows) options when creating temp tables on PostgreSQL (rosenfeld) (#737) (see sketch below)
* Make Dataset#insert work on PostgreSQL if the table name is a SQL::PlaceholderLiteralString (jeremyevans) (#736)
* Copy unique constraints when emulating alter_table operations on SQLite (jeremyevans) (#735)
* Don't return clob column values as SQL::Blob instances in the db2 and ibmdb adapters unless use_clob_as_blob is true (jeremyevans)
* Make use_clob_as_blob false by default on DB2 (jeremyevans)
* Fix usage of Sequel::SQL::Blob objects as prepared statement arguments in jdbc/db2 adapter when use_clob_as_blob is false (jeremyevans)
* Add mssql_optimistic_locking plugin, using a timestamp/rowversion column to protect against concurrent updates (pinx, jeremyevans) (#731)
* Make Model.primary_key array immutable for composite keys (chanks) (#730)
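
  A sketch of the :on_commit temp table option noted above (SQL shown
  approximately):

    DB.create_table(:scratch, temp: true, on_commit: :drop) do
      Integer :n
    end
    # CREATE TEMPORARY TABLE scratch (n integer) ON COMMIT DROP
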
=== 4.4.0 (2013-11-01)
* Make Database#tables not show tables in the recycle bin on Oracle (jeremyevans) (#728)
* Don't automatically order on all columns when emulating offsets for unordered datasets on DB2 (jeremyevans)
* Improve PostgreSQL type support in the jdbc/postgresql adapter (jeremyevans)
* Make offset emulation on Oracle work when using columns that can't be ordered (jeremyevans, sdeming) (#724, #725)
* Make filter by associations support handle associations with :conditions or block (jeremyevans)
* Make association cloning handle :block correctly for clones of clones (jeremyevans)
* Make association cloning handle :eager_block option correctly (jeremyevans)
* Make add_primary_key work on h2 (jeremyevans)
* Add support for foreign key parsing on Oracle (jeremyevans)
* Add support for foreign key parsing to the jdbc adapter (jeremyevans)
* Make add_foreign_key work on HSQLDB (jeremyevans)
* Add table_select plugin for selecting table.* instead of * for model datasets (jeremyevans)
* Issue constraint_validation table deletes before inserts, so modifying constraint via drop/add in same alter_table block works (jeremyevans)
* Support add_*/remove_*/remove_all_* pg_array_to_many association methods on unsaved model objects (jeremyevans)
* Add Sybase SQLAnywhere support via new sqlanywhere and jdbc/sqlanywhere adapters (gditrick, jeremyevans)
* Add Dataset#offset for setting the offset separately from the limit (Paul Henry, jeremyevans) (#717)
=== 4.3.0 (2013-10-02)
* Fix literalization of empty blobs on MySQL (jeremyevans) (#715)
* Ensure Dataset#page_count in pagination extension is at least one (jeremyevans) (#714)
* Recognize another disconnect error in the jdbc/as400 adapter (jeremyevans)
* Make Dataset#qualify and Sequel.delay work together (jeremyevans)
* Recognize citext type as string on PostgreSQL (isc) (#710)
* Support composite keys in the rcte_tree plugin (jeremyevans)
* Support composite keys in the tree plugin (jeremyevans)
* Make Migrator.migrator_class public (robertjpayne, jeremyevans) (#708)
* Make PostgreSQL empty array literalization work correctly on PostgreSQL <8.4 (jeremyevans)
* Add Sequel extensions guide (jeremyevans)
* Add model plugins guide (jeremyevans)
* Add error_sql Database extension, allowing DatabaseError#sql to return SQL query that caused underlying exception (jeremyevans)
* Make Dataset#each_page in pagination extension return enumerator if no block is given (justinj) (#702)
=== 4.2.0 (2013-09-01)
* Support custom :flags option in mysql2 adapter (jeremyevans) (#700)
* Add implementations of Dataset#freeze and Dataset#dup (jeremyevans)
* Add implementations of Model#dup and Model#clone (jeremyevans)
* Don't have partial_indexes returned by Database#indexes on MSSQL 2008+ (jeremyevans)
* Support partial indexes on SQLite 3.8.0+ (jeremyevans)
* Add Database#supports_partial_indexes? to check for partial index support (mluu, jeremyevans) (#698)
* The static_cache plugin now disallows saving/destroying if the :frozen=>false option is not used (jeremyevans)
* Support :frozen=>false option in static_cache plugin, for having new instances returned instead of frozen cached instances (jeremyevans)
* Add pg_static_cache_updater Database extension for listening for changes to tables and updating static_cache caches automatically (jeremyevans)
* Add mssql_emulate_lateral_with_apply extension for emulating LATERAL queries using CROSS/OUTER APPLY (jeremyevans)
* Support LATERAL queries via Dataset#lateral (jeremyevans) (see sketch below)
* Add pg_loose_count Database extension, for fast approximate counts of PostgreSQL tables (jeremyevans)
* Add from_block Database extension, for having Database#from block affect FROM instead of WHERE (jeremyevans)
* Support :cursor_name option in postgres adapter Dataset#use_cursor (heeringa, jeremyevans) (#696)
* Fix placeholder literal strings when used with an empty placeholder hash (trydionel, jeremyevans) (#695)
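
  A sketch of Dataset#lateral, as noted above (table and column names are
  hypothetical, SQL shown approximately):

    DB.from(:artists,
            DB[:albums].where(artist_id: Sequel[:artists][:id]).lateral)
    # SELECT * FROM artists,
    #   LATERAL (SELECT * FROM albums WHERE (artist_id = artists.id)) AS t1
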
=== 4.1.1 (2013-08-01)
* Fix select_map, select_order_map, and single_value methods on eager_graphed datasets (jeremyevans)
=== 4.1.0 (2013-08-01)
* Support :inherits option in Database#create_table on PostgreSQL, for table inheritance (jeremyevans) (see sketch below)
* Handle dropping indexes for schema qualified tables on PostgreSQL (jeremyevans)
* Add Database#error_info on PostgreSQL 9.3+ if pg-0.16.0+ is used, to get a hash of metadata for a given database exception (jeremyevans)
* Allow prepared_statements plugin to work with instance_filters and update_primary_key plugins (jeremyevans)
* Support deferrable exclusion constraints on PostgreSQL using the :deferrable option (mfoody) (#687)
* Make Database#run and #<< accept SQL::PlaceholderLiteralString values (jeremyevans)
* Deprecate :driver option in odbc adapter since it appears to be broken (jeremyevans)
* Support :drvconnect option in odbc adapter for supplying the ODBC connection string directly (jeremyevans)
* Support mysql2 0.3.12+ result streaming via Dataset#stream (jeremyevans)
* Convert Java::JavaUtil::HashMap to ruby Hash in jdbc adapter, for better handling of PostgreSQL hstore type (jeremyevans) (#686)
* Raise NoMatchingRow if calling add_association with a primary key value that doesn't match an existing row (jeremyevans)
* Allow PostgreSQL add_constraint to support :not_valid option (jeremyevans)
* Allow CHECK constraints to have options by using an options hash as the constraint name (jeremyevans)
* Correctly raise error when using an invalid virtual row block function call (jeremyevans)
* Support REPLACE on SQLite via Dataset#replace and #multi_replace (etehtsea) (#681)
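
  A sketch of the :inherits option noted above (table names are
  hypothetical, SQL shown approximately):

    DB.create_table(:managers, inherits: :employees) do
      Integer :bonus
    end
    # CREATE TABLE managers (bonus integer) INHERITS (employees)
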
=== 4.0.0 (2013-07-01)
* Correctly parse composite primary keys on SQLite 3.7.16+ (jeremyevans)
* Recognize another disconnect error in the jdbc/oracle adapter (jeremyevans)
* Add pg_json_ops extension for calling JSON functions and operators in PostgreSQL 9.3+ (jeremyevans)
* Handle non-JSON plain strings, integers, and floats in PostgreSQL JSON columns in pg_json extension (jeremyevans)
* Dataset#from now accepts virtual row blocks (jeremyevans)
* Add Database#refresh_view on PostgreSQL to support refreshing materialized views (jeremyevans)
* Support the Database#drop_view :if_exists option on PostgreSQL (jeremyevans)
* Support the Database#{create,drop}_view :materialized option for creating materialized views in PostgreSQL 9.3+ (jeremyevans)
* Support the Database#create_view :recursive option for creating recursive views in PostgreSQL 9.3+ (jeremyevans)
* Support the Database#create_view :columns option for using explicit columns (jeremyevans)
* Support the Database#create_schema :owner and :if_not_exists options on PostgreSQL (jeremyevans)
* Support :index_type=>:gist option to create GIST full text indexes on PostgreSQL (jeremyevans)
* Add Postgres::ArrayOp#replace for the array_replace function in PostgreSQL 9.3+ (jeremyevans)
* Add Postgres::ArrayOp#remove for the array_remove function in PostgreSQL 9.3+ (jeremyevans)
* Add Postgres::ArrayOp#hstore for creating hstores from arrays (jeremyevans)
* Make Postgres::ArrayOp#[] return ArrayOp if given a range (jeremyevans)
* Ensure that CHECK constraints are surrounded with parentheses (jeremyevans)
* Ensure Dataset#unbind returned variable hash uses symbol keys (jeremyevans)
* Add pg_array_associations plugin, for associations based on PostgreSQL arrays containing foreign keys (jeremyevans) (see sketch below)
* Add Sequel.deep_qualify, for easily doing a deep qualification (jeremyevans)
* Enable use of window functions for limited eager loading by default (jeremyevans)
* Handle offsets correctly when eager loading one_to_one associations (jeremyevans)
* Raise exception for infinite and NaN floats on MySQL (jeremyevans) (#677)
* Make dataset string literalization that requires database connection use dataset's chosen server (jeremyevans)
* Make sure an offset without a limit is handled correctly when eager loading (jeremyevans)
* Allow providing ranges as subscripts for array[start:end] (jeremyevans)
* Prepare one_to_one associations in the prepared_statements_associations plugin (jeremyevans)
* Use prepared statements when the association has :conditions in the prepared_statements_associations plugin (jeremyevans)
* Fix prepared statement usage in some additional cases in the prepared_statements_associations plugin (jeremyevans)
* Hex escape blob input on MySQL (jeremyevans)
* Handle more disconnect errors when using the postgres adapter with the postgres-pr driver (jeremyevans)
* Model#setter_methods private method now accepts 1 argument instead of 2 (jeremyevans)
* Model#set_restricted and #update_restricted private methods now accept 2 arguments instead of 3 (jeremyevans)
* ungraphed on an eager_graph dataset now resets the original row_proc (jeremyevans)
* eager_graph now returns a naked dataset (jeremyevans)
* All behavior deprecated in Sequel 3.48.0 has been removed (jeremyevans)
* Make adapter/integration spec environment variables more consistent (jeremyevans)
* Sequel no longer provides default databases for adapter/integration specs (jeremyevans)
* Model#save no longer calls #_refresh internally (jeremyevans)
* Model#set_all and #update_all can now update the primary key (jeremyevans)
* Integrate many_to_one_pk_lookup and association_autoreloading plugins into main associations plugin (jeremyevans)
* Make defaults_setter plugin operate in a lazy manner (jeremyevans)
* Plugins now extend the model class with ClassMethods before including InstanceMethods (jeremyevans)
* Remove Model::EMPTY_INSTANCE_VARIABLES (jeremyevans)
* Model.raise_on_typecast_failure now defaults to false (jeremyevans)
* Model#_save private method now only takes a single argument (jeremyevans)
* Remove Dataset#columns_without_introspection from columns_introspection extension (jeremyevans)
* Make boolean prepared statement arguments work on sqlite adapter when integer_booleans is true (jeremyevans)
* Make Database#tables and #views reflect search_path on PostgreSQL (jeremyevans)
* SQLite now defaults to true for integer_booleans and false for use_timestamp_timezones (jeremyevans)
* Make the default value for most option hashes a shared frozen hash (jeremyevans)
* Remove Sequel::NotImplemented exception (jeremyevans)
* Automatically alias single expressions in Dataset#get, #select_map, and #select_order_map, to work around possible DoS issues (jeremyevans)
* Use a connection queue instead of stack by default for threaded connection pools (jeremyevans)
* Remove SQL::SQLArray alias for SQL::ValueList (jeremyevans)
* Remove SQL::NoBooleanInputMethods empty module (jeremyevans)
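
  A sketch of the pg_array_associations plugin noted above (the models
  and the albums.tag_ids integer[] column are hypothetical):

    class Album < Sequel::Model
      plugin :pg_array_associations
      pg_array_to_many :tags    # foreign keys stored in albums.tag_ids
    end

    class Tag < Sequel::Model
      plugin :pg_array_associations
      many_to_pg_array :albums  # reverse side of the same array column
    end
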
=== 3.48.0 (2013-06-01)
* Make named_timezones extension usable by databases allowing timezone strings to be given to Database#timezone= (jeremyevans)
* Make Dataset#or just clone if given an empty argument (jeremyevans)
* Deprecate using a mismatched number of placeholders and arguments in a placeholder literal string (jeremyevans)
* Add Dataset#qualify_to and #qualify_to_first_source to sequel_3_dataset_methods extension (jeremyevans)
* Add scissors plugin for Model.update, .delete, and .destroy (jeremyevans)
* Validate against explicit nil values in NOT NULL columns with default values in the auto_validations plugin (jeremyevans)
* Support :not_null=>:presence option for auto_validations plugin, for using presence validation for not null columns (jeremyevans)
* Rename auto_validate_presence_columns to auto_validate_not_null_columns (jeremyevans)
* Make pg_hstore_ops extension integrate with pg_array, pg_hstore, and pg_array_ops extensions (jeremyevans)
* Add Sequel.json_parser_error_class and Sequel.object_to_json to allow the use of alternative JSON implementations (jeremyevans) (#662)
* Deprecate JSON.create_id usage in the json_serializer plugin (jeremyevans)
* Emulate offsets on Microsoft Access using reverse orders and total counts (jeremyevans) (#661)
* Make ado adapter handle disconnecting an already disconnected connection (jeremyevans)
* Deprecate parsing columns for the same table name in multiple schemas on jdbc (jeremyevans)
* Allow association_proxies plugin to accept a block to give user control over which methods are proxied to the dataset (jeremyevans) (#660)
* Deprecate calling Dataset#add_graph_aliases before #graph or #set_graph_aliases (jeremyevans)
* Deprecate Model.add_graph_aliases, .insert_multiple, .query, .set_overrides, .set_defaults, .to_csv, and .paginate (jeremyevans)
* Add guide for ordering code with Sequel (jeremyevans)
* Deprecate Database#transaction :disconnect=>:retry option (jeremyevans)
* Deprecate Model.set, .update, .delete, and .destroy (jeremyevans)
* Deprecate Dataset#set (jeremyevans)
* Add specs for bin/sequel (jeremyevans)
* Make constraint_validations plugin reflect validations by column (jeremyevans)
* Allow for per-model/per-validation type customization of validation options in constraint_validations plugin (jeremyevans)
* Make Database#constraint_validations in the constraint_validations plugin have raw row values (jeremyevans)
* Fix statement freeing in the ibmdb adapter (jeremyevans)
* Make single and class table inheritance plugins handle usage of set_dataset in a subclass (jeremyevans)
* Allow validates_schema_types in validation_helpers plugin to accept an options hash (jeremyevans)
* Deprecate Model.set_primary_key taking multiple arguments (jeremyevans)
* Make auto_validations plugin work with databases that don't support index parsing (jeremyevans)
* Model classes will no longer call Database#schema if it isn't supported (jeremyevans)
* Speed up Model.with_pk and with_pk! class methods (jeremyevans)
* Speed up Dataset#clone when called without an argument (jeremyevans)
* Deprecate Postgres::PGRangeOp#{starts_before,ends_after} (jeremyevans)
* Deprecate global use of null_dataset, pagination, pretty_table, query, select_remove, schema_caching, schema_dumper, and to_dot extensions (jeremyevans)
* Deprecate Dataset.introspect_all_columns in the columns_introspection extension (jeremyevans)
* Add empty_array_ignore_nulls extension for ignoring null handling for IN/NOT with an empty array (jeremyevans)
* Deprecate Sequel.empty_array_handle_nulls accessor (jeremyevans)
* Deprecate Sequel.{k,ts,tsk}_require and Sequel.check_requiring_thread (jeremyevans)
* Discontinue use of manual thread-safe requiring (jeremyevans)
* Deprecate using an unsupported client_min_messages setting on PostgreSQL (jeremyevans)
* Deprecate passing non-hash 4th argument to Dataset#join_table (jeremyevans)
* Deprecate passing non-hash 2nd argument to Dataset#union/intersect/except (jeremyevans)
* Deprecate one_to_many with :one_to_one option raising an error (jeremyevans)
* Automatically typecast hash and array to string for string columns in the looser_typecasting extension (jeremyevans)
* Deprecate automatic typecasting of hash and array to string for string columns (jeremyevans)
* Deprecate :eager_loader and :eager_grapher association options getting passed 3 separate arguments (jeremyevans)
* Deprecate validates_not_string (jeremyevans)
* Deprecate casting via __type suffix for prepared type placeholders in the postgres adapter (jeremyevans)
* Deprecate json_serializer's Model.json_create (jeremyevans)
* Deprecate json_serializer from_json and xml_serializer from_xml :all_columns and :all_associations options (jeremyevans)
* Deprecate passing an unsupported lock mode to Dataset#lock on PostgreSQL (jeremyevans)
* Deprecate Model::InstanceMethods.class_attr_{overridable,reader} (jeremyevans)
* Deprecate all methods in Dataset::PUBLIC_APPEND_METHODS except for literal, quote_identifier, quote_schema_table (jeremyevans)
* Deprecate all methods in Dataset::PRIVATE_APPEND_METHODS (jeremyevans)
* Deprecate Dataset.def_append_methods (jeremyevans)
* Deprecate Dataset#table_ref_append (jeremyevans)
* Deprecate SQL::Expression#to_s taking an argument and returning a literal SQL string (jeremyevans)
* Deprecate creating Model class methods automatically from plugin public dataset methods (jeremyevans)
* Add Sequel.cache_anonymous_models accessor (jeremyevans)
* Deprecate Sequel::Model.cache_anonymous_models accessor (jeremyevans)
* Deprecate identity_map plugin (jeremyevans)
* Deprecate Model#set_values (jeremyevans)
* Deprecate pg_auto_parameterize and pg_statement_cache extensions (jeremyevans)
* Deprecate Model#pk_or_nil (jeremyevans)
* Deprecate Model.print and Model.each_page (jeremyevans)
* Deprecate Dataset checking that the Database implements the identifier mangling methods (jeremyevans)
* Deprecate Database#reset_schema_utility_dataset private method (jeremyevans)
* Speed up Database#fetch, #from, #select, and #get by using a cached dataset (jeremyevans)
* Make sure adapters with subadapters have fully initialized database instances before calling Database.after_initialize (jeremyevans)
* Set identifier mangling methods on Database initialization (jeremyevans)
* Switch internal use of class variables to instance variables (jeremyevans)
* Deprecate passing an options hash to Database#dataset or Dataset.new (jeremyevans)
* Speed up Dataset#clone (jeremyevans)
* Add sequel_3_dataset_methods extension for Dataset#[]=, #insert_multiple, #set, #to_csv, #db=, and #opts= (jeremyevans)
* Deprecate Dataset#[]=, #insert_multiple, #to_csv, #db=, and #opts= (jeremyevans)
* Add blacklist_security plugin for Model.restricted_columns, Model.set_restricted_columns, Model#set_except, and Model#update_except (jeremyevans)
* Deprecate Model.restricted_columns, Model.set_restricted_columns, Model#set_except, and Model#update_except (jeremyevans)
* Deprecate Database#default_schema (jeremyevans)
* Deprecate Sequel::NotImplemented and defining methods that raise it (jeremyevans)
* Add Database#supports_{index_parsing,foreign_key_parsing,table_listing,view_listing}? (jeremyevans)
* Deprecate Sequel.virtual_row_instance_eval accessor (jeremyevans)
* Deprecate sequel_core.rb and sequel_model.rb (jeremyevans)
* Add graph_each extension for Dataset#graph_each (jeremyevans)
* Deprecate Dataset#graph_each (jeremyevans)
* Add set_overrides extension for Dataset#set_overrides and #set_defaults (jeremyevans)
* Deprecate Dataset#set_overrides and #set_defaults (jeremyevans)
* Deprecate Database#query in the informix adapter (jeremyevans)
* Deprecate Database#do as an alias to execute/execute_dui in some adapters (jeremyevans)
* Deprecate modifying initial Dataset hash if the hash wasn't provided as an argument (jeremyevans)
* Make active_model plugin use an errors class with autovivification (jeremyevans)
* Deprecate Model::Errors#[] autovivification (returning empty array when missing) (jeremyevans)
* Add Model#errors_class private method for choosing the errors class on a per-model basis (jeremyevans)
* Add after_initialize plugin for the after_initialize hook (jeremyevans)
* Deprecate Model after_initialize hook (jeremyevans)
* Deprecate passing two arguments to Model.new (jeremyevans)
* Deprecate choosing reciprocal associations with conditions, blocks, or differing primary keys (jeremyevans)
* Deprecate choosing first from ambiguous reciprocal associations (jeremyevans)
* Deprecate validates_type allowing nil values by default (jeremyevans)
* Deprecate the correlated_subquery eager limit strategy (jeremyevans)
* Add hash_aliases extension for making Dataset#select and #from treat hashes as alias specifiers (jeremyevans)
* Deprecate having Dataset#select and #from treat hashes as alias specifiers (jeremyevans)
* No longer automatically convert virtual row block return values to arrays in some Dataset methods (jeremyevans)
* Add filter_having extension for making Dataset#{and,filter,exclude,or} affect the HAVING clause if present (jeremyevans)
* Deprecate Dataset#select_more meaning Dataset#select when called without an existing selection (jeremyevans)
* Deprecate Dataset#and, #or, and #invert raising exceptions for no existing filter (jeremyevans)
* Deprecate Dataset#{and,filter,exclude,or} affecting the HAVING clause (jeremyevans)
* Deprecate passing explicit columns to update as separate arguments to Model#save (jeremyevans)
* Allow specifying explicit columns to update in Model#save via the :columns option (jeremyevans)
* Add ability to set the default for join_table's :qualify option via Dataset#default_join_table_qualification (jeremyevans)
* Deprecate :root=>true meaning :root=>:both in the json_serializer (jeremyevans)
* Deprecate core extension usage if the core_extensions have not been explicitly loaded (jeremyevans)
* Deprecate Symbol#{[],<,<=,>,>=} methods when using the core_extensions (jeremyevans)
* Add ruby18_symbol_extensions extension for the Symbol#{[],<,<=,>,>=} methods (jeremyevans)
=== 3.47.0 (2013-05-01)
* Don't fail for missing conversion proc in pg_typecast_on_load plugin (jeremyevans)
* Rename PGRangeOp #starts_before and #ends_after to #ends_before and #starts_after (soupmatt) (#655)
* Add Database#supports_schema_parsing? for checking for schema parsing support (jeremyevans)
* Handle hstore[] types on PostgreSQL if using pg_array and pg_hstore extensions (jeremyevans)
* Don't reset conversion procs when loading pg_* extensions (jeremyevans)
* Handle domain types when parsing the schema on PostgreSQL (jeremyevans)
* Handle domain types in composite types in the pg_row extension (jeremyevans)
* Add Database.extension, for loading an extension into all future databases (jeremyevans) (see sketch below)
* Support a :search_path Database option for setting PostgreSQL search_path (jeremyevans)
* Support a :convert_infinite_timestamps Database option in the postgres adapter (jeremyevans)
* Support a :use_iso_date_format Database option in the postgres adapter, for per-Database specific behavior (jeremyevans)
* Add Model.default_set_fields_options, for having a model-wide default setting (jeremyevans)
* Make Model.map, .to_hash, and .to_hash_groups work without a query when using the static_cache plugin (jeremyevans)
* Support :hash_dup and Proc Model inherited instance variable types (jeremyevans)
* Handle aliased tables in the pg_row plugin (jeremyevans)
* Add input_transformer plugin, for automatically transforming input to model column setters (jeremyevans)
* Add auto_validations plugin, for automatically adding not null, type, and unique validations (jeremyevans)
* Add validates_not_null to validation_helpers (jeremyevans)
* Add :setter, :adder, :remover, and :clearer association options for overriding the default modification behavior (jeremyevans)
* Add Database#register_array_type to the pg_array extension, for registering database-specific array types (jeremyevans)
* Speed up fetching model instances when using update_primary_key plugin (jeremyevans)
* In the update_primary_key plugin, if the primary key column changes, clear related associations (jeremyevans)
* Add :allow_missing_migration_files option to migrators, for not raising if migration files are missing (bporterfield) (#652)
* Fix race condition related to prepared_sql for newly prepared statements (jeremyevans) (#651)
* Support :keep_reference=>false Database option for not adding reference to Sequel::DATABASES (jeremyevans)
* Make Postgres::HStoreOp#- explicitly cast a string argument to text, to avoid PostgreSQL assuming it is an hstore (jeremyevans)
* Add validates_schema_types validation for validating column values are instances of an appropriate class (jeremyevans)
* Allow validates_type validation to accept an array of allowable classes (jeremyevans)
* Add Database#schema_type_class for getting the ruby class or classes related to the type symbol (jeremyevans)
* Add error_splitter plugin, for splitting multi-column errors into separate errors per column (jeremyevans)
* Skip validates_unique validation if underlying columns are not valid (jeremyevans)
* Allow Model#modified! to take an optional column argument and mark that column as being modified (jeremyevans)
* Allow Model#modified? to take an optional column argument and check if that column has been modified (jeremyevans)
* Make Model.count not issue a database query if using the static_cache plugin (jeremyevans)
* Handle more corner cases in the many_to_one_pk_lookup plugin (jeremyevans)
* Handle database connection during initialization in jdbc adapter (jeremyevans) (#646)
* Add Database.after_initialize, which takes a block and calls the block with each newly created Database instance (ged) (#641)
* Add a guide detailing PostgreSQL-specific support (jeremyevans)
* Make model plugins deal with frozen instances (jeremyevans)
* Allow freezing of model instances for models without primary keys (jeremyevans)
* Reflect constraint_validations extension :allow_nil=>true setting in the database constraints (jeremyevans)
* Add Plugins.after_set_dataset for easily running code after set_dataset (jeremyevans)
* Add Plugins.inherited_instance_variables for easily setting class instance variables when subclassing (jeremyevans)
* Add Plugins.def_dataset_methods for easily defining class methods that call dataset methods (jeremyevans)
* Make lazy_attributes plugin no longer depend on identity_map plugin (jeremyevans)
* Make Dataset#get with an array of values handle case where no row is returned (jeremyevans)
* Make caching plugin handle memcached API for deletes if ignore_exceptions option is used (rintaun) (#639)
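
  A sketch of Database.extension, as noted above (the URL is a
  placeholder):

    Sequel::Database.extension :pg_array
    DB = Sequel.connect('postgres://localhost/mydb')  # pg_array preloaded
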
=== 3.46.0 (2013-04-02)
* Add Dataset#cross_apply and Dataset#outer_apply on Microsoft SQL Server (jeremyevans)
* Speed up threaded connection pools when :connection_handling=>:queue is used (jeremyevans)
* Allow external connection pool classes to be loaded automatically (jeremyevans)
* Add Dataset#with_pk! for model datasets, like #with_pk, but raising instead of returning nil (jeremyevans)
* Add Dataset#first!, like #first, but raising a Sequel::NoMatchingRow exception instead of returning nil (jeremyevans)
* Dataset #select_map, #select_order_map, and #get no longer support a plain string inside an array of arguments (jeremyevans)
* Escape ] characters in identifiers on Microsoft SQL Server (jeremyevans)
* Add security guide (jeremyevans)
* Make validates_type handle false values correctly (jeremyevans) (#636)
* Have associations, composition, serialization, and dirty plugins clear caches in some additional cases (jeremyevans) (#635)
* Add alter_table drop_foreign_key method for dropping foreign keys by column names (raxoft, jeremyevans) (#627)
* Allow creation of named column constraints via :*_constraint_name column options (jeremyevans)
* Handle drop_constraint :type=>:primary_key on H2 (jeremyevans)
* Handle infinite dates in the postgres adapter using Database#convert_infinite_timestamps (jeremyevans)
* Make the looser_typecasting extension use looser typecasting for decimal columns as well as integers and floats (jeremyevans)
* Do strict typecasting of decimal columns by default, similar to integer/float typecasting (jeremyevans)
=== 3.45.0 (2013-03-01)
* Remove bad model typecasting of money type on PostgreSQL (jeremyevans) (#624)
* Use simplecov instead of rcov for coverage testing on 1.9+ (jeremyevans)
* Make the Database#quote_identifier method public (jeremyevans)
* Make PostgreSQL metadata parsing handle tables with the same name in multiple schemas (jeremyevans)
* Switch query extension to use a proxy instead of Object#extend (chanks, jeremyevans)
* Remove Dataset#def_mutation_method instance method (jeremyevans)
* Make foreign key parsing on MySQL not pick up foreign keys in other databases (jeremyevans)
* Allow per-instance overrides of Postgres.force_standard_strings and .client_min_messages (jeremyevans) (#618)
* Add Sequel.tzinfo_disambiguator= to the named_timezones plugin for automatically handling TZInfo::AmbiguousTime exceptions (jeremyevans) (#616)
* Add Dataset#escape_like, for escaping LIKE metacharacters (jeremyevans) (#614) (see sketch below)
* The LIKE operators now use an explicit ESCAPE '\' clause for similar behavior across databases (jeremyevans)
* Make Database#tables and #views accept a :qualify option on PostgreSQL to return qualified identifiers (jeremyevans)
* Make json_serializer and xml_serializer plugins secure by default (jeremyevans)
* Address JSON.parse vulnerabilities (jeremyevans)
* Fix Dataset#from_self! to no longer create a self-referential dataset (jeremyevans)
* Use SQLSTATE or database error codes if available instead of regexp parsing for more specific DatabaseErrors (jeremyevans)
* Add unlimited_update plugin to work around MySQL warning in replicated environments (jeremyevans)
* Add the :retry_on and :num_retries transaction options for automatically retrying transactions (jeremyevans)
* Raise serialization failures/deadlocks as Sequel::SerializationFailure exceptions (jeremyevans)
* Support transaction isolation levels on Oracle and DB2 (jeremyevans)
* Support transaction isolation levels when using the JDBC transaction support (jeremyevans)
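
  A sketch of Dataset#escape_like, as noted above (user_input is a
  placeholder):

    term = DB[:items].escape_like(user_input)  # escapes %, _, and \
    DB[:items].where(Sequel.like(:name, "%#{term}%"))
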
=== 3.44.0 (2013-02-04)
* Speed up mysql2 adapter fetch speed by up to 50% when an identifier output method is used (jeremyevans)
* Speed up tinytds adapter fetch speed by up to 60% (jeremyevans)
* Expand columns_introspection extension to consider cached schema values in the database (jeremyevans)
* Expand columns_introspection extension to handle subselects (jeremyevans)
* Have #last and #paged_each for model datasets order by the model's primary key by default (jeremyevans)
* Improve emulated offset support to handle subqueries (jeremyevans)
* Remove use of Object#extend from the eager_each plugin (jeremyevans)
* Add support for temporary views on SQLite and PostgreSQL via the :temp option to create_view (chanks, jeremyevans)
* Emulate Database#create_or_replace_view if not supported directly (jeremyevans)
* Add Dataset#paged_each, for processing entire datasets without keeping all rows in memory (jeremyevans) (see sketch below)
* Add Sequel::ConstraintViolation exception class and subclasses for easier exception handling (jeremyevans)
* Fix use of identity_map plugin with many_to_many associations with right composite keys (chanks) (#603)
* Increase virtual row performance by using a shared VirtualRow instance (jeremyevans)
* Allow the :dataset association option to accept the association reflection as an argument (jeremyevans)
* Improve association method performance by caching intermediate dataset (jeremyevans)
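
  A sketch of Dataset#paged_each, as noted above (the events table and
  process method are placeholders):

    DB[:events].order(:id).paged_each do |row|
      process(row)  # rows are fetched in batches, not all at once
    end
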
=== 3.43.0 (2013-01-08)
* Move the #meta_def support for Database, Dataset, and Model to the meta_def extension (jeremyevans)
* Fix Database#copy_into on jdbc/postgres when an exception is raised (jeremyevans)
* Add core_refinements extension, providing refinement versions of Sequel's core extensions (jeremyevans)
* Make Database#copy_into raise a DatabaseError if the database signals an error in the postgres adapter (jeremyevans)
* Define respond_to_missing? where method_missing is defined and the object supports respond_to? (jeremyevans)
* Allow lambda procs with 0 arity as virtual row blocks on ruby 1.9 (jeremyevans)
* Handle schema-qualified row_types in the pg_array integration in the pg_row extension (jeremyevans) (#595)
* Support default_schema when resetting primary key sequences on PostgreSQL (jeremyevans) (#596)
* Allow treating tinyint(1) unsigned columns as booleans in the mysql adapters (jeremyevans)
* Support the jdbc-hsqldb gem in the jdbc adapter, since it has been updated to 2.2.9 (jeremyevans)
* Work with new jdbc-* gems that require manual driver loading (kares) (#598)
* Cast blobs correctly on DB2 when use_clob_as_blob is false (mluu, jeremyevans) (#594)
* Add date_arithmetic extension for database-independent date calculations (jeremyevans)
* Make Database#schema handle [host.]database.schema.table qualified tables on Microsoft SQL Server (jeremyevans)
* Add Dataset#split_qualifiers helper method for splitting a qualified identifier into an array of strings (jeremyevans)
* Make Database#schema_and_table always return strings for the schema and table (jeremyevans)
* Skip stripping of blob columns in the string_stripper plugin (jeremyevans) (#593)
* Allow Dataset#get to take an array to return multiple values, similar to map/select_map (jeremyevans)
* Default :prefetch_rows to 100 in the Oracle adapter (andrewhr) (#592)
=== 3.42.0 (2012-12-03)
* If an exception occurs while committing a transaction, attempt to rollback (jeremyevans)
* Support setting default string column sizes on a per-Database basis via default_string_column_size (jeremyevans)
* Reset Model.instance_dataset when extending the model's dataset (jeremyevans)
* Make the force_encoding plugin work with frozen strings (jeremyevans)
* Add Database#do on PostgreSQL for using the DO anonymous code block execution statement (jeremyevans)
* Remove Model.dataset_methods (jeremyevans)
* Allow subset to be called inside a dataset_module block (jeremyevans)
* Make Dataset#avg, #interval, #min, #max, #range, and #sum accept virtual row blocks (jeremyevans)
* Make Dataset#count use a subselect when the dataset has an offset without a limit (jeremyevans) (#587)
* Dump deferrable status of unique indexes on PostgreSQL (radford) (#583)
* Extend deferrable constraint support to all types of constraints, not just foreign keys (radford, jeremyevans) (#583)
* Support Database#copy_table and #copy_into on jdbc/postgres (bdon) (#580)
* Make Dataset#update not use a limit (TOP) on Microsoft SQL Server 2000 (jeremyevans) (#578)
=== 3.41.0 (2012-11-01)
* Add bin/sequel usage guide (jeremyevans)
* Make Dataset#reverse and #reverse_order accept virtual row blocks (jeremyevans)
* Add Sequel.delay for generic delayed evaluation (jeremyevans)
* Make uniqueness validations correctly handle nil values (jeremyevans)
* Support :unlogged option for create_table on PostgreSQL (JonathanTron) (#575)
* Add ConnectionPool#pool_type to get the type of connection pool in use (jeremyevans)
* Explicitly mark primary keys as NOT NULL on SQLite (jeremyevans)
* Add support for renaming primary key columns on MySQL (jeremyevans)
* Add connection_validator extension for automatically checking connections and transparently handling disconnects (jeremyevans)
* Add Database#valid_connection? for checking whether a given connection is valid (jeremyevans)
* Make dataset.limit(nil, nil) reset offset as well as limit (jeremyevans) (#571)
* Support IMMEDIATE/EXCLUSIVE/DEFERRED transaction modes on SQLite (Eric Wong)
* Major change in the Database <-> ConnectionPool interface (jeremyevans)
* Make touch plugin handle touching of many_*_many associations (jeremyevans)
* Make single_table_inheritance plugin handle non-bijective mappings (hannesg) (#567)
* Support foreign key parsing on MSSQL (munkyboy) (#564)
* Include SQL::AliasMethods in most pg_* extension objects (treydempsey, jeremyevans) (#563)
* Handle failure to create a prepared statement better in the postgres, mysql, and mysql2 adapters (jeremyevans) (#560)
* Treat clob columns as strings instead of blobs (jeremyevans)
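
(A sketch of Sequel.delay, noted above; the albums table is hypothetical. The block is evaluated at query time, not when the dataset is defined:)

    ds = DB[:albums].where(:updated_at => Sequel.delay{Time.now - 3600})
    ds.all  # uses Time.now as of this call
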
=== 3.40.0 (2012-09-26)
* Add a cubrid adapter for accessing CUBRID databases via the cubrid gem (jeremyevans)
* Add a jdbc/cubrid adapter for accessing CUBRID databases via JDBC on JRuby (jeremyevans)
* Return OCI8::CLOB values as ruby Strings in the Oracle adapter (jeremyevans)
* Use clob for String :text=>true types on Oracle, DB2, HSQLDB, and Derby (jeremyevans) (#555)
* Allow marshalling of Sequel::Postgres::HStore (jeremyevans) (#556)
* Quote channel identifier names when using LISTEN/NOTIFY on PostgreSQL (jeremyevans)
* Handle nil values when formatting bound variable arguments in the pg_row extension (jeremyevans) (#548)
* Handle nil values when parsing composite types in the pg_row extension (jeremyevans) (#548)
* Add :disconnect=>:retry option to Database#transaction, for automatically retrying the transaction on disconnect (jeremyevans) (example below)
* Greatly improved support on Microsoft Access (jeremyevans)
* Support Database#{schema,tables,views,indexes,foreign_key_list} when using ado/access adapter (ericgj) (#545, #546)
* Add ado/access adapter for accessing Microsoft Access via the ado adapter (jeremyevans)
* Combine disconnect error detection for mysql and mysql2 adapters (jeremyevans)
* Update the association_pks plugin to handle composite primary keys (chanks, jeremyevans) (#544)
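
(A sketch of the :disconnect=>:retry transaction option noted above; the counters table is hypothetical. The block may run more than once, so it should be idempotent:)

    DB.transaction(:disconnect => :retry) do
      DB[:counters].where(:id => 1).update(:value => 10)
    end
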
=== 3.39.0 (2012-09-01)
* Fix defaults_setter to set false default values (jeremyevans)
* Fix serial sequence query in Database#primary_key_sequence on PostgreSQL (jeremyevans) (#538)
* Add Database#copy_into when using postgres adapter with pg driver, for very fast inserts into tables (jeremyevans)
* Combine multiple alter_table operations into a single query where possible on MySQL and PostgreSQL (jeremyevans)
* Handle sets of alter_table operations on MySQL and MSSQL where later operations depend on earlier ones (jeremyevans)
* Add constraint_validations plugin for automatic validations of constraints defined by the constraint_validations extension (jeremyevans)
* Add constraint_validations extension for defining database constraints similar to validations (jeremyevans)
* Add Database#supports_regexp? for checking for regular expression support (jeremyevans)
* Add Sequel.trim for cross platform trim function (jeremyevans) (example below)
* Add Sequel.char_length for cross platform char_length function (jeremyevans)
* Fix caching of MySQL server version (hannesg) (#536)
* Allow overriding the convert_tinyint_to_bool setting on a per-Dataset basis in the mysql and mysql2 adapters (jeremyevans)
* Make ValidationFailed and HookFailed exceptions have model method that returns the related model (jeremyevans)
* Automatically wrap array arguments to most PGArrayOp methods in PGArrays (jeremyevans)
* Add set_column_not_null to alter table generator for marking a column as not null (jeremyevans)
* Default second argument of set_column_allow_null to true in alter table generator (jeremyevans)
* Allow Dataset#count to take an argument or virtual row block (jeremyevans)
* Attempt to recognize CURRENT_{DATE,TIMESTAMP} defaults and return them as Sequel::CURRENT_{DATE,TIMESTAMP} (jeremyevans)
* Make dataset.insert(model) assume a single column if model uses the pg_row plugin (jeremyevans)
* No longer handle model instances in plain (non-model) datasets when inserting (jeremyevans)
* Use subselects for model classes as tables in join methods in model datasets if the model's dataset isn't a simple select (jeremyevans)
* No longer handle model classes as tables in join/graph methods in plain (non-model) datasets (jeremyevans)
* Make Time->DateTime and DateTime->Time typecasts retain fractional seconds on ruby 1.8 (jeremyevans) (#531)
* Add bin/sequel -c support, for running code string instead of using an IRB prompt (jeremyevans)
* Allow subclasses plugin to take a block, which is called with each subclass created (jeremyevans)
* Add :where option to validates_unique, for custom uniqueness filters (jeremyevans)
* Add :connection_handling=>:disconnect option for threaded connection pools (jeremyevans)
* Add Postgres::PGRowOp#* for referencing the members of the composite type as separate columns (jeremyevans)
* Make identity_map plugin work with models lacking a primary key (jeremyevans)
* Recognize MySQL set type and default value (jeremyevans) (#529)
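
(A sketch of the Sequel.trim and Sequel.char_length functions noted above; the items table is hypothetical:)

    DB[:items].select(Sequel.trim(:name)).all
    DB[:items].where{Sequel.char_length(:name) > 10}.all
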
=== 3.38.0 (2012-08-01)
* Sequel now recognizes the double(x, y) and double(x, y) unsigned MySQL types (Slike9, jeremyevans) (#528)
* The swift subadapters now require swift-db-* instead of swift itself (deepfryed, jeremyevans) (#526)
* Add :textsize option to tinytds adapter to override the default TEXTSIZE (jeremyevans, wardrop) (#525)
* Support an output identifier method in the swift adapter (jeremyevans)
* Add Model#to_hash as an alias to Model#values (jeremyevans)
* When loading multiple pg_* extensions via Database#extension, only reset the conversion procs once (jeremyevans)
* Don't allow model typecasting from string to postgres array, hstore, or composite types (jeremyevans)
* Add pg_typecast_on_load plugin for converting advanced PostgreSQL types on load in the {jdbc,do,swift}/postgres adapters (jeremyevans)
* Make all adapters that connect to PostgreSQL store type conversion procs (jeremyevans)
* Add type oid to column schema on PostgreSQL (jeremyevans)
* Add pg_row plugin, for using Sequel::Model classes to represent PostgreSQL row-valued/composite types (jeremyevans)
* Add pg_row_ops extension for DSL support for PostgreSQL row-valued/composite types (jeremyevans)
* Add pg_row extension for dealing with PostgreSQL row-valued/composite types (jeremyevans)
* Allow custom registered array types in the pg_array extension to be Database instance specific (jeremyevans)
* Remove Sequel::SQL::IdentifierMethods (jeremyevans)
* Don't have the schema_dumper extension produce code that relies on the core_extensions (jeremyevans)
* Fix dropping of columns with constraints on Microsoft SQL Server (mluu, jeremyevans) (#515, #518)
* Don't have pg_* extensions add methods to core classes unless the core_extensions extension is loaded (jeremyevans)
* Use real boolean literals on derby 10.7+ (jeremyevans, matthauck) (#514)
* Work around JRuby 1.6 ruby 1.9 mode bug in Time#nsec for Time prepared statement arguments on jdbc (jeremyevans)
* Handle blob prepared statement arguments on jdbc/db2 and jdbc/oracle (jeremyevans)
* Handle blob values in the swift adapter (jeremyevans)
* Better handle nil prepared statement arguments on jdbc (jeremyevans) (#513)
* Make SQL::Blob objects handle as, cast, and lit methods even if the core extensions are not loaded (jeremyevans)
* Make #* with no arguments produce a ColumnAll for Identifier and QualifiedIdentifier (jeremyevans)
* Sequel.expr(:symbol) now returns Identifier, QualifiedIdentifier, or AliasedExpression instead of Wrapper (jeremyevans)
* Treat clob columns as string instead of blob on Derby (jeremyevans) (#509)
=== 3.37.0 (2012-07-02)
* Allow specifying eager_graph alias base on a per-call basis using an AliasedExpression (jeremyevans)
* Allow bin/sequel to respect multiple -l options for logging to multiple files (jeremyevans)
* Correctly handle cases where SCOPE_IDENTITY is nil in the odbc/mssql adapter (stnoonan, jeremyevans)
* Add pg_interval extension, for returning interval types as ActiveSupport::Duration instances (jeremyevans)
* Save a new one_to_one associated object once instead of twice in the nested_attributes plugin (jeremyevans)
* Don't add unnecessary filter condition when passing a new object to a one_to_one setter method (jeremyevans)
* Differentiate between column references and method references in many_through_many associations (jeremyevans)
* Use :qualify=>:deep option when joining tables in model association datasets (jeremyevans)
* Support :qualify=>:deep option to Dataset#join_table to qualify subexpressions in the expression tree (jeremyevans)
* Support :qualify=>false option to Dataset#join_table to not automatically qualify keys/values (jeremyevans)
* Make filtering by associations use column references and method references correctly (jeremyevans)
* Call super in list plugin before_create (jeremyevans) (#504)
* Do not automatically cast String to text in pg_auto_parameterize extension (jeremyevans)
* Support alter_table validate_constraint on PostgreSQL for validating constraints previously declared with NOT VALID (jeremyevans)
* Support :not_valid option when adding foreign key constraints on PostgreSQL (jeremyevans)
* Support exclusion constraints on PostgreSQL (jeremyevans)
* Allow for overriding the create/alter table generators used per Database object (jeremyevans)
* Make casting to Date/(Time/DateTime) use date/datetime functions on SQLite (jeremyevans)
* Add pg_range_ops extension for DSL support for PostgreSQL range operators and functions (jeremyevans)
* The json library is now required when running the plugin/extension specs (jeremyevans)
* Use change migrations instead of up/down migrations in the schema_dumper (jeremyevans)
* Dump unsigned integer columns with a check >= 0 constraint in the schema_dumper (stu314)
* Switch the :key_hash entry to the association :eager_loader option to use the method symbol(s) instead of the column symbol(s) (jeremyevans)
* Add :id_map entry to the hash passed to the association :eager_loader option, for easier custom eager loading (jeremyevans)
* Fix dumping of non-integer foreign key columns in the schema_dumper (jeremyevans) (#502)
* Add nested_attributes :fields option to be a proc that is called with the associated object (chanks) (#498)
* Add split_array_nil extension, for compiling :col=>[1, nil] to col IN (1) OR col IS NULL (jeremyevans)
* Add Database#extension and Dataset#extension for loading extension modules into objects automatically (jeremyevans) (example below)
* Respect an existing dataset limit when updating on Microsoft SQL Server (jeremyevans)
* Add pg_range extension, for dealing with PostgreSQL 9.2+ range types (jeremyevans)
* Make pg_array extension convert array members when typecasting Array to PGArray (jeremyevans)
* Make jdbc/postgres adapter convert array type elements (e.g. date[] arrays are returned as arrays of Date instances) (jeremyevans)
* Make the pg_inet extension handle inet[]/cidr[]/macaddr[] types when used with the pg_array extension (jeremyevans)
* Make the pg_json extension handle json[] type when used with the pg_array extension (jeremyevans)
* Fix schema parsing of h2 clob types (jeremyevans)
* Make the pg_array extension handle array types for scalar types handled by the native postgres adapter (jeremyevans)
* Generalize handling of array types in the pg_array extension, allowing easy support of custom array types (jeremyevans)
* Remove type conversion of int2vector and money types on PostgreSQL, since previous conversions were wrong (jeremyevans)
* Add eval_inspect extension, which makes Sequel::SQL::Expression#inspect attempt to return a string suitable for eval (jeremyevans)
* When emulating offset with ROW_NUMBER, default to ordering by all columns if no specific order is given (stnoonan, jeremyevans) (#490)
* Work around JRuby 1.6 ruby 1.9 mode bug in Time -> SQLTime conversion (jeremyevans)
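
(A sketch of Database#extension and Dataset#extension noted above; pg_array and columns_introspection are existing Sequel extensions, and the items table is hypothetical:)

    DB.extension(:pg_array)                            # Database-wide
    ds = DB[:items].extension(:columns_introspection)  # single dataset
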
=== 3.36.1 (2012-06-01)
* Fix jdbc adapter when DriverManager#getConnection fails (aportnov) (#488)
=== 3.36.0 (2012-06-01)
* Use Bignum generic type when dumping unsigned integer types that could potentially overflow 32-bit signed integer values (stu314)
* Support :transform option in the nested_attributes plugin, for automatically preprocessing input hashes (chanks)
* Support :unmatched_pk option in the nested_attributes plugin, can be set to :create for associated objects with natural keys (chanks)
* Support composite primary keys in the nested_attributes plugin (chanks)
* Allow Model#from_json in the json_serializer plugin to use set_fields if a :fields option is given (jeremyevans)
* Support :using option to set_column_type on PostgreSQL, to force a specific conversion from the old value to the new value (jeremyevans)
* Drop indexes in the reverse order that they were added in the schema dumper (jeremyevans)
* Add :index_names option to schema dumper method, can be set to false or :namespace (stu314, jeremyevans)
* Add Database#global_index_namespace? for checking if index namespace is global or per table (jeremyevans)
* Fix typecasting of time columns on jdbc/postgres, before could be off by a millisecond (jeremyevans)
* Add document explaining Sequel's object model (jeremyevans)
* Attempt to detect more disconnect errors in the mysql2 adapter (jeremyevans)
* Add is_current? and check_current to the migrators, for checking/raising if there are unapplied migrations (pvh, jeremyevans) (#487) (example below)
* Add a jdbc subadapter for the Progress database (Michael Gliwinski, jeremyevans)
* Add pg_inet extension, for working with PostgreSQL inet and cidr types (jeremyevans)
* Fix bug in model column setters when passing an object that raises an exception for ==('') (jeremyevans)
* Add eager_each plugin, which makes each on an eagerly loaded dataset do eager loading (jeremyevans)
* Fix bugs when parsing foreign keys for tables with explicit schema on PostgreSQL (jeremyevans)
* Remove Database#case_sensitive_like on SQLite (jeremyevans)
* Remove Database#single_value in the native sqlite adapter (jeremyevans)
* Make Dataset#get work with nil and false arguments (jeremyevans)
* Make json_serializer plugin respect :root=>:collection and :root=>:instance options (jeremyevans)
* Support savepoints in prepared transactions on MySQL 5.5.23+ (jeremyevans)
* Add pg_json extension, for working with PostgreSQL 9.2's new json type (jeremyevans)
* In the optimistic locking plugin, make refresh and save after a failed save work correctly (jeremyevans)
* Support partial indexes on Microsoft SQL Server 2008 (jeremyevans)
* Make Database#call pass blocks (jeremyevans)
* Support :each when preparing statements, useful for iterating over large datasets (jeremyevans)
* Support :if_exists and :cascade options when dropping indexes on PostgreSQL (jeremyevans)
* Support :concurrently option when adding and dropping indexes on PostgreSQL (jeremyevans)
* Make Database#transaction on PostgreSQL recognize :synchronous, :read_only, and :deferrable options (jeremyevans)
* Support :sql_mode option when connecting to MySQL (jeremyevans)
* Apply :timeout MySQL connection setting on do, jdbc, and swift adapters (jeremyevans)
* Don't set Sequel::Model.db automatically when creating an anonymous class with an associated database object (jeremyevans)
* Add :connection_handling=>:queue option to the threaded connection pools, may reduce chance of stale connections (jeremyevans) (#481)
* Handle JRuby 1.7 exception handling changes when connecting in the jdbc adapter (jeremyevans) (#477)
* Make *_to_one association setters be noops if you pass a value that is the same as the cached value (jeremyevans)
* Make Model#refresh return self when using dirty plugin (jeremyevans)
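
(A sketch of the migrator is_current?/check_current methods noted above; the migration directory path is hypothetical:)

    Sequel.extension :migration
    Sequel::Migrator.is_current?(DB, "db/migrations")   # => true or false
    Sequel::Migrator.check_current(DB, "db/migrations") # raises if unapplied migrations exist
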
=== 3.35.0 (2012-05-01)
* Correctly handle parsing schema for tables in other databases on MySQL (jeremyevans)
* Add DSL support for the modulus operator (%), similar to the bitwise operators (jeremyevans)
* Fix possible thread-safety issues on non-GVL ruby implementations (jeremyevans)
* Allow truncation of multiple tables at the same time on PostgreSQL (jeremyevans)
* Allow truncate to take a :cascade, :only, and :restart options on PostgreSQL (hgimenez, jeremyevans)
* Allow json and xml serializers to support :array option in class to_json method to serialize existing array of model instances (jeremyevans)
* Add dirty plugin, which saves the initial value of the column when the value is changed (jeremyevans)
* create_table now supports an :as option to create a table directly from the results of a query (jeremyevans) (example below)
* The :index option when creating columns in the schema generator can now be a hash of options passed to index (jeremyevans)
* Parsing the default column values in the oracle adapter no longer requires superuser privileges (Jason Hines)
* Add Database#cache_schema to allow schema caching to be turned off, useful for development modes where models are reloaded (jeremyevans)
* Correctly handle errors that occur when rolling back transactions (jeremyevans)
* Recognize identity type in the schema dumper (jeremyevans) (#468)
* Don't assign instance variables to Java objects, for future JRuby 2.0 support (jeremyevans) (#466)
* Use date and timestamp formats that are multilanguage and not DATEFORMAT dependent on Microsoft SQL Server (jeremyevans)
* Add Database#log_exception, which logs when a query raises an exception, for easier overriding (jeremyevans) (#465)
* Make the migrators only use transactions by default if the database supports transactional DDL (jeremyevans)
* Add Database#supports_transactional_ddl? for checking if DDL statements can be rolled back in transactions (jeremyevans)
* Don't use auto parameterization when using cursors in the pg_auto_parameterize extension (jeremyevans) (#463)
* No longer escape backslashes in strings by default, fixes doubled backslashes on some adapters (jeremyevans)
* Escape backslash-carriage return-line feed in strings on Microsoft SQL Server (mluu, jeremyevans) (#462, #461)
* Remove Array#all_two_pairs? (jeremyevans)
* Remove Dataset#disable_insert_returning on PostgreSQL (jeremyevans)
* Remove support for PostgreSQL <8.2 (jeremyevans)
* Remove support for Ruby <1.8.7 (jeremyevans)
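
(A sketch of the create_table :as option noted above; the items table and its created_at column are hypothetical:)

    DB.create_table(:recent_items,
      :as => DB[:items].where{created_at > Date.today - 7})
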
=== 3.34.1 (2012-04-02)
* Fix bug in optimization of primary key lookup (jeremyevans) (#460)
=== 3.34.0 (2012-04-02)
* Fix connection failures when connecting to PostgreSQL with newer versions of swift (jeremyevans)
* Fix using a bound variable for a limit in the ibmdb adapter on ruby 1.9 (jeremyevans)
* primary_key :column, :type=>Bignum now works correctly on H2 (jeremyevans)
* Add query_literals extension for treating regular strings like literal strings in select, group, and order methods (jeremyevans)
* Actually use RETURNING for deletes/updates on PostgreSQL 8.2-9.0 (jeremyevans)
* You can now require 'sequel/no_core_ext' to load Sequel without the core extensions (jeremyevans)
* The core extensions have now been made a real Sequel extension (still loaded by default) (jeremyevans)
* VirtualRow#` has been added for creating literal strings (jeremyevans)
* VirtualRow instances now have operator methods defined {+,-,*,/,&,|,~,>,<,>=,<=} (jeremyevans)
* Array#all_two_pairs? is now deprecated and will be removed after 3.34.0 is released (jeremyevans)
* All of Sequel's core extensions now have equivalent methods defined on the Sequel module (jeremyevans)
* Add Sequel.core_extensions? for checking if the core extensions are enabled (jeremyevans)
* Increase speed of Model#this by about 85% (jeremyevans)
* Increase speed of Model#delete and #destroy by about 75% for models with simple datasets (jeremyevans)
* Make nested_attributes plugin work when destroying/removing associated objects when strict_param_setting is true (r-stu31) (#455)
* Dataset#disable_insert_returning on PostgreSQL is now deprecated and will be removed after 3.34.0 is released (jeremyevans)
* Double speed of Model[pk] for models with simple datasets (most models) (jeremyevans)
* Support for ruby <1.8.7 and PostgreSQL <8.2 is now deprecated and will be removed after 3.34.0 is released (jeremyevans)
* Add select_remove extension which adds Dataset#select_remove for removing columns/expressions from a dataset selection (jeremyevans)
* Add static_cache plugin, for statically caching all model instances, useful for model tables that don't change (jeremyevans)
* Add Model#freeze implementation to get a working frozen model object (jeremyevans)
* Add many_to_one_pk_lookup plugin, for using a simple primary key lookup for many_to_one associations (great with caching) (jeremyevans)
* Use bigint type instead of integer for Bignum generic type on SQLite, except for auto incrementing primary keys (jeremyevans)
* Add Database#dump_foreign_key_migration for just dumping foreign key constraints to the schema dumper extension (jeremyevans)
* Dump foreign key constraints by default when using the schema dumper extension (jeremyevans)
* Don't raise an error when no indexes exist for a table when calling Database#indexes on the jdbc/sqlite adapter (jeremyevans)
* Copy composite foreign key constraints when emulating alter_table on SQLite (jeremyevans)
* Add Database#foreign_key_list for getting foreign key metadata for a given table on SQLite, MySQL, and PostgreSQL (jeremyevans)
* Add Dataset#to_hash_groups and #select_hash_groups for getting a hash with arrays of matching values (jeremyevans)
* Model#set_fields and #update_fields now respect :missing=>:skip and :missing=>:raise options for handling missing values (jeremyevans)
* The :on_update and :on_delete entries for foreign key can now take strings, which are used literally (jeremyevans)
* Add Database#convert_infinite_timestamps to the postgres adapter, can be set to :nil, :string, or :float (jeremyevans) (#454)
* Add Database#create_join_table and #drop_join_table for easily creating many-to-many join tables (jeremyevans) (example below)
* Fix Dataset#group_rollup/#group_cube on Microsoft SQL Server 2005 (jeremyevans)
* Add Dataset#explain on MySQL (jeremyevans)
* Change formatting and return value of Dataset#explain on SQLite (jeremyevans)
* Recognize unsigned tinyint types in the schema dumper (jeremyevans)
* Add null_dataset extension, for creating a dataset that never issues a database query (jeremyevans)
* Database#uri and #url now return nil if a connection string was not used when connecting (jeremyevans) (#453)
* Add schema_caching extension, to speed up loading a large number of models by loading cached schema information from a file (jeremyevans)
* Add Dataset#multi_replace on MySQL, allowing you to REPLACE multiple rows in a single query (danielb2) (#452)
* Double speed of Model#new with empty hash, and quadruple speed of Model#set with empty hash (jeremyevans)
* Allow SQL::QualifiedIdentifier objects to contain arbitrary Sequel expressions (jeremyevans)
* Add pg_hstore_ops extension, for easily calling PostgreSQL hstore functions and operators (jeremyevans)
* Add Sequel::SQL::Wrapper class for easier dealing with wrapper objects (jeremyevans)
* Add pg_hstore extension, for dealing with the PostgreSQL hstore (key/value table) type (jeremyevans)
* Add Database#type_supported? method on PostgreSQL for checking if the given type symbol/string is supported (jeremyevans)
* Convert Java::OrgPostgresqlUtil::PGobject instances to ruby strings in jdbc/postgres type conversion (jeremyevans)
* Allow PlaceholderLiteralString objects to store placeholder string as an array for improved performance (jeremyevans)
* Work around ruby-pg bugs 111 (Time/DateTime fractional seconds) and 112 ("\0" in bytea) in bound variable arguments (jeremyevans) (#450)
* Handle fractional seconds correctly for time type on jdbc/postgres (jeremyevans)
* Add pg_array_ops extension, for easily calling PostgreSQL array functions and operators (jeremyevans)
* Add SQL::Subscript#[] for using nested subscripts (accessing member of multi-dimensional array) (jeremyevans)
* Add Model.cache_anonymous_models accessor so you can disable the caching of classes created by Sequel::Model() (jeremyevans)
* Convert PostgreSQL JDBC arrays to Ruby arrays in the jdbc/postgres adapter (jeremyevans)
* The typecast_on_load extension now works correctly when saving new model objects when insert_select is enabled (jeremyevans)
* Add pg_array extension, for dealing with string and numeric PostgreSQL arrays (jeremyevans)
* Add Database#reset_conversion_procs to the postgres adapter, for use with extensions that modify the default conversion procs (jeremyevans)
* Escape table and schema names when getting primary key or sequence information on PostgreSQL (jeremyevans)
* Escape identifiers when quoting on MySQL and SQLite (jeremyevans)
* Add Database#supports_drop_table_if_exists? for checking if DROP TABLE supports IF EXISTS (jeremyevans)
* Add Database#drop_table? for dropping a table if it already exists (jeremyevans)
* Log full SQL string by default for prepared statements created automatically by model prepared_statements* plugins (jeremyevans)
* Add ability for prepared statements to log full SQL string (jeremyevans)
* Add pg_statement_cache extension, for automatically preparing queries when using postgres adapter with pg driver (jeremyevans)
* Add pg_auto_parameterize extension, for automatically parameterizing queries when using postgres adapter with pg driver (jeremyevans)
* Add ConnectionPool#disconnection_proc= method, to modify disconnection_proc after the pool has been created (jeremyevans)
* Add ConnectionPool#after_connect= method, to modify after_connect proc after the pool has been created (jeremyevans)
* Add ConnectionPool#all_connections method, which yields all available connections in the pool (jeremyevans)
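
(A sketch of Database#create_join_table and #drop_join_table noted above; the albums and artists tables are hypothetical:)

    # Creates an albums_artists table with a composite primary key
    # on (album_id, artist_id) and foreign keys to both tables
    DB.create_join_table(:album_id => :albums, :artist_id => :artists)
    DB.drop_join_table(:album_id => :albums, :artist_id => :artists)
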
=== 3.33.0 (2012-03-01)
* Add ability to force or disable transactions completely in the migrators using the :use_transactions option (jeremyevans)
* Add ability to turn off transactions for migrations by calling no_transaction inside the Sequel.migration block (jeremyevans)
* Allow specifically choosing which migrator to use via TimestampMigrator.apply or IntegerMigrator.apply (jeremyevans)
* Add arbitrary_servers extension to allow the use of arbitrary servers/shards by providing a hash of options as the server (jeremyevans)
* Add server_block extension to scope database access inside the block to a specific default server/shard (jeremyevans)
* Respect :collate column option on MySQL (jeremyevans) (#445)
* Use Mysql2::Client::FOUND_ROWS to get accurate number of rows matched in the mysql2 adapter (jeremyevans)
* Use Mysql#info to get accurate number of rows matched in the mysql adapter (jeremyevans)
* Make mock adapter with specific SQL dialect use appropriate defaults for quoting identifiers (jeremyevans)
* Make list plugin automatically set position field value on creation if not already set (jeremyevans)
* Add Database#integer_booleans setting on SQLite to store booleans as integers (jeremyevans)
* Typecast columns stored as integers/floats in the SQLite adapter (jeremyevans)
* In the instance_hooks plugin, (before|after)_*_hook instance methods now return self (jeremyevans)
* Handle NaN, Infinity, and -Infinity floats on PostgreSQL (kf8a, jeremyevans) (#444)
* Support an :sslmode option when using the postgres adapter with the pg driver (jeremyevans)
* Add Database#create_schema and #drop_schema to the shared postgres adapter (tkellen, jeremyevans) (#440)
* Add Database#supports_savepoints_in_prepared_transactions?, false on MySQL >=5.5.12 (jeremyevans) (#437)
* Support an identifier output method in the mysql2 adapter (jeremyevans)
* Make foreign key creation work on MySQL with InnoDB engine without specifying :key option (jeremyevans)
* Allow disabling use of sudo with SUDO='' when running the rake install/uninstall tasks (jeremyevans) (#433)
=== 3.32.0 (2012-02-01)
* Make serialization_modification_detection plugin work correctly with new objects and after saving existing objects (jeremyevans) (#432)
* Make refreshes after model creation clear the deserialized values in the serialization plugin (jeremyevans)
* Add Dataset#update_ignore on MySQL, for using UPDATE IGNORE in queries (danielb2) (#429)
* Allow select_map/select_order_map to take both a column argument and a block (jeremyevans)
* Fix virtual row block handling in select_map/select_order_map if block returns an array (jeremyevans) (#428)
* Add Sequel.empty_array_handle_nulls setting, can be set to false for possible better performance on some databases (jeremyevans)
* Change exclude(:b=>[]) to not return rows where b is NULL (jeremyevans) (#427)
* Support ActiveModel 3.2 in the active_model plugin, by adding support for to_partial_path (jeremyevans)
* Fix metadata methods (e.g. tables) on Oracle when custom identifier input methods are used (jeremyevans)
* Fix Database#indexes on DB2 (jeremyevans)
* Make DateTime/Time columns with Sequel::CURRENT_TIMESTAMP default values use timestamp column on MySQL (jeremyevans)
* Wrap column default values in extra parens on SQLite, fixes some cases (jeremyevans)
* Make Database#indexes not include primary key indexes on Derby, HSQLDB, Oracle, and DB2 using the jdbc adapter (jeremyevans)
* Support Database#indexes in shared MSSQL adapter (jeremyevans)
* Support :include option when creating indexes on MSSQL, for storing column values in the index (crawlik) (#426)
* Make set_column_type not modify defaults and NULL/NOT NULL setting on MSSQL, H2, and SQLite (jeremyevans)
* Qualify identifiers when filtering/excluding by associations (jeremyevans)
* Make table_exists? better handle tables where you don't have permissions for all columns (jeremyevans) (#422)
* Using new association options, support associations based on columns that clash with ruby method names (jeremyevans) (#417)
* Add use_after_commit_rollback setting to models, can be turned off to allow model usage with prepared transactions (jeremyevans)
* Fix alter table emulation on SQLite when foreign keys reference the table being altered (jeremyevans)
* Fix progress shared adapter, broken since the dataset literalization refactoring (jeremyevans) (#414)
* Support :map and :to_hash prepared statement types (jeremyevans)
* Make Dataset#naked! work correctly (jeremyevans)
* Remove Dataset#paginate!, as it was broken (jeremyevans)
* Fix query extension to not break usage of #clone without arguments (jeremyevans) (#413)
=== 3.31.0 (2012-01-03)
* Dataset#from no longer handles :a__b__c___d as a.b.c AS d (jeremyevans)
* Support many_to_one associations with the same name as their column, using the :key_column option (jeremyevans)
* Add Model.def_column_alias for defining alias methods for columns (jeremyevans) (example below)
* Support :server option in Dataset#import and #multi_insert (jeremyevans)
* Respect existing RETURNING/OUTPUT clauses in #import/#multi_insert on PostgreSQL/MSSQL (jeremyevans)
* Support :return=>:primary_key option to Dataset#import and #multi_insert (jeremyevans)
* Correctly handle return value for Dataset#insert with column array and value array on PostgreSQL <8.2 (jeremyevans)
* Dataset#insert_multiple now returns an array of inserted primary keys (jeremyevans) (#408)
* Support RETURNING with DELETE and UPDATE on PostgreSQL 8.2+ (funny-falcon)
* Raise error if tables from two separate schema are detected when parsing the schema for a single table on PostgreSQL (jeremyevans)
* Handle clob types as string instead of blob on H2 (jeremyevans)
* Add database type support to the mock adapter, e.g. mock://postgres (jeremyevans)
* Allow creation of full text indexes on Microsoft SQL Server, but you need to provide a :key_index option (jeremyevans)
* Allow Dataset#full_text_search usage with prepared statements (jeremyevans)
* Make Dataset#exists use a PlaceholderLiteralString so it works with prepared statements (jeremyevans)
* Fix Dataset#empty? for datasets with offsets when offset support is emulated (jeremyevans)
* Add Dataset#group_rollup and #group_cube methods for GROUP BY ROLLUP and CUBE support (jeremyevans)
* Add support for custom serialization formats to the serialization plugin (jeremyevans)
* Support a :login_timeout option in the jdbc adapter (glebpom) (#406)
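
(A sketch of Model.def_column_alias noted above; the Item model and its type column are hypothetical:)

    class Item < Sequel::Model
      # Access the "type" column via item_type/item_type= instead
      def_column_alias(:item_type, :type)
    end
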
=== 3.30.0 (2011-12-01)
* Handle usage of on_duplicate_key_update in MySQL prepared statements (jeremyevans) (#404)
* Make after_commit and after_rollback respect :server option (jeremyevans) (#401)
* Respect :connect_timeout option in the postgres adapter when using pg (glebpom, jeremyevans) (#402)
* Make Dataset#destroy for model datasets respect dataset shard when using a transaction (jeremyevans)
* Make :server option to Model#save set the shard to use (jeremyevans)
* Move Model#set_server from the sharding plugin to the base plugin (jeremyevans)
* Add :graph_alias_base association option for setting base name to use for table aliases when eager graphing (jeremyevans)
* Make ILIKE work correctly on Microsoft SQL Server if database/column collation is case sensitive (jfirebaugh) (#398)
* When starting a new dataset graph, assume existing selection is the columns to select from the current table (jeremyevans)
* Allow specifying nanoseconds and offsets when converting a hash or array to a timestamp (jeremyevans, jfirebaugh) (#395)
* Improve performance when converting Java types to ruby types in the jdbc adapter (jeremyevans, jfirebaugh) (#395)
* Fix tinytds adapter if DB.identifier_output_method = nil (jeremyevans)
* Explicitly order by the row number column when emulating offsets (jfirebaugh) (#393)
* Fix Dataset#graph and #eager_graph modifying the receiver if the receiver is already graphed (jeremyevans) (#392)
* Change dataset literalization to an append-only-all-the-way-down design (jeremyevans)
=== 3.29.0 (2011-11-01)
* Allow Model.dataset_module to take a Module instance (jeremyevans)
* Apply Model.[] optimization in more cases (jeremyevans)
* Fix Model.[] optimization when dataset uses identifier_input_method different than database (jeremyevans)
* Work around pragma bug on jdbc/sqlite when emulating alter table support (jeremyevans)
* Database#<< and Dataset#<< now return self so they can be safely chained (jeremyevans)
* Fully support using an aliased table name as the :join_table option for a many_to_many association (jeremyevans)
* Make like case sensitive on SQLite and Microsoft SQL Server (use ilike for case insensitive matching) (jeremyevans)
* Add Database#extend_datasets for the equivalent of extending of the Database object's datasets with a module (jeremyevans) (example below)
* Speed up Dataset #map, #to_hash, and related methods if an array of symbols is given (jeremyevans)
* Add Database#dataset_class for modifying the class used for datasets for a single Database object (jeremyevans)
* Plugins that override Model.load should be modified to override Model.call instead (jeremyevans)
* Speed up loading model objects from the database by up to 7-16% (jeremyevans)
* Create accessor methods for all columns in a model's table, even if the dataset doesn't select the columns (jeremyevans)
* Add mock adapter for better mocking of a database connection (jeremyevans)
* Have models pass their dataset instead of table name to Database#schema (jeremyevans)
* Allow Database#schema to take a dataset as the table argument, and use its identifier input/output methods (jeremyevans)
* Significant improvements to the db2 adapter (jeremyevans)
* Handle methods with names that can't be called directly in Model.def_dataset_method (jeremyevans)
* Add dataset_associations plugin for making dataset methods that return datasets of associated objects (jeremyevans)
* Don't allow Model.def_dataset_method to override private model methods (jeremyevans)
* Parse primary key information from system tables in the shared MSSQL adapter (jeremyevans)
* Fix handling of composite primary keys when emulating alter table operations on SQLite (jeremyevans)
* Emulate add_constraint and drop_constraint alter table operations on SQLite (jeremyevans)
* Apply the correct pragmas when connecting to SQLite via the Amalgalite and Swift adapters (jeremyevans)
* Fix bound variable usage for some types (e.g. Date) when used outside of prepared statements on SQLite (jeremyevans)
* Work around SQLite column naming bug when using subselects (jeremyevans)
* Make prepared_statements plugin work with adapters that require type specifiers for variable placeholders, such as oracle (jeremyevans)
* Add savepoint support to the generic JDBC transaction support (used by 6 jdbc subadapters) (jeremyevans)
* Add native prepared statement support to the oracle adapter (jeremyevans)
* Support sharding correctly by default when using transactions in model saving/destroying (jeremyevans)
* Add Database#in_transaction? method for checking if you are already in a transaction (jeremyevans)
* Add after_commit, after_rollback, after_destroy_commit, and after_destroy_rollback hooks to Model objects (jeremyevans)
* Add after_commit and after_rollback hooks to Database objects (jeremyevans) (#383)
* Support savepoints inside prepared transactions on MySQL (jeremyevans)
* Support opening transactions to multiple shards of the same Database object in the same Thread (jeremyevans)
* Add Sequel.transaction for running transactions on multiple databases at the same time (jeremyevans)
* Support :rollback => :always option in Database#transaction to always rollback the transaction (jeremyevans)
* Support :rollback => :reraise option in Database#transaction to reraise the Sequel::Rollback exception (jeremyevans)
* Add support for connecting to Apache Derby databases using the jdbc adapter (jeremyevans)
* Add support for connecting to HSQLDB databases using the jdbc adapter (jeremyevans)
* Fix inserting all default values into a table on DB2 (jeremyevans)
* Add :qualify option to many_to_one associations for whether to qualify the primary key column with the associated table (jeremyevans)
* Modify rcte_tree plugin to use column aliases if recursive CTEs require them (jeremyevans)
* Add Dataset#recursive_cte_requires_column_aliases? method to check if you must provide an argument list for a recursive CTE (jeremyevans)
* Much better support for Oracle in both the oci8-based oracle adapter and the jdbc oracle subadapter (jeremyevans)
* Handle CTEs in subselects in more places on databases that don't natively support CTEs in subselects (jeremyevans)
* Change Dataset#to_hash to not call the row_proc if 2 arguments are given (jeremyevans)
* Change Dataset#map to not call the row_proc if an argument is given (jeremyevans)
* Make Dataset#select_map and #select_order_map return an array of single element arrays if given an array with a single symbol (jeremyevans)
* Make Dataset#columns work correctly on jdbc, odbc, ado, and dbi adapters when using an emulated offset on MSSQL and DB2 (jeremyevans)
* Add Database#listen and #notify to the postgres adapter, for LISTEN and NOTIFY support (jeremyevans)
* Emulate the bitwise compliment operator on h2 (jeremyevans)
* Fix improper handling of emulated bitwise operators with more than two arguments (jeremyevans)
* Allow convert_invalid_date_time to be set on a per-Database basis in the mysql adapter (jeremyevans)
* Allow convert_tinyint_to_bool to be set on a per-Database basis in the mysql and mysql2 adapters (jeremyevans)
* Allow per-Database override of the type conversion procs on the mysql, sqlite, and ibmdb adapters (jeremyevans)
* Add Database#timezone accessor, for overriding Sequel.database_timezone per Database object (jeremyevans)
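
(A sketch of Database#extend_datasets noted above; the method name and albums table are hypothetical:)

    DB.extend_datasets do
      def sorted_by_id
        order(:id)
      end
    end
    DB[:albums].sorted_by_id.all
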
=== 3.28.0 (2011-10-03)
* Add firebird jdbc subadapter (jeremyevans)
* Add SQLTime.create method for easier creation of SQLTime instances (jeremyevans)
* Make Dataset#with_pk use a qualified primary key, so it works correctly on joined datasets (jeremyevans)
* Support the :limit association option when using eager_graph (jeremyevans)
* Fix eager loading via eager_graph of one_to_one associations that match multiple associated objects and use order to pick the first one (jeremyevans)
* Make after_load association hooks apply when using eager_graph (jeremyevans)
* Make Dataset#with_sql treat a symbol as a first argument as a method name to call to get the SQL (jeremyevans)
* Make Dataset #delete, #insert, #update return array of plain hashes if block not given and Dataset#returning is used (jeremyevans)
* Allow Dataset #map, #to_hash, #select_map, #select_order_map, and #select_hash to take arrays of columns instead of single columns (jeremyevans)
* Make Dataset #delete, #insert, #update yield plain hashes to a block if Dataset#returning is used (jeremyevans)
* Add Dataset#returning for setting the columns to return in INSERT/UPDATE/DELETE statements, used by PostgreSQL 9.1 (jeremyevans) (example below)
* Support WITH clause in INSERT/UPDATE/DELETE on PostgreSQL 9.1+ (jeremyevans)
* Add Database#copy_table for PostgreSQL COPY support when using the postgres adapter with pg (jeremyevans)
* Support CREATE TABLE IF NOT EXISTS on PostgreSQL 9.1+ (jeremyevans)
* Add support for Sequel::Model.default_eager_limit_strategy to set the default :eager_limit_strategy for *_many associations (jeremyevans)
* Add support for an :eager_limit_strategy => :correlated_subquery value for limiting using correlated subqueries (jeremyevans)
* Allow use of a dataset that uses the emulated offset support on MSSQL and DB2 in an IN subquery by using a nested subquery (jeremyevans)
* Allow use of a dataset that uses LIMIT in an IN subquery on MySQL by using a nested subquery (jeremyevans)
* Work around serious ActiveSupport bug in Time.=== that breaks literalization of Time values (jeremyevans)
* Speed up SQL operator methods by using module_eval instead of define_method (jeremyevans)
* Support sql_(boolean,number,string) methods on ComplexExpressions, allowing you to do (x + 1).sql_string + 'a' for (x + 1) || 'a' (jeremyevans)
* Don't disallow SQL expression creation based on types, leave that to the database server (jeremyevans)
* Make :column [&|] 1 use an SQL bitwise [&|] expression instead of a logical (AND|OR) expression (jeremyevans)
* Make :column + 'a' use an SQL string concatenation expression instead of an addition expression (jeremyevans)
* Fix :time typecasting from Time to SQLTime for fractional seconds on ruby 1.9 (jeremyevans)
* Have Dataset#select_append check supports_select_all_and_column? and select all from all FROM and JOIN tables if no columns selected (jeremyevans)
* Add Dataset#supports_select_all_and_column? for checking if you can do SELECT *, column (jeremyevans)
* Add support for an :eager_limit_strategy => :window_function value for limiting using window functions (jeremyevans)
* Add support for an :eager_limit_strategy => :distinct_on value for one_to_one associations for using DISTINCT ON (jeremyevans)
* Add support for an :eager_limit_strategy association option, for manual control over how limiting is done (jeremyevans)
* Add Dataset#supports_ordered_distinct_on? for checking if the dataset can use distinct on while respecting order (jeremyevans)
* Add support for the association :limit option when eager loading via .eager for *_many associations (jeremyevans)
* Add db2 jdbc subadapter (jeremyevans)
* Fix the db2 adapter so it actually works (jeremyevans)
* Add ibmdb adapter for accessing DB2 (roylez, jeremyevans) (#376)
* Add much better support for DB2 databases (roylez, jeremyevans) (#376)
* Handle SQL::AliasedExpressions and SQL::JoinClauses in Dataset#select_all (jeremyevans)
* Speed up type translation slightly in mysql, postgres, and sqlite adapters (jeremyevans)
* Add Dataset#supports_cte_in_subqueries? for checking whether database supports WITH in subqueries (jeremyevans)
* Allow Model.set_dataset to accept Sequel::LiteralString arguments as table names (jeremyevans)
* Association :after_load hooks in lazy loading are now called after the associated objects have been cached (jeremyevans)
* Emulate handling of extract on MSSQL, using datepart (jeremyevans)
* Emulate handling of extract on SQLite, but you need to set Database#use_timestamp_timezones = false (jeremyevans)
* Abstract handling of ComplexExpressionMethods#extract so that it can work on databases that don't implement extract (jeremyevans)
* Emulate xor operator on SQLite (jeremyevans)
* Add Dataset#supports_where_true? for checking if the database supports WHERE true (or WHERE 1 if 1 is true) (jeremyevans)
* Fix eager loading via eager of one_to_one associations that match multiple associated objects and use order to pick the first one (jeremyevans)
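
(A sketch of Dataset#returning noted above, assuming PostgreSQL 9.1+ and a hypothetical items table. Without a block, the returned rows come back as an array of plain hashes:)

    ds = DB[:items].returning(:id)
    ds.insert(:name => 'abc')  # => [{:id => 1}]
    ds.where(:id => 1).delete  # => [{:id => 1}]
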
=== 3.27.0 (2011-09-01)
* Add support for native prepared statements to the tinytds adapter (jeremyevans)
* Add support for native prepared statements and stored procedures to the mysql2 adapter (jeremyevans)
* Support dropping primary key, foreign key, and unique constraints on MySQL via the drop_constraint :type option (jeremyevans)
* Add Sequel::SQLTime class for handling SQL time columns (jeremyevans)
* Typecast DateTime objects to Date for date columns (jeremyevans)
* When typecasting Date objects to timestamps, make the resulting objects always have no fractional date components (jeremyevans)
* Add Model.dataset_module for simplifying many def_dataset_method calls (jeremyevans) (example below)
* Make prepared_statements_safe plugin work on classes without datasets (jeremyevans)
* Make Dataset#hash work correctly when referencing SQL::Expression instances (jeremyevans)
* Handle allowed mass assignment methods correctly when including modules in classes or extending instances with modules (jeremyevans)
* Fix Model#hash to work correctly with composite primary keys and with no primary key (jeremyevans)
* Model#exists? now returns false without issuing a query for new model objects (jeremyevans)
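
(A sketch of Model.dataset_module noted above; the Album model and its name column are hypothetical:)

    class Album < Sequel::Model
      dataset_module do
        def by_name
          order(:name)
        end
      end
    end
    Album.by_name.all
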
=== 3.26.0 (2011-08-01)
* Fix bug in default connection pool if a disconnect error is raised and the disconnection_proc also raises an error (jeremyevans)
* Disallow eager loading via eager of many_*_many associations with :eager_graph option (jeremyevans)
* Major speedup in dataset creation (jeremyevans)
* Replace internal implementation of eager_graph with much faster version (jeremyevans)
* Don't treat strings with leading zeros as octal format in the default typecasting (jeremyevans)
* Fix literalization of Date, Time, and DateTime values on Microsoft Access (jeremyevans)
* Fix handling of nil values with the pure-Java version of nokogiri in the xml_serializer plugin (jeremyevans)
* Make identity_map plugin work with standard eager loading of many_to_many and many_through_many associations (jeremyevans)
* Make create_table! only attempt to drop the table if it already exists (jeremyevans)
* Remove custom table_exists? implementations in the oracle and postgres adapters (jeremyevans)
* Handle another type of disconnection in the postgres adapter (jeremyevans)
* Handle disconnections in the ado adapter and do postgres subadapter (jeremyevans)
* Recognize disconnections when issuing BEGIN/ROLLBACK/COMMIT statements (jeremyevans) (#368)
=== 3.25.0 (2011-07-01)
* Work with tiny_tds-0.4.5 in the tinytds adapter, older versions are no longer supported (jeremyevans)
* Make association_pks plugin typecast provided values to integer if the primary key column type is integer (jeremyevans)
* Model.set_dataset now accepts Identifier, QualifiedIdentifier, and AliasedExpression arguments (jeremyevans)
* Fix handling of nil values in bound variables and prepared statement and stored procedure arguments in the jdbc adapter (jeremyevans, wei)
* Allow treating Datasets as Expressions, e.g. DB[:table1].select(:column1) > DB[:table2].select(:column2) (jeremyevans)
* No longer use CASCADE by default when dropping tables on PostgreSQL (jeremyevans)
* Support :cascade option to #drop_table, #drop_view, #drop_column, and #drop_constraint for using CASCADE (jeremyevans)
* If validation error messages are LiteralStrings, don't add the column name to them in Errors#full_messages (jeremyevans)
* Fix bug loading plugins on 1.9 where ::ClassMethods, ::InstanceMethods, or ::DatasetMethods is defined (jeremyevans)
* Add Dataset#exclude_where and Dataset#exclude_having methods, so you can force use of having or where clause (jeremyevans)
* Allow Dataset#select_all to take table name arguments and select all columns from each given table (jeremyevans)
* Add Dataset#select_group method, for selecting and grouping on the same columns (jeremyevans) (example below)
* Allow Dataset#group and Dataset#group_and_count to accept a virtual row block (jeremyevans)
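
(A sketch of Dataset#select_group noted above; the albums table is hypothetical:)

    DB[:albums].select_group(:artist_id)
    # SELECT artist_id FROM albums GROUP BY artist_id
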
=== 3.24.1 (2011-06-03)
* Ignore index creation errors if using create_table? with the IF NOT EXISTS syntax (jeremyevans) (#362)
=== 3.24.0 (2011-06-01)
* Add prepared_statements_association plugin, for using prepared statements by default for regular association loading (jeremyevans)
* Add prepared_statements_safe plugin, for making prepared statement use with models more safe (jeremyevans)
* Add prepared_statements_with_pk plugin, for using prepared statements for dataset lookups by primary key (jeremyevans)
* Fix bug in emulated prepared statement support not supporting nil or false as bound values (jeremyevans)
* Add Dataset#unbind for unbinding values from a dataset, for use with creating prepared statements (jeremyevans)
* Add prepared_statements plugin for using prepared statements for updates, inserts, deletes, and lookups by primary key (jeremyevans)
* Make Dataset#[] for model datasets consider a single integer argument as a lookup by primary key (jeremyevans)
* Add Dataset#with_pk for model datasets, for finding first record with matching primary key value (jeremyevans) (example below)
* Add defaults_setter plugin for setting default values when initializing model instances (jeremyevans)
* Add around hooks (e.g. around_save) to Sequel::Model (jeremyevans)
* Add Model#initialize_set private method to ease extension writing (jeremyevans)
* Only typecast bit fields to booleans on MSSQL, the MySQL bit type is a bitfield, not a boolean (jeremyevans)
* Set SQL_AUTO_IS_NULL=0 by default when connecting to MySQL via the swift and jdbc adapters (jeremyevans)
* Fix bug in multiple column IN/NOT IN emulation when a model dataset is used (jeremyevans)
* Add support for filtering and excluding by association datasets (jeremyevans)
* Fix literalization of boolean values in filters on SQLite and MSSQL (jeremyevans)
* Add support for filtering and excluding by multiple associations (jeremyevans)
* Add support for inverting some SQL::Constant instances such as TRUE, FALSE, NULL, and NOTNULL (jeremyevans)
* Add support for excluding by associations to model datasets (jeremyevans)
* The Sequel::Postgres.use_iso_date_format setting now only affects future Database objects (jeremyevans)
* Add Sequel::Postgres::PG_NAMED_TYPES hash for extensions to register type conversions for non-standard types (jeremyevans, pvh)
* Make create_table? use IF NOT EXISTS instead of using SELECT to determine existence, if supported (jeremyevans)
* Fix bug in association_pks plugin when associated table has a different primary key column name (jfirebaugh)
* Fix limiting rows when connecting to DB2 (semmons99)
* Exclude columns from tables in the INFORMATION_SCHEMA when parsing table schema on JDBC (jeremyevans)
* Fix limiting rows when connecting to Microsoft Access (jeremyevans)
* Add Database#views for getting an array of symbols of view names for the database (jeremyevans, christian.michon)
* Make Database#tables no longer include view names on MySQL (jeremyevans)
* Convert Java CLOB objects to ruby strings when using the JDBC JTDS subadapter (christian.michon)
* If Thread#kill is called on a thread with an open transaction, roll the transaction back on ruby 1.8 and rubinius (jeremyevans)
* Split informix adapter into shared/specific parts, add JDBC informix subadapter (jeremyevans)
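
(A sketch of Dataset#with_pk noted above; the Album model is hypothetical:)

    Album.dataset.with_pk(1)                 # => Album instance or nil
    Album.where(:artist_id => 3).with_pk(1)  # pk lookup within a filtered dataset
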
=== 3.23.0 (2011-05-02)
* Migrate issue tracker from Google Code to GitHub Issues (jeremyevans)
* Add support for filtering by associations to model datasets (jeremyevans)
* Don't call insert_select when saving a model that doesn't select all columns of the table (jeremyevans)
* Fix bug when using :select=>[] option for a many_to_many association (jeremyevans)
* Add a columns_introspection extension that attempts to skip database queries by introspecting selected columns (jeremyevans)
* When combining old integer migrations and new timestamp migrations, make sure old integer migrations are all applied first (jeremyevans)
* Support dynamic callbacks to customize regular association loading at query time (jeremyevans)
* Support cascading of eager loading with dynamic callbacks for both eager and eager_graph (jeremyevans)
* Make the xml_serializer plugin handle namespaced models by using __ instead of / as a separator (jeremyevans)
* Allow the :eager_grapher association proc to accept a single hash instead of 3 arguments (jfirebaugh)
* Support dynamic callbacks to customize eager loading at query time (jfirebaugh, jeremyevans)
* Fix bug in the identity_map plugin for many_to_one associations when the association reflection hadn't been filled in yet (funny-falcon)
* Add serialization_modification_detection plugin for detecting changes in serialized columns (jeremyevans) (#333)
=== 3.22.0 (2011-04-01)
* Add disconnect detection to tinytds adapter, though correct behavior may require an update to tiny_tds (cult_hero)
* Add Dataset/Database#mssql_unicode_strings accessor when connecting to MSSQL to control string literalization (semmons99, jeremyevans)
* Fix ODBC::Time instance handling in the odbc adapter (jeremyevans)
* Use Sequel.application_timezone when connecting in the oracle adapter to set the connection's session's timezone (jmthomas)
* In the ADO adapter, assume access to SQL Server if a :conn_string option is given that doesn't indicate Access/Jet (damir.si) (#332)
* Use the correct class when loading instances for descendants of model classes that use single table inheritance (jeremyevans)
* Support for COLLATE in column definitions (jfirebaugh)
* Don't use a schema when creating a temporary table (jeremyevans)
* Make migrator work correctly when a default_schema is set (jeremyevans) (#331)
=== 3.21.0 (2011-03-01)
* Make symbol splitting (:table__column___alias) work correctly for identifiers that are not in the \w character class (authorNari)
* Enable row locks in Oracle (authorNari)
* Prefer cover? over include? for validates_includes/validates_inclusion_of (jeremyevans)
* Make using NULL/NOT NULL, DEFAULT, and UNIQUE column options work correctly on H2 and possibly Oracle (jeremyevans)
* Make bin/sequel accept file arguments and work correctly when $stdin is not a tty (jeremyevans)
* Add support for -I and -r options to bin/sequel (jeremyevans)
* Sequel::Model.plugin can now be overridden just like the other Model methods (jeremyevans)
* Add tinytds adapter, the best way to connect to MSSQL from a C based ruby running on *nix (jeremyevans)
* Recognize bigint unsigned as a Bignum type in the schema dumper (gamespy-tech) (#327)
* Add Dataset#calc_found_rows for MySQL datasets (macks)
* Add association_autoreloading plugin for clearing association cache when foreign key value changes (jfirebaugh, jeremyevans)
* Fix join_table on MySQL ignoring the block (jfirebaugh)
* Transfer CTE WITH clauses in subselect to main query when joining on MSSQL (jfirebaugh)
* Make specs support both RSpec 1 and RSpec 2 (jeremyevans)
* Work with ruby-informix versions >= 0.7.3 in the informix adapter (jeremyevans) (#326)
=== 3.20.0 (2011-02-01)
* Allow a :partial option to Database#indexes on MySQL to include partial indexes (roland.swingler) (#324)
* Add a SQLite subadapter to the swift adapter, now that swift supports it (jeremyevans)
* Update swift adapter to support swift 0.8.1, older versions no longer supported (jeremyevans)
* Allow setting arbitrary JDBC properties in the jdbc adapter with the :jdbc_properties option (jeremyevans)
* Use a better error message if a validates_max_length validation is applied to a nil value (jeremyevans) (#322)
* Add some basic Microsoft Access support to the ado adapter, autoincrementing primary keys now work (jeremyevans)
* Make class_table_inheritance plugin handle subclass associations better (jeremyevans) (#320)
=== 3.19.0 (2011-01-03)
* Handle Date and DateTime types in prepared statements when using the jdbc adapter (jeremyevans)
* Handle Date, DateTime, Time, SQL::Blob, true, and false in prepared statements when using the SQLite adapter (jeremyevans)
* Use varbinary(max) instead of image for the generic blob type on MSSQL (jeremyevans)
* Close prepared statements when disconnecting when using SQLite (jeremyevans)
* Allow reflecting on validations in the validation_class_methods plugin (jeremyevans)
* Allow passing a primary key value to the add_* association method (gucki)
* When typecasting model column values, check the classes of the new and existing values (jeremyevans)
* Improve type translation performance in the postgres, mysql, and sqlite adapters by using methods instead of procs (jeremyevans)
=== 3.18.0 (2010-12-01)
* Allow the user to control how the connection pool deals with attempts to access shards that aren't configured (jeremyevans)
* Typecast columns when creating model objects from JSON in the json_serializer plugin (jeremyevans)
* When parsing the schema for a model that uses an aliased table, use the unaliased table name (jeremyevans)
* When emulating schema methods such as drop_column on SQLite, recreate applicable indexes on the recreated table (jeremyevans)
* Only remove hook pairs that have been run successfully in the instance_hooks plugin (jeremyevans)
* Add reversible migration support to the migration extension (jeremyevans)
* Add to_dot extension, for producing visualizations of Dataset abstract syntax trees with Graphviz (jeremyevans)
* Switch to using manual type translation in the SQLite adapter (jeremyevans)
* Support :read_timeout option in the native mysql adapter (tmm1)
* Support :connect_timeout option in the native mysql and mysql2 adapters (tmm1)
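
A minimal sketch of the new mysql timeout options; the connection details and values are hypothetical:

  DB = Sequel.connect('mysql://user:password@localhost/blog',
    :connect_timeout=>10, # seconds to wait when establishing a connection
    :read_timeout=>30)    # seconds to wait for query results
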
=== 3.17.0 (2010-11-05)
* Ensure that the optimistic locking plugin increments the lock column when using Model#modified! (jfirebaugh)
* Correctly handle nil values in the xml_serializer plugin, instead of converting them to empty strings (george.haff) (#313)
* Use a default wait_timeout that's allowed on Windows for the mysql and mysql2 adapters (jeremyevans) (#314)
* Add support for connecting to MySQL over SSL using the :sslca, :sslkey, and related options (jeremyevans)
* Fix Database#each_server when used with jdbc or do connection strings without separate :adapter option (jeremyevans) (#312)
* Much better support in the AS400 JDBC subadapter (bhauff)
* Allow cloning of many_through_many associations (gucki, jeremyevans)
* In the nested_attributes plugin, don't make unnecessary update calls to modify associated objects that are about to be deleted (jeremyevans, gucki)
* Allow Dataset#(add|set)_graph_aliases to accept as hash values symbols and arrays with a single element (jeremyevans)
* Add Database#views and #view_exists? to the Oracle adapter (gpheruson)
* Add Database#sql_log_level for changing the level at which SQL queries are logged (jeremyevans)
* Remove unintended use of prepared statements in swift adapter (jeremyevans)
* Fix logging in the swift PostgreSQL subadapter (jeremyevans)
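
A minimal sketch of Database#sql_log_level; the logger setup is an assumption:

  require 'logger'
  DB.loggers << Logger.new($stdout)
  # Log SQL queries at debug level instead of the default info level
  DB.sql_log_level = :debug
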
=== 3.16.0 (2010-10-01)
* Support composite foreign keys for associations in the identity_map plugin (harukizaemon, jeremyevans) (#310)
* Handle INTERSECT and EXCEPT on Microsoft SQL Server 2005+ (jfirebaugh)
* Add :replace option to Database#create_language in the postgresql adapter (jeremyevans)
* Make rcte_tree plugin work when not all columns are selected (jeremyevans)
* Add swift adapter (jeremyevans)
* Fix literalization of DateTime objects on 1.9 for databases that support fractional seconds (jeremyevans)
=== 3.15.0 (2010-09-01)
* Make emulated alter_table tasks on SQLite correctly preserve foreign keys (DirtYiCE, jeremyevans)
* Add support for sequel_pg to the native postgres adapter when pg is used (jeremyevans)
* Make class MyModel < Sequel::Model(DB[:table]) reload safe (jeremyevans)
* Fix a possible error when using the do (DataObjects) adapter with postgres (jeremyevans)
* Handle a many_to_many :join_table option that uses an implicit alias (mluu, jeremyevans)
* Work around bug in Microsoft's SQL Server JDBC Adapter version 3.0 (jfirebaugh)
* Make eager graphing a model that uses an aliased table name work correctly (jeremyevans)
* Make class_table_inheritance plugin work with non integer primary keys on SQLite (jeremyevans, russm)
* Add :auto_increment field to column schema values on MySQL if the column is auto incrementing (dbd)
* Handle DSN-less ODBC connections better (Ricardo Ramalho)
* Exclude temporary tables when parsing the schema on PostgreSQL (jeremyevans) (#306)
* Add Mysql2 adapter (brianmario)
* Handle Mysql::Error exceptions when disconnecting in the MySQL adapter (jeremyevans)
* Make typecasting work correctly for attributes loaded lazily when using the lazy attributes plugin (jeremyevans)
=== 3.14.0 (2010-08-02)
* Handle OCIInvalidHandle errors when disconnecting in the Oracle adapter (jeremyevans)
* Allow calling Model.create_table, .create_table! and .create_table? with blocks containing the schema in the schema plugin (jfirebaugh)
* Fix handling of a :conditions option in the rcte_tree plugin (mluu)
* Fix aggregate methods such as Dataset#sum and #avg on MSSQL on datasets with an order but no limit (mluu)
* Fix rename_table on MSSQL for case sensitive collations and schemas (mluu)
* Add a :single_root option to the tree plugin, for enforcing a single root value via a before_save hook (jfirebaugh)
* Add a Model#root? method to the tree plugin, for checking if the current node is a root (jfirebaugh)
* Add a :raise_on_failure option to Model#save to override the raise_on_save_failure setting (jfirebaugh)
* Handle class discriminator column names that are existing ruby method names in the single table inheritance plugin (jeremyevans)
* Fix times and datetimes when timezone support is used and you are loading a standard time when in daylight time or vice versa (gcampbell)
* Handle literalization of OCI8::CLOB objects in the native oracle adapter (jeremyevans)
* Raise a Sequel::Error instead of an ArgumentError if the migration current or target version does not exist (jeremyevans)
* Fix Database#schema on Oracle when the same table exists in multiple schemas (djwhitt)
* Fix Database#each_server when using a connection string to connect (jeremyevans)
* Make Model dataset's destroy method respect the model's use_transactions setting, instead of always using a transaction (jeremyevans)
* Add Database#adapter_scheme, for checking which adapter a Database uses (jeremyevans)
* Allow Dataset#grep to take :all_patterns, :all_columns, and :case_insensitive options (mighub, jeremyevans)
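
A minimal sketch of the new Dataset#grep options, using a hypothetical posts table:

  # Match rows where every pattern matches at least one column, ignoring case
  DB[:posts].grep([:title, :body], ['%ruby%', '%sequel%'],
    :all_patterns=>true, :case_insensitive=>true)
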
=== 3.13.0 (2010-07-01)
* Allow Model.find_or_create to take a block which is yielded the object to be created, if no object is found (zaius, jeremyevans)
* Make PlaceholderLiteralString a GenericExpression subclass (jeremyevans)
* Allow nil/NULL to be used as a CASE expression value (jeremyevans)
* Support bitwise operators on more databases (jeremyevans)
* Make PostgreSQL do bitwise xor instead of exponentiation for ^ operator (jeremyevans)
* Fix handling of tinyint(1) columns when connecting to MySQL via JDBC (jeremyevans)
* Handle arrays of two element arrays as filter hash values automatically (jeremyevans)
* Allow :frame option for windows to take a string that is used literally (jeremyevans)
* Support transaction isolation levels on PostgreSQL, MySQL, and MSSQL (jeremyevans)
* Support prepared transactions/two-phase commit on PostgreSQL, MySQL, and H2 (jeremyevans)
* Allow NULLS FIRST/LAST when ordering using the :nulls=>:first/:last option to asc and desc (jeremyevans)
* On PostgreSQL, if no :schema option is provided for #tables, #table_exists?, or #schema, assume all schemas except the default non-public ones (jeremyevans) (#305)
* Cache prepared statements when using the native sqlite driver, improving performance (jeremyevans)
* Add a Tree plugin for treating model objects as being part of a tree (jeremyevans, mwlang)
* Add a :methods_module association option, for choosing the module into which association methods are placed (jeremyevans)
* Add a List plugin for treating model objects as being part of a list (jeremyevans, aemadrid)
* Don't attempt to use class polymorphism in the class_table_inheritance plugin if no cti_key is defined (jeremyevans)
* Add a XmlSerializer plugin for serializing/deserializing model objects to/from XML (jeremyevans)
* Add a JsonSerializer plugin for serializing/deserializing model objects to/from JSON (jeremyevans)
* Handle unsigned integers in the schema dumper (jeremyevans)
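
A minimal sketch of Model.find_or_create with a block, assuming a hypothetical Album model:

  # The block is only yielded the new object if no matching record is found
  album = Album.find_or_create(:name=>'Old') do |a|
    a.artist_id = 1
  end
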
=== 3.12.1 (2010-06-09)
* Make :encoding option work on MySQL even if config file specifies different encoding (jeremyevans) (#300)
=== 3.12.0 (2010-06-01)
* Add a :deferrable option to foreign_key for creating deferrable foreign keys (hydrow)
* Add a :join_table_block many_to_many association option used by the add/remove/remove_all methods (jeremyevans)
* Add an AssociationPks plugin that adds association_pks and association_pks= methods for *_to_many associations (jeremyevans)
* Add an UpdatePrimaryKey plugin that allows you to update the primary key of a model object (jeremyevans)
* Add a SkipCreateRefresh plugin that skips the refresh when saving new model objects (jeremyevans)
* Add a StringStripper plugin that strips strings before assigning them to model attributes (jeremyevans)
* Allow the :eager_loader association proc to accept a single hash instead of 3 arguments (jeremyevans)
* Add a Dataset#order_append alias for order_more, for consistency with order_prepend (jeremyevans)
* Add a Dataset#order_prepend method that adds to the beginning of an existing order (jeremyevans)
* Add a Sequel::NotImplemented exception class, use instead of NotImplementedError (jeremyevans)
* Correctly handle more than 2 hierarchy levels in the single table inheritance plugin (jeremyevans)
* Allow passing a custom column value<->class mapping to the single_table_inheritance plugin (jeremyevans, tmm1)
* Handle SQL::Identifiers in the schema_dumper extension (jeremyevans) (#304)
* Make sure certain alter table operations clear the schema correctly on MySQL (jeremyevans) (#301)
* Fix leak of JDBC Statement objects when using transactions on JDBC on databases that support savepoints (jeremyevans)
* Add DatabaseDisconnectError support to the ODBC adapter (Joshua Hansen)
* Make :encoding option work on MySQL in some cases where it was ignored (jeremyevans) (#300)
* Make Model::Errors#on always return nil if there are no errors on that attribute (jeremyevans)
* When using multiple plugins that add before hooks, the order in which the hooks are called may have changed (jeremyevans)
* The hook_class_methods plugin no longer skips later after hooks if earlier after hooks return false (jeremyevans)
* Add Model#set_fields and update_fields, similar to set_only and update_only but ignoring other keys in the hash (jeremyevans)
* Add Model.qualified_primary_key_hash, similar to primary_key_hash but with qualified columns (jeremyevans)
* Make Model::Errors#empty? handle attributes with empty error arrays (jeremyevans)
* No longer apply association options to join table dataset when removing all many_to_many associated objects (jeremyevans)
* Log the execution times of migrations to the database's loggers (jeremyevans)
* Add a TimestampMigrator that can work with migrations where versions are timestamps, and handle migrations applied out of order (jeremyevans)
* Completely refactor Sequel::Migrator, now a class instead of a module (jeremyevans)
* Save migration version after each migration, instead of after all migrations (jeremyevans)
* Raise an error if missing a migration version (jeremyevans)
* Raise an error if using a duplicate migration version (jeremyevans)
* Add a Sequel.migration DSL for defining migrations (jeremyevans)
* Add a sharding plugin giving Sequel::Model objects support for dealing with sharding (jeremyevans)
* Handle timestamp(N) with time zone data types (hone)
* Fix MSSQL temporary table creation, but watch out as it changes the table name (gpd, jeremyevans) (#299)
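
A minimal sketch of the new Sequel.migration DSL; table and column names are hypothetical:

  Sequel.migration do
    up do
      create_table(:artists) do
        primary_key :id
        String :name, :null=>false
      end
    end
    down do
      drop_table(:artists)
    end
  end
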
=== 3.11.0 (2010-05-03)
* Allow shared postgresql adapter to work with ruby 1.9 with the -Ku switch (golubev.pavel) (#298)
* Add support for connecting to MSSQL via JTDS in the JDBC adapter (jeremyevans)
* Support returning the number of rows updated/deleted on MSSQL when using the ADO adapter with an explicit :provider (jeremyevans)
* Support transactions in the ADO adapter if not using the default :provider (jeremyevans)
* Make Database#disconnect not raise an exception when using the unsharded single connection pool (jeremyevans)
* Attempt to handle JDBC connection problems in cases where driver auto loading doesn't work (e.g. Tomcat) (elskwid)
* Make native MySQL adapter's tinyint to boolean conversion only convert tinyint(1) columns and not larger tinyint columns (roland.swingler) (#294)
* Fix use of limit with distinct on Microsoft SQL Server (jeremyevans) (#297)
* Correctly swallow errors when using :ignore_index_errors in Database#create_table when using unsupported indexes (jeremyevans) (#295)
* Fix insert returning the autogenerated key when using the 5.1.12 MySQL JDBC driver (viking)
* Consider number/numeric/decimal columns with a 0 scale to be integer columns (e.g. numeric(10, 0)) (jeremyevans, QaDes)
* Fix Database#rename_table on Microsoft SQL Server (rohit.namjoshi) (#293)
* Add Dataset#provides_accurate_rows_matched?, for seeing if update and delete are likely to return correct numbers (jeremyevans)
* Add require_modification to Sequel::Model, for checking that model instance updating and deleting affects a single row (jeremyevans)
* Fix leak of ResultSets when getting metadata in the jdbc adapter (jrun)
* Make Dataset#filter and related methods just clone receiver if given an empty argument, such as {}, [], or '' (jeremyevans)
* Add instance_filters plugin, for adding arbitrary filters when updating/destroying the instance (jeremyevans)
* No longer create the #{plugin}_opts methods for plugins (jeremyevans)
* Support :auto_vacuum, :foreign_keys, :synchronous, and :temp_store Database options on SQLite, for thread-safe PRAGMA setting (jeremyevans)
* Add foreign_keys accessor to SQLite Database objects (enabled by default), which modifies the foreign_keys PRAGMA available in 3.6.19+ (jeremyevans)
* Add an Database#sqlite_version method when connecting to SQLite, used to determine feature support (jeremyevans)
* Fix rolling back transactions when connecting to Oracle via JDBC (jeremyevans)
* Fix syntax errors when connecting to MSSQL via the dbi adapter (jeremyevans) (#292)
* Add support for an :after_connect option when connecting, called with each new connection made (jeremyevans)
* Add support for a :test option when connecting to automatically test the connection (jeremyevans)
* Add Dataset#select_append, which always appends to the existing SELECTed columns (jeremyevans)
* Emulate DISTINCT ON on MySQL using GROUP BY (jeremyevans)
* Make MSSQL shared adapter emulate set_column_null alter table op better with types containing sizes (jeremyevans) (#291)
* Add :config_default_group and :config_local_infile options to the native MySQL adapter (jeremyevans)
* Add log_warn_duration attribute to Database, queries that take longer than it will be logged at warn level (jeremyevans)
* Switch Database logging to use log_yield instead of log_info, queries that raise errors are now logged at error level (jeremyevans)
* Update active_model plugin to work with the ActiveModel::Lint 3.0.0beta2 specs (jeremyevans)
* Support JNDI connection strings in the JDBC adapter (jrun)
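
A minimal sketch of the :test and :after_connect connection options; the connection details are hypothetical, and the exec call assumes a driver whose raw connection object supports it:

  DB = Sequel.connect('postgres://user:password@localhost/blog',
    :test=>true, # test the connection when connecting
    :after_connect=>(proc do |conn|
      # Called with each new connection made
      conn.exec('SET statement_timeout = 5000')
    end))
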
=== 3.10.0 (2010-04-02)
* Make one_to_one setter and *_to_many remove_all methods apply the association options (jeremyevans)
* Make nested_attributes plugin handle invalid many_to_one associations better (john_firebaugh)
* Remove private methods from Sequel::BasicObject on ruby 1.8 (i.e. most Kernel methods) (jeremyevans)
* Add Sequel::BasicObject.remove_methods!, useful on 1.8 if libraries required after Sequel add methods to Object (jeremyevans)
* Change Sequel.connect with a block to return the block's value (jonas11235)
* Add an rcte_tree plugin, which uses recursive common table expressions for loading trees stored as adjacency lists (jeremyevans)
* Make typecast_on_load plugin also typecast when refreshing the object (either explicitly or implicitly after creation) (jeremyevans)
* Fix schema parsing and dumping of tinyint columns when connecting to MySQL via the do adapter (ricardochimal)
* Fix transactions when connecting to Oracle via JDBC (jeremyevans)
* Fix plugin loading when plugin module name is the same as an already defined top level constant (jeremyevans)
* Add an AS400 JDBC subadapter (need jt400.jar in classpath) (jeremyevans, bhauff)
* Fix the emulated MSSQL offset support when core extensions are not used (jeremyevans)
* Make Sequel::BasicObject work correctly on Rubinius (kronos)
* Add the :eager_loader_key option to associations, useful for custom eager loaders (jeremyevans)
* Dataset#group_and_count no longer orders by the count (jeremyevans)
* Fix Dataset#limit on MSSQL 2000 (jeremyevans)
* Support eagerly loading nested associations when lazily loading *_to_one associations using the :eager option (jeremyevans)
* Fix the one_to_one setter to work with a nil argument (jeremyevans)
* Cache one_to_one associations like many_to_one associations instead of one_to_many associations (jeremyevans)
* Use the singular form for one_to_one association names instead of the plural form (john_firebaugh)
* Add real one_to_one associations, using the :one_to_one option of one_to_many is now an error (jeremyevans)
* Add Model#lock! which uses Dataset#for_update to lock model rows (jeremyevans)
* Add Dataset#for_update as a standard dataset method (jeremyevans)
* Add composition plugin, similar to ActiveRecord's composed_of (jeremyevans)
* Combine multiple complex expressions for simpler SQL and object tree (jeremyevans)
* Add Dataset#first_source_table, for the unaliased version of the table for the first source (jeremyevans)
* Raise a more explicit error if attempting to use the sqlite adapter with sqlite3 instead of sqlite3-ruby (jeremyevans)
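
A minimal sketch of Dataset#for_update via Model#lock!, assuming a hypothetical Item model:

  DB.transaction do
    item = Item[1]
    item.lock! # reloads the row using SELECT ... FOR UPDATE
    item.update(:quantity=>item.quantity - 1)
  end
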
=== 3.9.0 (2010-03-04)
* Allow loading adapters and extensions from outside of the Sequel lib directory (jeremyevans)
* Make limit and offset work as bound variables in prepared statements (jeremyevans)
* In the single_table_inheritance plugin, handle case where the sti_key is nil or '' specially (jeremyevans) (#287)
* Handle IN/NOT IN with an empty array (jeremyevans)
* Emulate IN/NOT IN with multiple columns where the database doesn't support it and a dataset is given (jeremyevans)
* Add Dataset#unused_table_alias, for generating a table alias that has not yet been used in the query (jeremyevans)
* Support an empty database argument in bin/sequel, useful for testing things without a real database (jeremyevans)
* Support for schemas and aliases when eager graphing (jeremyevans)
* Handle using an SQL::Identifier as the 4th argument to Dataset#join_table (jeremyevans)
* Move gem spec from Rakefile to a .gemspec file, for compatibility with gem build and builder (jeremyevans) (#285)
* Fix MSSQL 2005+ offset emulation on ruby 1.9 (jeremyevans)
* Make active_model plugin work with ActiveModel 3.0 beta Lint specs, now requires active_model (jeremyevans)
* Correctly create foreign key constraints on MySQL with the InnoDB engine, but you must specify the :key option (jeremyevans)
* Add an optimistic_locking plugin for models, similar to ActiveRecord's optimistic locking support (jeremyevans)
* Handle implicitly qualified symbols in UPDATE statements, useful for updating joined datasets (jeremyevans)
* Have schema_dumper extension pass options hash to Database#tables (jeremyevans) (#283)
* Make all internal uses of require thread-safe (jeremyevans)
* Refactor connection pool into 4 separate pools, increase performance for unsharded setups (jeremyevans)
* Change a couple instance_evaled lambdas into procs, for 1.9.2 compatibility (jeremyevans)
* Raise error message earlier if DISTINCT ON is used on SQLite (jeremyevans)
* Speed up prepared statements on SQLite (jeremyevans)
* Correctly handle ODBC timestamps when database_timezone is nil (jeremyevans)
* Add Sequel::ValidationFailed#errors (tmm1)
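
A minimal sketch of Dataset#unused_table_alias, using a hypothetical items table:

  ds = DB[:items]
  ds.unused_table_alias(:foo)   # => :foo, since :foo is not yet used
  ds.unused_table_alias(:items) # => :items_0, since :items is already used
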
=== 3.8.0 (2010-01-04)
* Catch cases in the postgres adapter where exceptions weren't converted or raised appropriately (jeremyevans)
* Don't double escape backslashes in string literals in the mssql shared adapter (john_firebaugh)
* Fix order of ORDER and HAVING clauses in the mssql shared adapter (mluu)
* Add validates_type to the validation_helpers plugin (mluu)
* Attempt to detect database disconnects in the JDBC adapter (john_firebaugh)
* Add Sequel::SQL::Expression#==, so arbitrary expressions can be compared by value (dlee)
* Respect the :size option for the generic File type on MySQL to create tinyblob, mediumblob, and longblob (ibc)
* Don't use the OUTPUT clause on SQL Server versions that don't support it (pre-2005) (jeremyevans) (#281)
* Raise DatabaseConnectionErrors in the single-threaded connection pool if unable to connect (jeremyevans)
* Fix handling of non-existent server in single-threaded connection pool (jeremyevans)
* Default to using mysqlplus driver in the native mysql adapter, fall back to mysql driver (ibc, jeremyevans)
* Handle 64-bit integers in JDBC prepared statements (paulfras)
* Improve blob support when using the H2 JDBC subadapter (nullstyle, jeremyevans, paulfras)
* Add Database#each_server, which yields a new Database object for each server in the connection pool which is connected to only that server (jeremyevans)
* Add Dataset#each_server, which yields a dataset for each server in the connection pool that will execute on that server (jeremyevans)
* Remove meta_eval and metaclass private methods from Sequel::Metaprogramming (jeremyevans)
* Merge Dataset::FROM_SELF_KEEP_OPTS into Dataset::NON_SQL_OPTIONS (jeremyevans)
* Add Database#remove_servers for removing servers from the pool on the fly (jeremyevans)
* When disconnecting servers, if there are any connections to the server currently in use, schedule them to be disconnected (jeremyevans)
* Allow disconnecting specific server(s)/shard(s) in Database#disconnect via a :servers option (jeremyevans)
* Handle multiple statements in a single query in the native MySQL adapter in all cases, not just when selecting via Dataset#each (jeremyevans)
* In the boolean_readers plugin, don't raise an error if the model's columns can't be determined (jeremyevans)
* In the identity_map plugin, remove instances from the cache if they are deleted/destroyed (jeremyevans)
* Add Database#add_servers, for adding new servers/shards on the fly (chuckremes, jeremyevans)
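
A minimal sketch of adding and removing shards on the fly; the server name and options are hypothetical:

  DB.add_servers(:shard1=>{:host=>'10.0.0.2'})
  DB[:items].server(:shard1).all # run the query on the new shard
  DB.remove_servers(:shard1)
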
=== 3.7.0 (2009-12-01)
* Add Dataset#sequence to the shared Oracle Adapter, for returning autogenerated primary key values on insert (jeremyevans) (#280)
* Bring support for modifying joined datasets into Sequel proper, supported on MySQL and PostgreSQL (jeremyevans)
* No longer use native autoreconnection in the mysql adapter (jeremyevans)
* Add NULL, NOTNULL, TRUE, SQLTRUE, FALSE, and SQLFALSE constants (jeremyevans)
* Add Dataset #select_map, #select_order_map, and #select_hash (jeremyevans)
* Make Dataset#group_and_count handle arguments other than Symbols (jeremyevans)
* Add :only_if_modified option to validates_unique method in validation_helpers plugin (jeremyevans)
* Allow specifying the dataset alias via :alias option when using union/intersect/except (jeremyevans)
* Allow Model#destroy to take an options hash and respect a :transaction option (john_firebaugh)
* If a transaction is being used, raise_on_save_failure is false, and a before hook returns false, rollback the transaction (john_firebaugh, jeremyevans)
* In the schema_dumper, explicitly specify the :type option if it isn't Integer (jeremyevans)
* On postgres, use bigserial type if :type=>Bignum is given as an option to primary_key (jeremyevans)
* Use READ_DEFAULT_GROUP in the mysql adapter to load the options in the client section of the my.cnf file (crohr)
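
A minimal sketch of the new Dataset methods, using a hypothetical albums table:

  DB[:albums].select_map(:name)       # array of name values
  DB[:albums].select_order_map(:name) # same, ordered by name
  DB[:albums].select_hash(:id, :name) # hash mapping id to name
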
=== 3.6.0 (2009-11-02)
* Make the MSSQL shared adapter correctly parse the column schema information for tables in the non-default database schema (rohit.namjoshi)
* Use save_changes instead of save when updating existing associated objects in the nested_attributes plugin (jeremyevans)
* Allow Model#save_changes to accept an option hash that is passed to save, so you can save changes without validating (jeremyevans)
* Make nested_attributes plugin add newly created objects to cached association array immediately (jeremyevans)
* Make add_ association method not add the associated object to the cached array if it's already there (jeremyevans)
* Add Model#modified! for explicitly marking an object as modified, so save_changes/update will run callbacks even if no columns have been modified (jeremyevans)
* Add support for a :fields option in the nested attributes plugin, and only allow updating of the fields specified (jeremyevans)
* Don't allow modifying keys related to the association when updating existing objects in the nested_attributes plugin (jeremyevans)
* Add associated_object_keys method to AssociationReflection objects, specifying the key(s) in the associated model table related to the association (jeremyevans)
* Support the memcached protocol in the caching plugin via the new :ignore_exceptions option (EppO, jeremyevans)
* Don't modify array with a string and placeholders passed to Dataset#filter or related methods (jeremyevans)
* Speed up Amalgalite adapter (copiousfreetime)
* Fix bound variables on PostgreSQL when using nil and potentially other values (jeremyevans)
* Allow easier overriding of default options used in the validation_helpers plugin (jeremyevans)
* Have Dataset#literal_other call sql_literal on the object if it responds to it (heda, michaeldiamond)
* Fix Dataset#explain in the amalgalite adapter (jeremyevans)
* Have Model.table_name respect table aliases (jeremyevans)
* Allow marshalling of saved model records after calling #marshallable! (jeremyevans)
* one_to_many association methods now make sure that the removed object is currently associated to the receiver (jeremyevans)
* Model association add_ and remove_ methods now have more descriptive error messages (jeremyevans)
* Model association add_ and remove_ methods now make sure passed object is of the correct class (jeremyevans)
* Model association remove_ methods now accept a primary key value and disassociate the associated model object (natewiger, jeremyevans)
* Model association add_ methods now accept a hash and create a new associated model object (natewiger, jeremyevans)
* Dataset#window for PostgreSQL datasets now respects previous windows (jeremyevans)
* Dataset#simple_select_all? now ignores options that don't affect the SQL being issued (jeremyevans)
* Account for table aliases in eager_graph (mluu)
* Add support for MSSQL clustered index creation (mluu)
* Implement insert_select in the MSSQL adapter via OUTPUT. Can be disabled via disable_insert_output. (jfirebaugh, mluu)
* Correct error handling when beginning a transaction fails (jfirebaugh, mluu)
* Correct JDBC binding for Time objects in prepared statements (jfirebaugh, jeremyevans)
* Emulate JOIN USING clause poorly using JOIN ON if the database doesn't support JOIN USING (e.g. MSSQL, H2) (jfirebaugh, jeremyevans)
* Support column aliases in Dataset#group_and_count (jfirebaugh)
* Support preparing insert statements of the form insert(1,2,3) and insert(columns, values) (jfirebaugh)
* Fix add_index for tables in non-default schema (jfirebaugh)
* Allow named placeholders in placeholder literal strings (jeremyevans)
* Allow the force_encoding plugin to work when refreshing (jeremyevans)
* Add Dataset#bind for setting bound variable values before calling #call (jeremyevans)
* Add additional join methods to Dataset: (cross|natural|(natural_)?(full|left|right))_join (jeremyevans)
* Fix use of dataset aggregate methods (e.g. sum) on limited/grouped/etc. datasets (jeremyevans)
* Clear changed_columns when saving new model objects with a database adapter that supports insert_select, such as postgres (jeremyevans)
* Fix Dataset#replace with default values on MySQL, and respect insert-related options (jeremyevans)
* Fix Dataset#lock on PostgreSQL (jeremyevans)
* Fix Dataset#explain on SQLite (jeremyevans)
* Add Dataset#use_cursor to the native postgres adapter, for processing large datasets (jeremyevans)
* Don't ignore Class.inherited in Sequel::Model.inherited (antage) (#277)
* Optimize JDBC::MySQL::DatabaseMethods#last_insert_id to prevent additional queries (tmm1)
* Fix use of MSSQL with ruby 1.9 (cult hero)
* Don't try to load associated objects when the current object has NULL for one of the key fields (jeremyevans)
* No longer require GROUP BY to use HAVING, except on SQLite (jeremyevans)
* Add emulated support for the lack of multiple column IN/NOT IN support in MSSQL and SQLite (jeremyevans)
* Add emulated support for #ilike on MSSQL and H2 (jeremyevans)
* Add a :distinct option for all associations, which uses the SQL DISTINCT clause (jeremyevans)
* Don't require :: prefix for constant lookups in instance_evaled virtual row blocks on ruby 1.9 (jeremyevans)
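
A minimal sketch of Dataset#bind for setting bound variable values before #call, using a hypothetical items table:

  ds = DB[:items].filter(:id=>:$i)
  ds.bind(:i=>1).call(:first) # returns the row with id 1
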
=== 3.5.0 (2009-10-01)
* Correctly literalize timezones in timestamps when using Oracle (jeremyevans)
* Add class_table_inheritance plugin, supporting inheritance in the database using a table-per-model-class approach (jeremyevans)
* Allow easier overriding of model code to insert and update individual records (jeremyevans)
* Allow graphing to work on previously joined datasets, and eager graphing of models backed by joined datasets (jeremyevans)
* Fix MSSQL emulated offset support for datasets with row_procs (e.g. Model datasets) (jeremyevans)
* Support composite keys with set_primary_key when called with an array of multiple symbols (jeremyevans)
* Fix select_more and order_more to not affect receiver (tamas.denes, jeremyevans)
* Support composite keys in model associations, including many_through_many plugin support (jeremyevans)
* Add the force_encoding plugin for forcing encoding of strings for models (requires ruby 1.9) (jeremyevans)
* Support DataObjects 0.10 (previous DataObjects versions are now unsupported) (jeremyevans)
* Allow the user to specify the ADO connection string via the :conn_string option (jeremyevans)
* Add thread_local_timezones extension for allow per-thread overrides of the global timezone settings (jeremyevans)
* Add named_timezones extension for using named timezones such as "America/Los_Angeles" using TZInfo (jeremyevans)
* Pass through unsigned/elements/size and other options when altering columns on MySQL (tmm1)
* Replace Dataset#virtual_row_block_call with Sequel.virtual_row (jeremyevans)
* Allow Dataset #delete, #update, and #insert to respect existing WITH clauses on MSSQL (dlee, jeremyevans)
* Add touch plugin, which adds Model#touch for updating an instance's timestamp, as well as touching associations when an instance is updated or destroyed (jeremyevans)
* Add sql_expr extension, which adds the sql_expr to all objects, giving them easy access to Sequel's DSL (jeremyevans)
* Add active_model plugin, which gives Sequel::Model an ActiveModel compliant API, passes the ActiveModel::Lint tests (jeremyevans)
* Fix MySQL commands out of sync error when using queries with multiple result sets without retrieving all result sets (jeremyevans)
* Allow splitting of multiple result sets into separate arrays when using multiple statements in a single query in the native MySQL adapter (jeremyevans)
* Don't include primary key indexes when parsing MSSQL indexes on JDBC (jeremyevans)
* Make Dataset#insert_select return nil on PostgreSQL if disable_insert_returning is used (jeremyevans)
* Speed up execution of prepared statements with bound variables on MySQL (ibc@aliax.net)
* Add association_dependencies plugin, for deleting, destroying, or nullifying associated objects when destroying a model object (jeremyevans)
* Add :validate association option, set to false to not validate when implicitly saving associated objects (jeremyevans)
* Add subclasses plugin, for recording all of a model's subclasses and descendant classes (jeremyevans)
* Add looser_typecasting extension, for using .to_f and .to_i instead of Kernel.Float and Kernel.Integer when typecasting floats and integers (jeremyevans)
* Catch database errors when preparing statements or setting variable values when using the native MySQL adapter (jeremyevans)
* Add typecast_on_load plugin, for fixing bad database typecasting when loading model objects (jeremyevans)
* Detect more types of MySQL disconnection errors (jeremyevans)
* Add Sequel.convert_exception_class for wrapping exceptions (jeremyevans)
* Model#modified? now always considers new records as modified (jeremyevans)
* Typecast before checking current model attribute value, instead of after (jeremyevans)
* Don't attempt to use unparseable defaults as literals when dumping the schema for a MySQL database (jeremyevans)
* Handle MySQL enum defaults in the schema dumper (jeremyevans)
* Support Database#server_version on MSSQL (dlee, jeremyevans)
* Support updating and deleting joined datasets on MSSQL (jfirebaugh)
* Support the OUTPUT SQL clause on MSSQL delete, insert, and update statements (jfirebaugh)
* Refactor generation of delete, insert, select, and update statements (jfirebaugh, jeremyevans)
* Do a better job of parsing defaults on MSSQL (jfirebaugh)
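
A minimal sketch of the class_table_inheritance plugin; the models are hypothetical and the :key option naming the discriminator column is an assumption:

  class Employee < Sequel::Model
    plugin :class_table_inheritance, :key=>:kind
  end
  # Manager instances use the employees table joined to a managers table
  class Manager < Employee; end
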
=== 3.4.0 (2009-09-02)
* Allow datasets without tables to work correctly on Oracle (mikegolod)
* Add #invert, #asc, and #desc to OrderedExpression (dlee)
* Allow validates_unique to take a block used to scope the uniqueness constraint (drfreeze, jeremyevans)
* Automatically save a new many_to_many associated object when associating the object via add_* (jeremyevans)
* Add a nested_attributes plugin for modifying associated objects directly through a model object (jeremyevans)
* Add an instance_hooks plugin for adding hooks to specific model instances (jeremyevans)
* Add a boolean_readers plugin for creating attribute? methods for boolean columns (jeremyevans)
* Add Dataset#ungrouped which removes existing grouping (jeremyevans)
* Make Dataset#group with nil or no arguments to remove existing grouping (dlee)
* Fix using multiple emulated ALTER TABLE statements (e.g. drop_column) in a single alter_table block on SQLite (jeremyevans)
* Don't allow inserting on a grouped dataset or a dataset that selects from multiple tables (jeremyevans)
* Allow class Item < Sequel::Model(DB2) to work (jeremyevans)
* Add Dataset#truncate for truncating tables (jeremyevans)
* Add Database#run method for executing arbitrary SQL on a database (jeremyevans)
* Handle index parsing correctly for tables in a non-default schema on JDBC (jfirebaugh)
* Handle unique index parsing correctly when connecting to MSSQL via JDBC (jfirebaugh)
* Add support for converting Time/DateTime to local or UTC time upon storage, retrieval, or typecasting (jeremyevans)
* Accept a hash when typecasting values to date, time, and datetime types (jeremyevans)
* Make JDBC adapter prepared statements support booleans, blobs, and potentially any type of object (jfirebaugh)
* Refactor the inflection support and modify the default inflections (jeremyevans, dlee)
* Make the serialization and lazy_attribute plugins add accessor methods to modules included in the class (jeremyevans)
* Make Database#schema on JDBC include a :column_size entry specifying the maximum length/precision for the column (jfirebaugh)
* Make Database#schema on JDBC accept a :schema option (dlee)
* Fix Dataset#import when called with a dataset (jeremyevans)
* Give a much more descriptive error message if the mysql.rb driver is detected (jeremyevans)
* Make postgres adapter work with a modified postgres-pr that raises PGError (jeremyevans)
* Make ODBC adapter respect Sequel.datetime_class (jeremyevans)
* Add support for generic concepts of CURRENT_{DATE,TIME,TIMESTAMP} (jeremyevans)
* Add a timestamps plugin for automatically creating hooks for create and update timestamps (jeremyevans)
* Add support for serializing to json (derdewey)
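
A minimal sketch of the new nested_attributes plugin, assuming hypothetical Artist/Album models:

  class Artist < Sequel::Model
    one_to_many :albums
    plugin :nested_attributes
    nested_attributes :albums
  end
  artist = Artist.new(:name=>'A')
  artist.albums_attributes = [{:name=>'B'}] # album is created when saved
  artist.save
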
=== 3.3.0 (2009-08-03)
* Add an association_proxies plugin that uses proxies for associations (jeremyevans)
* Have the add/remove/remove_all methods take additional arguments and pass them to the internal methods (clivecrous)
* Move convert_tinyint_to_bool method from Sequel to Sequel::MySQL (jeremyevans)
* Model associations now default to associating to classes in the same scope (jeremyevans, nougad) (#274)
* Add Dataset#unlimited, similar to unfiltered and unordered (jeremyevans)
* Make Dataset#from_self take an options hash and respect an :alias option, giving the alias to use (Phrogz)
* Make the JDBC adapter accept a :convert_types option to turn off Java type conversion and double performance (jeremyevans)
* Slight increase in ConnectionPool performance (jeremyevans)
* SQL::WindowFunction can now be aliased/casted etc. just like SQL::Function (jeremyevans)
* Model#save no longer attempts to update primary key columns (jeremyevans)
* Sequel will now unescape values provided in connection strings (e.g. ado:///db?host=server%5cinstance) (jeremyevans)
* Significant improvements to the ODBC and ADO adapters in general (jeremyevans)
* The ADO adapter no longer attempts to use database transactions, since they never worked (jeremyevans)
* Much better support for Microsoft SQL Server using the ADO, ODBC, and JDBC adapters (jeremyevans)
* Support rename_column, set_column_null, set_column_type, and add_foreign_key on H2 (jeremyevans)
* Support adding a column with a primary key or unique constraint to an existing table on SQLite (jeremyevans)
* Support altering a column's type, null status, or default on SQLite (jeremyevans)
* Fix renaming a NOT NULL column without a default on MySQL (nougad, jeremyevans) (#273)
* Don't swallow DatabaseConnectionErrors when creating model subclasses (tommy.midttveit)
=== 3.2.0 (2009-07-02)
* In the STI plugin, don't overwrite the STI field if it is already set (jeremyevans)
* Add support for Common Table Expressions, which use the SQL WITH clause (jeremyevans)
* Add SQL::WindowFunction, expand virtual row blocks to support them and other constructions (jeremyevans)
* Add Model#autoincrementing_primary_key, for when the autoincrementing key isn't the same as the primary key (jeremyevans)
* Add Dataset#ungraphed, to remove the splitting of results into subhashes or associated records (jeremyevans)
* Support :opclass option for PostgreSQL indexes (tmi, jeremyevans)
* Make parsing of server's version more reliable for PostgreSQL (jeremyevans)
* Add Dataset#qualify, which is qualify_to with a first_source default (jeremyevans)
* Add :ruby_default to parsed schema information, which contains a ruby object representing the database default (jeremyevans)
* Fix changing a column's name, type, or null status on MySQL when column has a string default (jeremyevans)
* Remove Dataset#to_table_reference protected method, no longer used (jeremyevans)
* Fix thread-safety issue in stored procedure code (jeremyevans)
* Remove SavepointTransactions module, integrate into Database code (jeremyevans)
* Add supports_distinct_on? method (jeremyevans)
* Remove SQLStandardDateFormat, replace with requires_sql_standard_datetimes? method (jeremyevans)
* Remove UnsupportedIsTrue module, replace with supports_is_true? method (jeremyevans)
* Remove UnsupportedIntersectExcept(All)? modules, replace with methods (jeremyevans)
* Make Database#indexes work on PostgreSQL versions prior to 8.3 (tested on 7.4) (jeremyevans)
* Fix bin/sequel using a YAML file on 1.9 (jeremyevans)
* Allow connection pool options to be specified in connection string (jeremyevans)
* Handle :user and :password options in the JDBC adapter (jeremyevans)
* Fix warnings when using the ODBC adapter (jeremyevans)
* Add opening_databases.rdoc file for describing how to connect to a database (mwlang, jeremyevans)
* Significantly increase JDBC select performance (jeremyevans)
* Slightly increase SQLite select performance using the native adapter (jeremyevans)
* Majorly increase MySQL select performance using the native adapter (jeremyevans)
* Pass through unsigned/elements/size and other options when altering columns on MySQL (tmm1)
* Allow on_duplicate_key_update to affect Dataset#insert on MySQL (tmm1)
* Support using a given table and column to store schema versions, using new Migrator.run method (bougyman, jeremyevans)
* Fix foreign key table constraints on MySQL (jeremyevans)
* Remove Dataset#table_exists?, use Database#table_exists? instead (jeremyevans)
* Fix graphing of datasets with dataset sources (jeremyevans) (#271)
* Raise a Sequel::Error if Sequel.connect is called with something other than a Hash or String (jeremyevans) (#272)
* Add -N option to bin/sequel to not test the database connection (jeremyevans)
* Make Model.grep call Dataset#grep instead of Enumerable#grep (jeremyevans)
* Support the use of Regexp as first argument to StringExpression.like (jeremyevans)
* Fix Database#indexes on PostgreSQL when the schema used is a symbol (jeremyevans)
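
A minimal sketch of the new common table expression support, using a hypothetical items table:

  DB[:t].with(:t, DB[:items].filter(:active=>true)).all
  # WITH t AS (SELECT * FROM items WHERE (active IS TRUE)) SELECT * FROM t
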
=== 3.1.0 (2009-06-04)
* Require the classes match to consider an association a reciprocal (jeremyevans) (#270)
* Make Migrator work correctly with file names like 001_873465873465873465_some_name.rb (jeremyevans) (#267)
* Add Dataset#qualify_to and #qualify_to_first_source, for qualifying unqualified identifiers in the dataset (jeremyevans)
* Allow the use of #sql_subscript on most SQL::* objects, and support non-integer subscript values (jeremyevans)
* Add reflection.rdoc file which explains and gives examples of many of Sequel's reflection methods (jeremyevans)
* Add many_through_many plugin, allowing you to construct an association to multiple objects through multiple join tables (jeremyevans)
* Add the :cartesian_product_number option to associations, for specifying if they can cause a cartesian product (jeremyevans)
* Make :eager_graph association option work correctly when lazily loading many_to_many associations (jeremyevans)
* Make eager_unique_table_alias consider joined tables as well as tables in the FROM clause (jeremyevans)
* Make add_graph_aliases work correctly even if set_graph_aliases hasn't been used (jeremyevans)
* Fix using :conditions that are a placeholder string in an association (e.g. :conditions=>['a = ?', 42]) (jeremyevans)
* On MySQL, make Dataset#insert_ignore affect #insert as well as #multi_insert and #import (jeremyevans, tmm1)
* Add -t option to bin/sequel to output the full backtrace if an exception is raised (jeremyevans)
* Make schema_dumper extension ignore errors with indexes unless it is dumping in the database-specific type format (jeremyevans)
* Don't dump partial indexes in the MySQL adapter (jeremyevans)
* Add :ignore_index_errors option to Database#create_table and :ignore_errors option to Database#add_index (jeremyevans)
* Make graphing a complex dataset work correctly (jeremyevans)
* Fix MySQL command out of sync errors, disconnect from database if they occur (jeremyevans)
* In the schema_dumper extension, do a much better job of parsing defaults from the database (jeremyevans)
* On PostgreSQL, assume the public schema if one is not given and there is no default in Database#tables (jeremyevans)
* Ignore a :default value if creating a String :text=>true or File column on MySQL, since it doesn't support defaults on text/blob columns (jeremyevans)
* On PostgreSQL, do not raise an error when attempting to reset the primary key sequence for a table without a primary key (jeremyevans)
* Allow plugins to have a configure method that is called on every attempt to load them (jeremyevans)
* Attempting to load an already loaded plugin no longer calls the plugin's apply method (jeremyevans)
* Make plugin's plugin_opts methods return an array of arguments if multiple arguments were given, instead of just the first argument (jeremyevans)
* Keep track of loaded plugins at Model.plugins, allows plugins to depend on other plugins (jeremyevans)
* Make Dataset#insert on PostgreSQL work with static SQL (jeremyevans)
* Add lazy_attributes plugin, for creating attributes that can be lazily loaded from the database (jeremyevans)
* Add tactical_eager_loading plugin, similar to DataMapper's strategic eager loading (jeremyevans)
* Don't raise an error when loading a plugin with DatasetMethods where none of the methods are public (jeremyevans)
* Add identity_map plugin, for creating temporary thread-local identity maps with some caching (jeremyevans)
* Support savepoints when using MySQL and SQLite (jeremyevans)
* Add -C option to bin/sequel that copies one database to another (jeremyevans)
* In the schema_dumper extension, don't include defaults that contain literal strings unless the DBs are the same (jeremyevans)
* Only include valid non-partial indexes of simple column references in the PostgreSQL adapter (jeremyevans)
* Add -h option to bin/sequel for outputting the usage, alias for -? (jeremyevans)
* Add -d and -D options to bin/sequel for dumping schema migrations (jeremyevans)
* Support eager graphing for model tables that lack primary keys (jeremyevans)
* Add Model.create_table? to the schema plugin, similar to Database#create_table? (jeremyevans)
* Add Database#create_table?, which creates the table if it doesn't already exist (jeremyevans)
* Handle ordered and limited datasets correctly when using UNION, INTERSECT, or EXCEPT (jeremyevans)
* Fix unlikely threading bug with class level validations (jeremyevans)
* Make the schema_dumper extension dump tables in alphabetical order in migrations (jeremyevans)
* Add Sequel.extension method for loading extensions, so you don't have to use require (jeremyevans)
* Allow bin/sequel to respect multiple -L options instead of ignoring all but the last one (jeremyevans)
* Add :command_timeout and :provider options to ADO adapter (hgimenez)
* Fix exception messages when Sequel.string_to_* fail (jeremyevans)
* Fix String :type=>:text generic type in the Firebird adapter (wishdev)
* Add Sequel.amalgalite adapter method (jeremyevans)
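
A minimal sketch of the new many_through_many plugin, using hypothetical artists/albums/genres tables:

  class Artist < Sequel::Model
    plugin :many_through_many
    many_through_many :genres,
      [[:albums_artists, :artist_id, :album_id],
       [:albums_genres, :album_id, :genre_id]]
  end
  Artist[1].genres # genres across all of the artist's albums
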
=== 3.0.0 (2009-05-04)
* Remove dead threads from connection pool if the pool is full and a connection is requested (jeremyevans)
* Add autoincrementing primary key support in the Oracle adapter, using a sequence and trigger (jeremyevans, Mike Golod)
* Make Model#save use the same server it uses for saving as for retrieving the saved record (jeremyevans)
* Add Database#database_type method, for identifying which type of database the object is connecting to (jeremyevans)
* Add ability to reset primary key sequences in the PostgreSQL adapter (jeremyevans)
* Fix parsing of non-simple sequence names (that contain uppercase, spaces, etc.) in the PostgreSQL adapter (jeremyevans)
* Support dumping indexes in the schema_dumper extension (jeremyevans)
* Add index parsing to PostgreSQL, MySQL, SQLite, and JDBC adapters (jeremyevans)
* Correctly quote SQL Array references, and handle qualified identifiers with them (e.g. :table__column.sql_subscript(1)) (jeremyevans)
* Allow dropping an index with a name different than the default name (jeremyevans)
* Allow Dataset#from to remove existing FROM tables when called without an argument, instead of raising an error later (jeremyevans)
* Fix string quoting on Oracle so it doesn't double backslashes (jeremyevans)
* Alias the count function call in Dataset#count, fixes use on MSSQL (akitaonrails, jeremyevans)
* Allow QualifiedIdentifiers to be qualified, to allow :column.qualify(:table).qualify(:schema) (jeremyevans)
* Allow :db_type=>'mssql' option to be respected when using the DBI adapter (akitaonrails)
* Add schema_dumper extension, for dumping schema of tables (jeremyevans)
* Allow generic database types specified as ruby types to take options (jeremyevans)
* Change Dataset#exclude to invert given hash argument, not negate it (jeremyevans)
* Make Dataset#filter and related methods treat multiple arguments more intuitively (jeremyevans)
* Fix full text searching with multiple search terms on MySQL (jeremyevans)
* Fix altering a column name, type, default, or NULL/NOT NULL status on MySQL (jeremyevans)
* Fix index type syntax on MySQL (jeremyevans)
* Add temporary table support, via :temp option to Database#create_table (EppO, jeremyevans)
* Add Amalgalite adapter (jeremyevans)
* Remove Sequel::Metaprogramming#metaattr_accessor and metaattr_reader (jeremyevans)
* Remove Dataset#irregular_function_sql (jeremyevans)
* Add Dataset#full_text_sql to the MySQL adapter (dusty)
* Fix schema type parsing of decimal types on MySQL (jeremyevans)
* Make Dataset#quote_identifier work with SQL::Identifiers (jeremyevans)
* Remove methods and features deprecated in 2.12.0 (jeremyevans)
=== 2.12.0 (2009-04-03)
* Deprecate Java::JavaSQL::Timestamp#usec (jeremyevans)
* Fix Model.[] optimization introduced in 2.11.0 for databases that don't use LIMIT (jacaetevha)
* Don't use the model association plugin if SEQUEL_NO_ASSOCIATIONS constant or environment variable is defined (jeremyevans)
* Don't require core_sql if SEQUEL_NO_CORE_EXTENSIONS constant or environment variable is defined (jeremyevans)
* Add validation_helpers model plugin, which adds instance level validation support similar to previously standard validations, with a different API (jeremyevans)
* Split multi_insert into 2 methods with separate APIs, multi_insert for hashes, import for arrays of columns and values (jeremyevans)
* Deprecate Dataset#transform and Model.serialize, and model serialization plugin (jeremyevans)
* Add multi_insert_update to the MySQL adapter, used for setting specific update behavior when an error occurs when using multi_insert (dusty)
* Add multi_insert_ignore to the MySQL adapter, used for skipping errors on row inserts when using multi_insert (dusty)
* Add Sequel::MySQL.convert_invalid_date_time accessor for dealing with dates like "0000-00-00" and times like "25:00:00" (jeremyevans, epugh)
* Eliminate internal dependence on core_sql extensions (jeremyevans)
* Deprecate Migration and Migrator, require 'sequel/extensions/migration' if you want them (jeremyevans)
* Denamespace Sequel::Error descendants (e.g. use Sequel::Rollback instead of Sequel::Error::Rollback) (jeremyevans)
* Deprecate Error::InvalidTransform, Error::NoExistingFilter, and Error::InvalidStatement (jeremyevans)
* Deprecate Dataset#[] when called without an argument, and Dataset#map when called with an argument and a block (jeremyevans)
* Fix aliasing columns in the JDBC adapter (per.melin) (#263)
* Make Database#rename_table remove the cached schema entry for the table (jeremyevans)
* Make Database schema sql methods private (jeremyevans)
* Deprecate Database #multi_threaded? and #logger (jeremyevans)
* Make Dataset#where always affect the WHERE clause (jeremyevans)
* Deprecate Object#blank? and related extensions, require 'sequel/extensions/blank' to get them back (jeremyevans)
* Move lib/sequel_core into lib/sequel and lib/sequel_model into lib/sequel/model (jeremyevans)
* Remove Sequel::Schema::SQL module, move methods into Sequel::Database (jeremyevans)
* Support creating and dropping schema qualified views (jeremyevans)
* Fix saving a newly inserted record in an after_create or after_save hook (jeremyevans)
* Deprecate Dataset#print and PrettyTable, require 'sequel/extensions/pretty_table' if you want them (jeremyevans)
* Deprecate Database#query and Dataset#query, require 'sequel/extensions/query' if you want them (jeremyevans)
* Deprecate Dataset#paginate and #each_page, require 'sequel/extensions/pagination' if you want them (jeremyevans)
* Fix ~{:bool_col=>true} and related inversions of boolean values (jeremyevans)
* Add disable_insert_returning method to PostgreSQL datasets, so they fallback to just using INSERT (jeremyevans)
* Don't use savepoints by default on PostgreSQL, use the :savepoint option to Database#transaction to use a savepoint (jeremyevans)
* Deprecate Database#transaction accepting a server symbol argument, use an options hash with the :server option (jeremyevans)
* Add Model.use_transactions for setting whether models should use transactions when destroying/saving records (jeremyevans, mjwillson)
* Deprecate Model::Validation::Errors, use Model::Errors (jeremyevans)
* Deprecate string inflection methods, require 'sequel/extensions/inflector' if you use them (jeremyevans)
* Deprecate Model validation class methods, override Model#validate instead or Model.plugin validation_class_methods (jeremyevans)
* Deprecate Model schema methods, use Model.plugin :schema (jeremyevans)
* Deprecate Model hook class methods, use instance methods instead or Model.plugin :hook_class_methods (jeremyevans)
* Deprecate Model.set_sti_key, use Model.plugin :single_table_inheritance (jeremyevans)
* Deprecate Model.set_cache, use Model.plugin :caching (jeremyevans)
* Move most model instance methods into Model::InstanceMethods, for easier overriding of instance methods for all models (jeremyevans)
* Move most model class methods into Model::ClassMethods, for easier overriding of class methods for all models (jeremyevans)
* Deprecate String#to_date, #to_datetime, #to_time, and #to_sequel_time, use require 'sequel/extensions/string_date_time' if you want them (jeremyevans)
* Deprecate Array#extract_options! and Object#is_one_of? (jeremyevans)
* Deprecate Object#meta_def, #meta_eval, and #metaclass (jeremyevans)
* Deprecate Module#class_def, #class_attr_overridable, #class_attr_reader, #metaalias, #metaattr_reader, and #metaattr_accessor (jeremyevans)
* Speed up the calling of most column accessor methods, and reduce memory overhead of creating them (jeremyevans)
* Deprecate Model#set_restricted using Model#[] if no setter method exists, a symbol is used, and the columns are not set (jeremyevans)
* Deprecate Model#set_with_params and #update_with_params (jeremyevans)
* Deprecate Model#save!, use Model.save(:validate=>false) (jeremyevans)
* Deprecate Model#dataset (jeremyevans)
* Deprecate Model.is and Model.is_a, use Model.plugin for plugins (jeremyevans)
* Deprecate Model.str_columns, Model#str_columns, #set_values, #update_values (jeremyevans)
* Deprecate Model.delete_all, .destroy_all, .size, and .uniq (jeremyevans)
* Copy all current dataset options when calling Model.db= (jeremyevans)
* Deprecate Model.belongs_to, Model.has_many, and Model.has_and_belongs_to_many (jeremyevans)
* Remove SQL::SpecificExpression, have subclasses inherit from SQL::Expression instead (jeremyevans)
* Deprecate SQL::CastMethods#cast_as (jeremyevans)
* Deprecate calling Database#schema without a table argument (jeremyevans)
* Remove cached version of @db_schema in model instances to reduce memory and marshalling overhead (tmm1)
* Deprecate Dataset#quote_column_ref and Dataset#symbol_to_column_ref (jeremyevans)
* Deprecate Dataset#size and Dataset#uniq (jeremyevans)
* Deprecate passing options to Dataset#each, #all, #single_record, #single_value, #sql, #select_sql, #update, #update_sql, #delete, #delete_sql, and #exists (jeremyevans)
* Deprecate Dataset#[Integer] (jeremyevans)
* Deprecate Dataset#create_view and Dataset#create_or_replace_view (jeremyevans)
* Model datasets now have a model accessor that returns the related model (jeremyevans)
* Model datasets no longer have :models and :polymorphic_key options (jeremyevans)
* Deprecate Dataset.dataset_classes, Dataset#model_classes, Dataset#polymorphic_key, and Dataset#set_model (jeremyevans)
* Allow Database#get and Database#select to take a block (jeremyevans)
* Deprecate Database#>> (jeremyevans)
* Deprecate String#to_blob and Sequel::SQL::Blob#to_blob (jeremyevans)
* Deprecate use of Symbol#| for SQL array subscripts, add Symbol#sql_subscript (jeremyevans)
* Deprecate Symbol#to_column_ref (jeremyevans)
* Deprecate String#expr (jeremyevans)
* Deprecate Array#to_sql, String#to_sql, and String#split_sql (jeremyevans)
* Deprecate passing an array to Database#<< (jeremyevans)
* Deprecate Range#interval (jeremyevans)
* Deprecate Enumerable#send_each (jeremyevans)
* Deprecate Hash#key on ruby 1.8, change some SQLite adapter constants (jeremyevans)
* Deprecate Sequel.open, Sequel.use_parse_tree=?, and the upcase_identifier methods (jeremyevans)
* Deprecate virtual row blocks without block arguments, unless Sequel.virtual_row_instance_eval is enabled (jeremyevans)
* Support schema parsing in the Oracle adapter (jacaetevha)
* Allow virtual row blocks to be instance_evaled, add Sequel.virtual_row_instance_eval= (jeremyevans)
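
A minimal sketch of the split multi_insert/import APIs, using a hypothetical items table:

  # import takes an array of columns and an array of value arrays
  DB[:items].import([:name, :price], [['a', 1], ['b', 2]])
  # multi_insert takes an array of hashes
  DB[:items].multi_insert([{:name=>'a', :price=>1}, {:name=>'b', :price=>2}])
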
=== 2.11.0 (2009-03-02)
* Optimize Model.[] by using static sql when possible, for a 30-40% speed increase (jeremyevans)
* Add Dataset#with_sql, which returns a clone of the dataset with static SQL (jeremyevans)
* Refactor Dataset#literal so it doesn't need to be overridden in subadapters, for a 20-25% performance increase (jeremyevans)
* Remove SQL::IrregularFunction, no longer used internally (jeremyevans)
* Allow String#lit to take arguments and return a SQL::PlaceholderLiteralString (jeremyevans)
* Add Model#set_associated_object, used by the many_to_one setter method, for easier overriding (jeremyevans)
* Allow use of database independent types when casting (jeremyevans)
* Give association datasets knowledge of the model object that created them and the related association reflection (jeremyevans)
* Make Dataset#select, #select_more, #order, #order_more, and #get take a block that yields a SQL::VirtualRow, similar to #filter (jeremyevans)
* Fix stored procedures in MySQL adapter when multiple arguments are used (clivecrous)
* Add :conditions association option, for easier filtering of associated objects (jeremyevans)
* Add :clone association option, for making clones of existing associations (jeremyevans)
* Handle typecasting invalid date strings (and possible other types) correctly (jeremyevans)
* Add :compress=>false option to MySQL adapter to turn off compression of client-server connection (tmm1)
* Set SQL_AUTO_IS_NULL=0 on MySQL connections, disable with :auto_is_null=>false (tmm1)
* Add :timeout option to MySQL adapter, default to 30 days (tmm1)
* Set MySQL encoding using Mysql#options so it works across reconnects (tmm1)
* Fully support blobs on SQLite (jeremyevans)
* Add String#to_sequel_blob, alias String#to_blob to that (jeremyevans)
* Fix default index names when a non-String or Symbol column is used (jeremyevans)
* Fix some ruby -w warnings (jeremyevans) (#259)
* Fix issues with default column values, table names, and quoting in the rename_column and drop_column support in shared SQLite adapter (jeremyevans)
* Add rename_column support to SQLite shared adapter (jmhodges)
* Add validates_inclusion_of validation (jdunphy)
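A rough sketch of the Dataset#with_sql and block-taking select entries above (DB is a hypothetical Database handle):

  # Clone a dataset with static SQL:
  DB[:items].with_sql('SELECT * FROM items WHERE id = 1').all

  # select with a virtual row block, like filter:
  DB[:items].select{|o| o.max(:id)}
  # SELECT max(id) FROM items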
=== 2.10.0 (2009-02-03)
* Don't use a default schema any longer in the shared PostgreSQL adapter (jeremyevans)
* Make Dataset#quote_identifier return LiteralStrings as-is (jeremyevans)
* Support symbol keys and unnested hashes in the sequel command line tool's yaml config support (jeremyevans)
* Add schema parsing support to the JDBC adapter (jeremyevans)
* Add per-database type translation support for schema changes, translating ruby classes to database specific types (jeremyevans)
* Add Sequel::DatabaseConnectionError, for indicating that Sequel wasn't able to connect to the database (jeremyevans)
* Add validates_not_string validation, useful in conjunction with raise_on_typecast_failure = false (jeremyevans)
* Don't modify Model#new? and Model#changed_columns when saving a record until after the after hooks have been run (tamas, jeremyevans)
* Database#quote_identifiers= now affects future schema modification statements, even if it is not used before one of the schema modification statements (jeremyevans)
* Fix literalization of blobs when using the PostgreSQL JDBC subadapter (jeremyevans)
* Fix literalization of date and time types when using the MySQL JDBC subadapter (jeremyevans)
* Convert some Java specific types to ruby types on output in the JDBC adapter (jeremyevans)
* Add Database#tables method to JDBC adapter (jeremyevans)
* Add H2 JDBC subadapter (logan_barnett, david_koontz, james_britt, jeremyevans)
* Add identifier_output_method, used for converting identifiers coming out of the database, replacing the lowercase support on some databases (jeremyevans)
* Add identifier_input_method, used for converting identifiers going into the database, replacing upcase_identifiers (jeremyevans) (example below)
* Add :allow_missing validation option, useful if the database provides a good default (jeremyevans)
* Fix literalization of SQL::Blobs in DataObjects and JDBC adapter's postgresql subadapters when ruby 1.9 is used (jeremyevans)
* When using standard strings in the postgres adapter with the postgres-pr driver, use custom string escaping to prevent errors (jeremyevans)
* Before hooks now run in reverse order of being added, so later ones are run first (tamas)
* Add Firebird adapter, requires Firebird ruby driver located at http://github.com/wishdev/fb (wishdev)
* Don't clobber the following Symbol instance methods when using ruby 1.9: [], <, <=, >, >= (jeremyevans)
* Quote the table name and the index for PostgreSQL index creation (jeremyevans)
* Add DataObjects adapter, supporting PostgreSQL, MySQL, and SQLite (jeremyevans)
* Add ability for Database#create_table to take options, support specifying MySQL engine, charset, and collate per table (pusewicz, jeremyevans)
* Add Model.add_hook_type class method, for adding your own hook types, mostly for use by plugin authors (pkondzior, jeremyevans)
* Add Sequel.version for getting the internal version of Sequel (pusewicz, jeremyevans)
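A short sketch of the identifier conversion settings added above (hypothetical DB handle; behavior as described in the entries):

  DB.identifier_input_method = :upcase     # identifiers sent to the database
  DB.identifier_output_method = :downcase  # identifiers coming back out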
=== 2.9.0 (2009-01-12)
* Add -L option to sequel command line tool to load all .rb files in the given directory (pkondzior, jeremyevans)
* Fix Dataset#destroy for model datasets that can't handle nested queries (jeremyevans)
* Improve the error messages in parts of Sequel::Model (jeremyevans, pusewicz)
* Much better support for Dataset#{union,except,intersect}, allowing chaining and respecting order (jeremyevans) (example below)
* Default to logging only WARNING level messages when connecting to PostgreSQL (jeremyevans)
* Fix add_foreign_key for MySQL (jeremyevans, aphyr)
* Correctly literalize BigDecimal NaN and (+-)Infinity values (jeremyevans) (#256)
* Make Sequel raise an Error if you attempt to subclass Sequel::Model before setting up a database connection (jeremyevans)
* Add Sequel::BeforeHookFailed exception to be raised when a record fails because a before hook fails (bougyman)
* Add Sequel::ValidationFailed exception to be raised when a record fails because a validation fails (bougyman)
* Make Database#schema raise an error if given a table that doesn't exist (jeremyevans) (#255)
* Make Model#inspect call Model#inspect_values private method for easier overloading (bougyman)
* Add methods to create and drop functions, triggers, and procedural languages on PostgreSQL (jeremyevans)
* Fix Dataset#count when using UNION, EXCEPT, or INTERSECT (jeremyevans)
* Make SQLite keep table's primary key information when dropping columns (jmhodges)
* Support dropping indices on SQLite (jmhodges)
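A minimal sketch of the improved compound dataset support noted above (DB is a hypothetical Database handle):

  # Compound datasets can now be chained:
  DB[:a].union(DB[:b]).intersect(DB[:c]).except(DB[:d])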
=== 2.8.0 (2008-12-05)
* Support drop column operations inside a transaction on sqlite (jeremyevans)
* Support literal strings with placeholders and subselects in prepared statements (jeremyevans)
* Have the connection pool remove disconnected connections when the adapter supports it (jeremyevans)
* Make Dataset#exists return a LiteralString (jeremyevans)
* Support multiple SQL statements in one query in the MySQL adapter (jeremyevans)
* Add stored procedure support for the MySQL and JDBC adapters (jeremyevans, krsgoss) (#252)
* Support options when altering a column's type (for changing enums, varchar size, etc.) (jeremyevans)
* Support AliasedExpressions in tables when using implicitly qualified arguments in joins (jeremyevans)
* Support Dataset#except on Oracle (jeremyevans)
* Raise errors when EXCEPT/INTERSECT is used when not supported (jeremyevans)
* Fix ordering of UNION, INTERSECT, and EXCEPT statements (jeremyevans) (#253)
* Support aliasing subselects in the Oracle adapter (jeremyevans)
* Add a subadapter for the Progress RDBMS to the ODBC adapter (:db_type=>'progress') (groveriffic) (#251)
* Make MySQL and Oracle adapters raise an Error if asked to do a SELECT DISTINCT ON (jeremyevans)
* Set standard_conforming_strings = ON by default when using PostgreSQL, turn off with Sequel::Postgres.force_standard_strings = false (jeremyevans) (#247)
* Fix Database#rename_table when using PostgreSQL (jeremyevans) (#248)
* Whether to upcase or quote identifiers can now be set separately, via Sequel.upcase_identifiers= or the :upcase_identifiers database option (jeremyevans)
* Support transactions in the ODBC adapter (dlee)
* Support multi_insert_sql and unicode string literals in MSSQL shared adapter (dlee)
* Make PostgreSQL use the default schema if parsing the schema for all tables at once, even if :schema=>nil option is used (jeremyevans)
* Make MySQL adapter not raise an error when giving an SQL::Identifier object to the schema modification methods such as create_table (jeremyevans)
* The keys of the hash returned by Database#schema without a table name are now quoted strings instead of symbols (jeremyevans)
* Make Database#schema handle implicit schemas on all databases and multiple identifier object types (jeremyevans)
* Remove Sequel.odbc_mssql method (jeremyevans) (#249)
* More optimization of Model#initialize (jeremyevans)
* Treat interval as its own type, not an integer type (jeremyevans)
* Allow use of an implicitly qualified symbol as the argument to Symbol#qualify (:a.qualify(:b__c) gives b.c.a), fixing model associations in different schemas (jeremyevans) (#246) (example below)
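A small sketch of the implicitly qualified Symbol#qualify argument described above (hypothetical names; Symbol core extensions of this era):

  :a.qualify(:b__c)
  # b.c.a (column a on table c in schema b)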
=== 2.7.1 (2008-11-04)
* Fix PostgreSQL Date optimization so that it doesn't reject dates like 11/03/2008 (jeremyevans)
=== 2.7.0 (2008-11-03)
* Transform AssociationReflection from a single class to a class hierarchy (jeremyevans)
* Optimize Date object creation in PostgreSQL adapter (jeremyevans)
* Allow easier creation of custom association types, though support for them may still be suboptimal (jeremyevans)
* Add :eager_grapher option to associations, which the user can use to override the default eager_graph code (jeremyevans)
* Associations are now inherited when a model class is subclassed (jeremyevans)
* Instance methods added by associations are now added to an anonymous module the class includes, allowing you to override them and use super (jeremyevans)
* Add #add_graph_aliases (select_more for graphs), and allow use of arbitrary expressions when graphing (jeremyevans)
* Fix a corner case where the wrong table name is used in eager_graph (jeremyevans)
* Make Dataset#join_table take an option hash instead of a table_alias argument, add support for :implicit_qualifier option (jeremyevans) (example below)
* Add :left_primary_key and :right_primary_key options to many_to_many associations (jeremyevans)
* Add :primary_key option to one_to_many and many_to_one associations (jeremyevans)
* Make after_load association callbacks take effect when eager loading via eager (jeremyevans)
* Add a :uniq association option to many_to_many associations (jeremyevans)
* Support using any expression as the argument to Symbol#like (jeremyevans)
* Much better support for multiple schemas in PostgreSQL (jeremyevans) (#243)
* The first argument to Model#initialize can no longer be nil, it must be a hash if it is given (jeremyevans)
* Remove Sequel::Model.lazy_load_schema= setting (jeremyevans)
* Lazily load model instance options such as raise_on_save_failure, for better performance (jeremyevans)
* Make Model::Validation::Errors more Rails-compatible (jeremyevans)
* Refactor model hooks for performance (jeremyevans)
* Major performance enhancement when fetching rows using PostgreSQL (jeremyevans)
* Don't typecast serialized columns in models (jeremyevans)
* Add Array#sql_array to handle ruby arrays whose elements are all two-element arrays as SQL arrays (jeremyevans) (#245)
* Add ComplexExpression#== and #eql?, for checking equality (rubymage) (#244)
* Allow full text search on PostgreSQL to include rows where a search column is NULL (jeremyevans)
* PostgreSQL full text search queries with multiple columns are joined with a space, to prevent merging adjacent words into one (michalbugno)
* Don't modify a dataset's cached column information if calling #each with an option that modifies the columns (jeremyevans)
* The PostgreSQL adapter will now generally default to using a unix socket in /tmp if no host is specified, instead of a tcp socket to localhost (jeremyevans)
* Make Dataset#sql call Dataset#select_sql instead of being an alias, to allow for easier subclassing (jeremyevans)
* Split Oracle adapter into shared and unshared parts, so Oracle is better supported when using JDBC (jeremyevans)
* Fix automatic loading of Oracle driver when using JDBC adapter (bburton333) (#242)
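A rough sketch of the new Dataset#join_table option hash (hypothetical DB handle and tables; the old table_alias positional argument becomes the :table_alias option):

  DB[:items].join_table(:inner, :categories, {:id=>:category_id}, :table_alias=>:c)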
=== 2.6.0 (2008-10-11)
* Make the sqlite adapter respect the Sequel.datetime_class setting, for timestamp and datetime types (jeremyevans)
* Enhance the CASE statement support to include an optional expression (jarredholman) (example below)
* Default to using the simple language if no language is specified for a full text index on PostgreSQL (michalbugno)
* Add Model.raise_on_typecast_failure=, which makes it possible to not raise errors on invalid typecasts (michalbugno)
* Add schema.rdoc file, which provides a brief description of the various parts of Sequel related to schema modification (jeremyevans)
* Fix constraint generation when not using a proc or interpolated string (jeremyevans)
* Make eager_graph respect associations' :order options (use :order_eager_graph=>false to disable) (jeremyevans)
* Cache negative lookup when eagerly loading many_to_one associations where no objects have an associated object (jeremyevans)
* Allow string keys to be used when using Dataset#multi_insert (jeremyevans)
* Fix join_table when doing the first join for a dataset where the first source is a dataset when using unqualified columns (jeremyevans)
* Fix a few corner cases in eager_graph (jeremyevans)
* Support transactions on MSSQL (jeremyevans)
* Use string literals in AS clauses on SQLite (jeremyevans) (#241)
* Add AlterTableGenerator#set_column_allow_null, for setting or dropping NOT NULL on columns (divoxx)
* Database#tables now works for MySQL databases using the JDBC adapter (jeremyevans)
* Database#drop_view can now take multiple arguments to drop multiple views at once (jeremyevans)
* Schema modification methods (e.g. drop_table, create_table!) now remove the cached schema entry (jeremyevans)
* Models can now determine their primary keys by looking at the schema (jeremyevans)
* No longer include :numeric_precision and :max_chars entries in the schema column hashes, use the :db_type entry instead (jeremyevans)
* Make schema parsing on PostgreSQL handle implicit schemas (e.g. schema(:schema__table)), so it works with models for tables outside the public schema (jeremyevans)
* Significantly speed up schema parsing on MySQL (jeremyevans)
* Include primary key information when parsing the schema (jeremyevans)
* Fix schema generation of composite foreign keys on MySQL (clivecrous, jeremyevans)
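A minimal sketch of the CASE statement's new optional expression (Hash core extensions of this era; column names hypothetical):

  {1=>'active', 2=>'inactive'}.case('unknown', :status_id)
  # CASE status_id WHEN 1 THEN 'active' WHEN 2 THEN 'inactive' ELSE 'unknown' END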
=== 2.5.0 (2008-09-03)
* Add Dataset#set_defaults and #set_overrides, used for scoping the values used in insert/update statements (jeremyevans)
* Allow Models to use the RETURNING clause when inserting records on PostgreSQL (jeremyevans)
* Raise Sequel::DatabaseError instead of generic Sequel::Error for database errors, don't swallow tracebacks (jeremyevans)
* Use INSERT ... RETURNING ... with PostgreSQL 8.2 and higher (jeremyevans)
* Make insert_sql, delete_sql, and update_sql respect the :sql option (jeremyevans)
* Default to converting 2 digit years, use Sequel.convert_two_digit_years = false to get back the old behavior (jeremyevans)
* Make the PostgreSQL adapter with the pg driver use async_exec, so it doesn't block the entire interpreter (jeremyevans)
* Make the schema generators support composite primary and foreign keys and unique constraints (jarredholman) (example below)
* Work with the 2008.08.17 version of the pg gem (erikh)
* Disallow abuse of SQL function syntax for types (use :type=>:varchar, :size=>255 instead of :type=>:varchar[255]) (jeremyevans)
* Quote index names when creating or dropping indexes (jeremyevans, SanityInAnarchy)
* Don't have column accessor methods override plugin instance methods (jeremyevans)
* Allow validation of multiple attributes at once, with built in support for uniqueness checking of multiple columns (jeremyevans)
* In PostgreSQL adapter, fix inserting a row with a primary key value inside a transaction (jeremyevans)
* Allow before_save and before_update to affect the columns saved by save_changes (jeremyevans)
* Make Dataset#single_value work when graphing, which fixes count and paginate on graphed datasets (jeremyevans)
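A short sketch of a composite primary key in the schema generator, per the entry above (hypothetical DB handle and table):

  DB.create_table(:items) do
    column :group_id, :integer
    column :name, :text
    primary_key [:group_id, :name]
  end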
=== 2.4.0 (2008-08-06)
* Handle Java::JavaSql::Date type in the JDBC adapter (jeremyevans)
* Add support for read-only slave/writable master databases and database sharding (jeremyevans)
* Remove InvalidExpression, InvalidFilter, InvalidJoinType, and WorkerStop exceptions (jeremyevans)
* Add prepared statement/bound variable support (jeremyevans) (example below)
* Fix anonymous column names in the ADO adapter (nusco)
* Remove odbc_mssql adapter, use :db_type=>'mssql' option instead (jeremyevans)
* Split MSSQL specific syntax into separate file, usable by ADO and ODBC adapters (nusco, jeremyevans)
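A rough sketch of the prepared statement/bound variable support noted above (hypothetical DB handle; :$i marks a bind variable):

  ds = DB[:items].filter(:id=>:$i)
  ds.call(:select, :i=>1)            # bound variables
  ds.prepare(:select, :select_by_id) # named prepared statement
  DB.call(:select_by_id, :i=>1)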
=== 2.3.0 (2008-07-25)
* Enable almost full support for MySQL using JDBC (jeremyevans)
* Fix ODBC adapter's conversion of ::ODBC::Time values (Michael Xavier)
* Enable full support for SQLite-JDBC using the JDBC adapter (jeremyevans)
* Minor changes to allow for full Ruby 1.9 compatibility (jeremyevans)
* Make Database#disconnect work for the ADO adapter (spicyj)
* Don't raise an exception in the ADO adapter if the dataset contains no records (nusco)
* Enable almost full support of PostgreSQL-JDBC using the JDBC adapter (jeremyevans)
* Remove Sequel::Worker (jeremyevans)
* Make PostgreSQL adapter not raise an error when inserting records into a table without a primary key (jeremyevans)
* Make Database.uri_to_options a private class method (jeremyevans)
* Make JDBC load drivers automatically for PostgreSQL, MySQL, SQLite, Oracle, and MSSQL (jeremyevans)
* Make Oracle adapter work with a nonstandard Oracle database port (pavel.lukin)
* Typecast '' to nil by default for non-string non-blob columns, add typecast_empty_string_to_nil= model class and instance methods (jeremyevans)
* Use a simpler select in Dataset#empty?, fixes use with MySQL (jeremyevans)
* Add integration test suite, testing sequel against a real database, with nothing mocked (jeremyevans)
* Make validates_length_of default tag depend on presence of options passed to it (jeremyevans)
* Combine the directory structure for sequel_model and sequel_core, now there is going to be only one gem named sequel (jeremyevans)
=== 2.2.0 (2008-07-05)
* Add :extend association option, extending the dataset with module(s) (jeremyevans)
* Add :after_load association callback option, called after associated objects have been loaded from the database (jeremyevans)
* Make validation methods support a :tag option, to work correctly with source reloading (jeremyevans)
* Add :before_add, :after_add, :before_remove, :after_remove association callback options (jeremyevans)
* Break many_to_one association setter method in two parts, for easier overriding (jeremyevans)
* Model.validates_presence_of now considers false as present instead of absent (jeremyevans)
* Add Model.raise_on_save_failure, raising errors on save failure instead of returning false (now nil); default is true (jeremyevans)
* Add :eager_loader association option, to specify code to be run when eager loading (jeremyevans)
* Make :many_to_one associations support :dataset, :order, :limit association options, as well as block arguments (jeremyevans)
* Add :dataset association option, which overrides the default base dataset to use (jeremyevans)
* Add :eager_graph association option, works just like :eager except it uses #eager_graph (jeremyevans)
* Add :graph_join_table_join_type association option (jeremyevans)
* Add :graph_only_conditions and :graph_join_table_only_conditions association options (jeremyevans)
* Add :graph_block and :graph_join_table_block association options (jeremyevans)
* Set the model's dataset's columns in addition to the model's columns when loading the schema for a model (jeremyevans)
* Make caching work correctly with subclasses (jeremyevans)
* Add the Model.to_hash dataset method (jeremyevans)
* Filter blocks now yield a SQL::VirtualRow argument, which is useful if another library defines operator methods on Symbol (jeremyevans)
* Add Symbol#identifier method, to make x__a be treated as "x__a" instead of "x"."a" (jeremyevans) (example below)
* Dataset#update no longer takes a block, please use a hash argument with the expression syntax instead (jeremyevans)
* ParseTree support has been removed from Sequel (jeremyevans)
* Database#drop_column is now supported in the SQLite adapter (abhay)
* Tinyint columns can now be considered integers instead of booleans by setting Sequel.convert_tinyint_to_bool = false (samsouder)
* Allow the use of URL parameters in connection strings (jeremyevans)
* Ignore any previously selected columns when using Dataset#graph for the first time (jeremyevans)
* Dataset#graph now accepts a block which is passed to join_table (jeremyevans)
* Make Dataset#columns ignore any filtering, ordering, and distinct clauses (jeremyevans)
* Use the safer connection-specific string escaping methods for PostgreSQL (jeremyevans)
* Database#transaction now yields a connection when using the Postgres adapter, just like it does for other adapters (jeremyevans)
* Dataset#count now works for a limited dataset (divoxx)
* Database#add_index is now supported in the SQLite adapter (abhay)
* Sequel's MySQL adapter should no longer conflict with ActiveRecord's use of MySQL (careo)
* Treat Hash as expression instead of column alias when used in DISTINCT, ORDER BY, and GROUP BY clauses (jeremyevans)
* PostgreSQL bytea fields are now fully supported (dlee)
* For PostgreSQL, don't raise an error when assigning a value to a SERIAL PRIMARY KEY field when inserting records (jeremyevans)
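A minimal sketch of Symbol#identifier from the entry above (hypothetical table; Symbol core extensions of this era):

  DB[:t].select(:x__a.identifier)
  # SELECT x__a FROM t  (not "x"."a")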
=== 2.1.0 (2008-06-17)
* Break association add_/remove_/remove_all_ methods into two parts, for easier overriding (jeremyevans)
* Add Model.strict_param_setting, on by default, which raises errors if a missing/restricted method is called via new/set/update/etc. (jeremyevans)
* Raise errors when using association methods on objects without valid primary keys (jeremyevans)
* The model's primary key is a restricted column by default; add Model.unrestrict_primary_key to get the old behavior (jeremyevans)
* Add Model.set_(allowed|restricted)_columns, which affect which columns create/new/set/update/etc. modify (jeremyevans)
* Calls to Model.def_dataset_method with a block are cached and reapplied to the new dataset if set_dataset is called, even in a subclass (jeremyevans)
* The :reciprocal option to associations should now be the symbol name of the reciprocal association, not an instance variable symbol (jeremyevans)
* Add Model#associations, which is a hash holding a cache of associated objects, with each association being a separate key (jeremyevans)
* Make all associations support a :graph_select option, specifying a column or array of columns to select when using eager_graph (jeremyevans)
* Bring back Model#set and Model#update, now the same as Model#set_with_params and Model#update_with_params (jeremyevans)
* Allow model datasets to call to_hash without any arguments, which allows easy creation of identity maps (jeremyevans)
* Add Model.set_sti_key, for easily setting up single table inheritance (jeremyevans)
* Make all associations support a :read_only option, which doesn't add methods that modify the database (jeremyevans)
* Make *_to_many associations support a :limit option, for specifying a limit to the resulting records (and possibly an offset) (jeremyevans)
* Make association block argument and :eager option affect the _dataset method (jeremyevans)
* Add a :one_to_one option to one_to_many associations, which creates a getter and setter similar to many_to_one (a.k.a. has_one) (jeremyevans)
* add_ and remove_ one_to_many association methods now raise an error if the passed object cannot be saved, instead of saving without validation (jeremyevans)
* Add support for :if option on validations, using a symbol (specifying an instance method) or a proc (dtsato)
* Support bitwise operators for NumericExpressions: &, |, ^, ~, <<, >> (jeremyevans)
* No longer raise an error for Dataset#filter(true) or Dataset#filter(false) (jeremyevans)
* Allow Dataset#filter, #or, #exclude and other methods that call them to use both the block and regular arguments (jeremyevans)
* ParseTree support is now officially deprecated, use Sequel.use_parse_tree = false to use the expression (blockless) filters inside blocks (jeremyevans)
* Remove :pool_reuse_connections ConnectionPool/Database option, MySQL users need to be careful with nested queries (jeremyevans)
* Allow Dataset#graph :select option to take an array of columns to select (jeremyevans)
* Allow Dataset#to_hash to be called with only one argument, allowing for easy creation of lookup tables for a single key (jeremyevans)
* Allow join_table to accept a block providing the aliases and previous joins, that allows you to specify arbitrary conditions properly qualified (jeremyevans)
* Support NATURAL, CROSS, and USING joins in join_table (jeremyevans)
* Make sure HAVING comes before ORDER BY, per the SQL standard and at least MySQL, PostgreSQL, and SQLite (juco)
* Add cast_numeric and cast_string methods for use in the Sequel DSL, that have default types and wrap the object in the correct class (jeremyevans) (example below)
* Add Symbol#qualify, for adding a table/schema qualifier to a column/table name (jeremyevans)
* Remove Module#metaprivate, since it duplicates the standard Module#private_class_method (jeremyevans)
* Support the SQL CASE expression via Array#case and Hash#case (jeremyevans)
* Support the SQL EXTRACT function: :date.extract(:year) (jeremyevans)
* Convert numeric fields to BigDecimal in PostgreSQL adapter (jeremyevans)
* Add :decimal fields to the schema parser (jeremyevans)
* The expr argument to join_table now allows the same arguments as filter, so it can take a string or a blockless filter expression (brushbox, jeremyevans)
* No longer assume the expr argument to join_table references the primary key column (jeremyevans)
* Rename the Sequel.time_class setting to Sequel.datetime_class (jeremyevans)
* Add savepoint/nesting support to postgresql transactions (elven)
* Use the specified table alias when joining a dataset, instead of the automatically generated alias (brushbox)
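A short sketch of the cast and EXTRACT helpers added above (Symbol core extensions of this era; column names hypothetical):

  :n.cast_numeric          # CAST(n AS integer) by default, supports math operators
  :s.cast_string           # casts to a string type, supports string operators
  :date.extract(:year)     # extract(year FROM date)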
=== 2.0.1 (2008-06-04)
* Make the choice of Time or DateTime optional for typecasting :datetime types, default to Time (jeremyevans)
* Reload database schema for table when calling Model.create_table (jeremyevans)
* Have PostgreSQL money type use BigDecimal instead of Float (jeremyevans)
* Have the PostgreSQL and MySQL adapters use the Sequel.time_class setting for datetime/timestamp types (jeremyevans)
* Add Sequel.time_class and String#to_sequel_time, used for converting time values from the database to either Time (default) or DateTime (jeremyevans)
* Make identifier quoting uppercase by default, to work better with the SQL standard, override in PostgreSQL (jeremyevans) (#232)
* Add StringExpression#+, for simple SQL string concatenation (:x.sql_string + :y) (jeremyevans)
* Make StringMethods.like do a case sensitive search on MySQL (use ilike for the old behavior) (jeremyevans) (example below)
* Add StringMethods.ilike, for case insensitive pattern matching (jeremyevans)
* Refactor ComplexExpression into three subclasses and a few modules, so operators that don't make sense are not defined for the class (jeremyevans)
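A minimal sketch of the string concatenation and ilike entries above (Symbol core extensions of this era; columns hypothetical):

  :first.sql_string + ' ' + :last   # SQL string concatenation
  :name.like('abc%')                # now case sensitive on MySQL
  :name.ilike('abc%')               # case insensitive matching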
=== 2.0.0 (2008-06-01)
* Remove methods deprecated in 1.5.0 (jeremyevans)
* Add typecasting on attribute assignment to Sequel::Model objects, optional but enabled by default (jeremyevans)
* Returning false in one of the before_ hooks now causes the appropriate method(s) to immediately return false (jeremyevans)
* Add remove_all_* association method for *_to_many associations, which removes the association with all currently associated objects (jeremyevans)
* Add Model.lazy_load_schema=, when set to true, it loads the schema on first instantiation (jeremyevans)
* Add before_validation and after_validation hooks, called whenever the model is validated (jeremyevans)
* Add Model.default_foreign_key, a private class method that allows changing the default foreign key that Sequel will use in associations (jeremyevans)
* Cache negative lookup when eagerly loading many_to_one associations (jeremyevans)
* Make all associations support the :select option, not just many_to_many (jeremyevans)
* Allow the use of blocks when eager loading, and add the :eager_block and :allow_eager association options for configuration (jeremyevans)
* Add the :graph_join_type, :graph_conditions, and :graph_join_table_conditions association options, used when eager graphing (jeremyevans)
* Add AssociationReflection class (subclass of Hash), to make calling a couple of private Model methods unnecessary (jeremyevans)
* Change hook methods so that if a tag/method is specified it overwrites an existing hook block with the same tag/method (jeremyevans)
* Refactor String inflection support, you must use String.inflections instead of Inflector.inflections now (jeremyevans)
* Allow connection to ODBC-MSSQL via a URL (petersumskas) (#230)
* Comprehensive update of all documentation, except for the block filters and adapters (jeremyevans)
* Handle Date and DateTime value literalization correctly in adapters (jeremyevans)
* Literalize DateTime values the same as Time values (jeremyevans)
* MySQL tinyints are now returned as boolean values instead of integers (jeremyevans)
* Set additional MySQL charset options required for creating tables and databases (tmm1)
* Add Module#metaattr_accessor for creating attr_accessors for the metaclass (jeremyevans)
* Add SQL string concatenation support to blockless filters, via Array#sql_string_join (jeremyevans)
* Add Pagination#last_page? and Pagination#first_page? (apeiros)
* Add limited column reflection support, tested on PostgreSQL, MySQL, and SQLite (jeremyevans)
* Allow the use of :schema__table___table_alias syntax for tables, similar to the column support (jeremyevans) (example below)
* Merge metaid gem into core_ext.rb and clean it up, so sequel now has no external dependencies (jeremyevans)
* Add Dataset#as, so using a dataset as a column with an alias is not deprecated (jeremyevans)
* Add Dataset#invert, which returns a dataset with inverted HAVING and WHERE clauses (jeremyevans)
* Add blockless filter syntax support (jeremyevans)
* Passing an array to Dataset#order and Dataset#select no longer works, you need to pass multiple arguments (jeremyevans)
* You should use '?' instead of '(?)' when using interpolated strings with array arguments (jeremyevans)
* Dataset.literal now surrounds the literalization of arrays with parentheses (jeremyevans)
* Add echo option (back?) to sequel command line tool, via -E or --echo (jeremyevans)
* Allow databases to have multiple loggers (jeremyevans)
* The sequel command line tool now also accepts a path to a database config YAML file in addition to a URI (mtodd)
* Major update of the postgresql adapter (jdavis, jeremyevans) (#225)
* Make returning inside of a database transaction commit the transaction (ahoward, jeremyevans)
* Dataset#to_table_reference is now protected, and it has a different API (jeremyevans)
* Dataset#join_table and related functions now take an explicit optional table_alias argument, you can no longer include the table alias in the table argument (jeremyevans)
* Aliased and/or qualified columns with embedded spaces can now be specified as symbols (jeremyevans)
* When identifier quoting is enabled, the SQL standard double quote is used by default (jeremyevans)
* When identifier quoting is enabled, quote tables as well as columns (jeremyevans)
* Make identifier quoting optional, enabled by default (jeremyevans)
* Allow Sequel::Database.connect and related methods to take a block that disconnects the database when the block finishes (jeremyevans)
* Add Dataset#unfiltered, for removing filters from dataset (jeremyevans)
* Add add_foreign_key and add_primary_key methods to the AlterTableGenerator (jeremyevans)
* Allow migration files to have more than 3 digits (jeremyevans)
* Add methods directly to Dataset instead of including modules (jeremyevans)
* Make some Dataset instance methods private: invert_order, insert_default_values_sql (jeremyevans)
* Don't add methods that depend on ParseTree unless you can load ParseTree (jeremyevans)
* Don't wipe out the cached columns every time a dataset is cloned, but only on changes to :select, :sql, :from, or :join (jeremyevans)
* Fix Oracle Adapter (yasushi.abe)
* Fixed sqlite uri so that sqlite:// works just like file:// (2 slashes for a relative path, 3 for an absolute) (dlee)
* Raise a Sequel::Error if an invalid limit or offset is used (jeremyevans)
* Refactor and beef up Dataset#first and Dataset#last, with some change in functionality (jeremyevans)
* Add String#to_datetime, for consistency (jeremyevans)
* Fix Range#interval so that it returns 1 less for an exclusive range
* Change SQLite adapter so it doesn't swallow exceptions other than SQLite3::Exception (such as Interrupt) (jeremyevans)
* Change PostgreSQL and MySQL adapters to raise Sequel::Error instead of database specific errors if a database error occurs (jeremyevans)
* Using a memory database with SQLite now defaults to a single connection, so all queries it uses run against the same database (jeremyevans)
* Fix attempting to query MySQL using the same connection being used to concurrently execute another query (jeremyevans)
* Add options to the connection pool to configure reusing connections and converting exceptions (jeremyevans)
* Use the database driver provided string quoting methods for MySQL and SQLite (jeremyevans) (#223)
* Add ColumnAll#==, for checking the equality of two ColumnAlls (jeremyevans)
* Allow an array of arrays instead of a hash when specifying conditions (jeremyevans)
* Add Sequel::DBI::Database#lowercase, for lowercasing column names (jamesearl)
* Remove Dataset#extend_with_destroy, which may break code that uses Dataset#set_model directly and expects the destroy method to be added (jeremyevans)
* Fix some issues when running on Ruby 1.9 (Zverok, jeremyevans)
* Make the DBI adapter work (partially) with PostgreSQL (Seb)
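A rough sketch of the :schema__table___table_alias syntax noted above (hypothetical DB handle and names):

  DB[:schema__table___alias]
  # SELECT * FROM schema.table AS alias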
=== 1.5.1 (2008-04-30)
* Fix Dataset#eager_graph when not all objects have associated objects (jeremyevans)
* Have Dataset#graph give a nil value instead of a hash with all nil values if no matching rows exist in the graphed table (jeremyevans)
=== 1.5.0 (2008-04-29)
* Make the validation errors API compatible with Merb (Inviz)
* Add validates_uniqueness_of, for protecting against duplicate entries in the database (neaf, jeremyevans)
* Alias Model#dataset= to Model#set_dataset (tmm1)
* Make some Model class methods private: def_hook_method, hooks, add_hook, plugin_module, plugin_gem (jeremyevans)
* Add the eager! and eager_graph! mutation methods to model datasets (jeremyevans)
* Remove Model.database_opened (jeremyevans)
* Remove Model.super_dataset (jeremyevans)
* Deprecate .create_with_params, .create_with, #set, #update, #update_with, and #new_record from Sequel::Model (jeremyevans)
* Add Model.def_dataset_method, for defining methods on the model that reference methods on the dataset (jeremyevans)
* Deprecate Model.method_missing, add dataset methods to Model via metaprogramming (jeremyevans)
* Remove Model.join, so it is the same as Dataset#join (jeremyevans)
* Use reciprocal associations for all types of associations in the getter/setter/add_/remove_ methods (jeremyevans)
* Fix many_to_one associations to cache negative lookups (jeremyevans)
* Change Model#=== to always be false if the primary key is nil (jeremyevans)
* Add Model#hash, which should be unique for a given class and primary key (or values if primary key is nil) (jeremyevans)
* Add Model#eql? as an alias to Model#== (jeremyevans)
* Make Model#reload clear any cached associations (jeremyevans)
* No longer depend on the assistance gem, merge the Inflector and Validations code (jeremyevans)
* Add Model#set_with_params, which is Model#update_with_params without the save (jeremyevans)
* Fix Model#destroy so that it returns self, not the result of after_destroy (jeremyevans)
* Define Model column accessors in set_dataset, so they should always be available, deprecate Model#method_missing (jeremyevans)
* Add eager loading of associations via new sequel_core object graphing feature (jeremyevans)
* Fix many_to_many associations with classes inside modules without an explicit join table (jeremyevans)
* Allow creation of new records that don't have primary keys when the cache is on (jeremyevans) (#213)
* Make Model#initialize, Model#set, and Model#update_with_params invulnerable to memory exhaustion (jeremyevans) (#210)
* Add Model.str_columns, which gives a list of columns as frozen strings (jeremyevans)
* Remove pretty_table.rb from sequel, since it is in sequel_core (jeremyevans)
* Set a timeout in the SQLite adapter, default to 5 seconds (hrvoje.marjanovic) (#218)
* Document that calling Sequel::ODBC::Database#execute manually requires you to manually drop the returned object (jeremyevans) (#217)
* Paginating an already paginated/limited dataset now raises an error (jeremyevans)
* Add support for PostgreSQL partial indexes (dlee)
* Added support for arbitrary index types (including spatial indexes) (dlee)
* Quote column names in SQL generated for SQLite (tmm1)
* Deprecate Object#rollback! (jeremyevans)
* Make some Dataset methods private (qualified_column_name, column_list, table_ref, source_list) (jeremyevans)
* Deprecate Dataset methods #set_options, #set_row_proc, #remove_row_proc, and #clone_merge (jeremyevans)
* Add Symbol#*, a replacement for Symbol#all (jeremyevans)
* Deprecate including ColumnMethods in Object, include it in Symbol, String, and Sequel::SQL::Expression (jeremyevans)
* Deprecate Symbol#method_missing, and #AS, #DESC, #ASC, #ALL, and #all from ColumnMethods (jeremyevans)
* Fix table joining in MySQL (jeremyevans)
* Deprecate Sequel.method_missing and Object#Sequel, add real Sequel.adapter methods (jeremyevans)
* Move dataset methods applicable only to paginated datasets into Sequel::Dataset::Pagination (jeremyevans)
* Make Sequel::Dataset::Sequelizer methods private (jeremyevans)
* Deprecate Dataset#method_missing, add real mutation methods (e.g. filter!) (jeremyevans)
* Fix connecting to an MSSQL server via ODBC using domain user credentials (jeremyevans) (#216)
* No longer depend on the assistance gem, merge in the ConnectionPool and .blank methods (jeremyevans)
* No longer depend on ParseTree, RubyInline, or ruby2ruby, but you still need them if you want to use the block filters (jeremyevans)
* Fix JDBC adapter by making indexes start at 1 (pdamer)
* Fix connecting to a database via the ADO adapter (now requires options instead of URI) (timuckun, jeremyevans) (#204)
* Support storing microseconds in postgres timestamp fields (schnarch...@rootimage.msu.edu) (#215)
* Allow joining of multiple datasets, by making the table alias different for each dataset joined (jeremyevans)
* SECURITY: Fix backslash escaping of strings (dlee)
* Add ability to create a graph of objects from a query, with the result split into corresponding tables (jeremyevans) (#113)
* Add attr_accessor for dataset row_proc (jeremyevans)
* Don't redefine Dataset#each when adding a transform or row_proc (jeremyevans)
* Remove array_keys.rb from sequel_core, it was partially broken (since the arrays came from hashes), and redefined Dataset#each (jeremyevans)
* Fix MySQL default values insert (matt.binary) (#196)
* Fix ODBC adapter improperly escaping date and timestamp values (leo.borisenko) (#165)
* Fix renaming columns on MySQL with type :varchar (jeremyevans) (#206)
* Add Sequel::SQL::Function#==, for comparing SQL Functions (jeremyevans) (#209)
* Update Informix adapter to work with Ruby/Informix 0.7.0 (gerardo.santana@gmail.com)
* Remove sequel_core's knowledge of Sequel::Model (jeremyevans)
* Use "\n" instead of $/ (since $/ can be redefined in ways we do not want) (jeremyevans)
=== 1.4.0 (2008-04-08)
* Don't mark a column as changed unless the new value is different from the current value (tamas.denes, jeremyevans) (#203).
* Switch gem name from "sequel_model" to just "sequel", which required large version bump (jeremyevans).
* Add :select option to many_to_many associations, default to selecting only the associated model table and not the join table (jeremyevans) (#208).
* Add :reciprocal one_to_many association option, for setting corresponding many_to_one instance variable (jeremyevans).
* Add eager loading implementation (jeremyevans).
* Change *_to_many associations so that all associations are considered :cache=>true (jeremyevans).
* Fix associations with block arguments and :cache=>true (jeremyevans).
* Merge 3 mysql patches from the bugtracker (mvyver) (#200, #201, #202).
* Merge 2 postgresql patches from the bugtracker (a...@mellowtone.co.jp) (#211, #212).
* Allow overriding of default postgres spec database via ENV['SEQUEL_PG_SPEC_DB'] (jeremyevans).
* Allow using the Sequel::Model as the first argument in a dataset join selection (jeremyevans) (#170).
* Add simple callback mechanism to make model eager loading implementation easier (jeremyevans).
* Added Sequel::Error::InvalidOperation class for invalid operations (#198).
* Implemented MySQL::Database#server_version (#199).
* Added spec configuration for MySQL socket file.
* Fixed transform with array tuples in postgres adapter.
* Changed spec configuration to Database objects instead of URIs in order to support custom options for spec databases.
* Renamed schema files.
* Fixed Dataset#from to work correctly with SQL functions (#193).
=== Previous to 1.4.0, Sequel model and Sequel core versioning differed, see the bottom of this file for the changelog to Sequel model prior to 1.4.0.
=== 1.3 (2008-03-08)
* Added configuration file for running specs (#186).
* Changed Database#drop_index to accept fixed arity (#173).
* Changed column definition sql to put UNSIGNED constraint before unique in order to satisfy MySQL (#171).
* Enhanced MySQL adapter to support LOAD DATA LOCAL INFILE, added compress option for mysql connection by default (#172).
* Fixed bug when inserting hashes in array tuples mode.
* Changed SQLite adapter to catch RuntimeError raised when executing a statement and raise an Error::InvalidStatement with the offending SQL and error message (#188).
* Added Error::InvalidStatement class.
* Fixed Dataset#reverse to not raise for unordered dataset (#189).
* Added Dataset#unordered method and changed #order to remove order if nil is specified (#190).
* Fixed reversing order of ASC expression (#164).
* Added support for :null => true option when defining table columns (#192).
* Fixed Symbol#method_missing to accept variable arity (#185).
=== 1.2.1 (2008-02-29)
* Added add_constraint and drop_constraint functionality to Database#alter_table (#182).
* Enhanced Dataset#multi_insert to accept datasets (#179).
* Added MySQL::Database#use method for switching database (#180).
* Enhanced Database.uri_to_options to accept uri strings (#178).
* Added Dataset#columns! method that always makes a roundtrip to the DB (#177).
* Added new Dataset#each_page method that iterates over all pages in the result set (#175).
* Added Dataset#reverse alias to Dataset#reverse_order (#174).
* Fixed Dataset#transform_load and #transform_save to create a transformed copy of the supplied hash instead of transforming it in place (#184).
* Implemented MySQL::Dataset#replace (#163).
=== 1.2 (2008-02-15)
* Added support for :varchar[100] like type declarations in #create_table.
* Fixed #rename_column in mysql adapter to support types like varchar(255) (#159).
* Added support for order and limit in DELETE statement in MySQL adapter (#160).
* Added checks to Dataset#multi_insert to prevent work if no values are given (#162).
* Override ruby2ruby implementation of Proc#to_sexp which leaks memory (#161).
* Added log option, help for sequel script (#157).
=== 1.1 (2008-02-15)
* Fixed Dataset#join_table to support joining of datasets (#156).
* Changed Dataset#empty? to use EXISTS condition instead of counting records, for much better performance (#158).
* Implemented insertion of multiple records in a single statement for postgres adapter. This feature is available only in postgres 8.2 and newer.
* Implemented Postgres::Database#server_version.
* Implemented Database#get, short for dataset.get(...).
* Refactored Dataset#multi_insert, added #import alias, added support for calling #multi_insert using array of columns and array of value arrays (thanks David Lee) (example below).
* Implemented Dataset#get, a replacement for select(column).first[column].
* Implemented Dataset#grep method, poor man's text search.
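A small sketch of the refactored multi_insert/#import and Dataset#grep entries above (hypothetical DB handle and table):

  DB[:items].import([:a, :b], [[1, 2], [3, 4]])  # columns, then value arrays
  DB[:items].grep(:name, '%abc%')                # name LIKE '%abc%'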
=== 1.0.10 (2008-02-13)
* Fixed Dataset#group_and_count to work inside a query block (#152).
* Changed datasets with transforms to automatically transform hash filters (#155).
* Changed Marshal stock transform to use Base64 encoding with backward-compatibility to support existing marshaled values (#154).
* Added support for inserting multiple records in a single statement using #multi_insert in MySQL adapter (#153).
* Added support for :slice option (same as :commit_every) in Dataset#multi_insert.
* Changed Dataset#all to accept opts and iteration block.
=== 1.0.9 (2008-02-10)
* Implemented Dataset#inspect and Database#inspect (#151).
* Added full-text searching for odbc_mssql adapter (thanks Joseph Love).
* Added AlterTableGenerator#add_full_text_index method.
* Implemented full_text indexing and searching for PostgreSQL adapter (thanks David Lee).
* Implemented full_text indexing and searching for MySQL adapter (thanks David Lee).
* Fixed Dataset#insert_sql to work with array subscript references (thanks Jim Morris).
=== 1.0.8 (2008-02-08)
* Added support for multiple choices in string matching expressions (#147).
* Renamed Dataset#clone_merge to Dataset#clone, works with or without options for merging (#148).
* Fixed MySQL::Database#<< method to always free the result in order to allow multiple calls in a row (#149); the same fix applies to the PostgreSQL adapter.
=== 1.0.7 (2008-02-05)
* Added support for conditional filters (using if else statements) inside block filters (thanks Kee).
=== 1.0.6 (2008-02-05)
* Removed code pollution introduced in revs 814, 817 (really bad patch, IMO).
* Fixed joining datasets using aliased tables (#140).
* Added support for additional field types in postgresql adapter (#146).
* Added support for date field types in postgresql adapter (#145).
* Fixed Dataset#count to work correctly for grouped datasets (#144).
* Added Dataset#select_more, Dataset#order_more methods (#129).
=== 1.0.5 (2008-01-25)
* Added support for instantiating models by using the load constructor method.
=== 1.0.4.1 (2008-01-24)
* Fixed bin/sequel to require sequel_model if available.
=== 1.0.4 (2008-01-24)
* Added Dataset#select_all method.
* Changed ODBC::Database to support connection using driver and database name, also added support for untitled columns in ODBC::Dataset (thanks Leonid Borisenko).
* Fixed MySQL adapter to correctly format foreign key definitions (#123).
* Changed MySQL::Dataset to allow HAVING clause on ungrouped datasets, and put HAVING clause before ORDER BY clause (#133).
* Changed Dataset#group_and_count to accept multiple columns (#134).
* Fixed database spec to open YAML file in binary mode (#131).
* Cleaned up gem spec (#132).
* Added Dataset#table_exists? convenience method.
=== 1.0.3 (2008-01-17)
* Added support for UNSIGNED constraint, used in MySQL (#127).
* Implemented constraint definitions inside Database#create_table.
* Fixed postgres adapter to define PGconn#async_exec as alias to #exec if not defined (for pure-ruby postgres driver).
* Added String#to_date. Updated mysql adapter to use String#to_date for mysql date types (thanks drfreeze).
=== 1.0.2 (2008-01-14)
* Removed ConnectionPool, NumericExtensions. Added dependency on assistance.
=== 1.0.1 (2008-01-12)
* Changed postgres adapter to quote column references using double quotes.
* Applied patch for oracle adapter: fix behavior of limit and offset, transactions, #table_exists?, #tables and additional specs (thanks Liming Lian #122).
* Allow for additional filters on a grouped dataset (#119 and #120)
* Changed mysql adapter to default to localhost if :host option is not specified (#114).
* Refactored Sequelizer to use Proc#to_sexp (method provided by r2r).
* Enhanced Database.connect to accept options with string keys, so it can now accept options loaded from YAML files. Database.connect also automatically converts :username option into :user for compatibility with existing YAML configuration files for AR and DataMapper.
=== 1.0.0.1 (2008-01-03)
* Changed MySQL adapter to support specifying socket option.
* Added support for limiting and paginating datasets with fixed SQL, gotten with DB#fetch (thanks Ruy Diaz).
* Added new Dataset#from_self method that returns a dataset selecting from the original dataset.
=== 1.0 (2008-01-02)
* Removed deprecated adapter stubs.
* Removed Sequel::Model() stub.
* Changed name to sequel_core.
* 100% code coverage.
* Fixed error behavior when sequel_model is not available.
* Fixed error behavior when parse_tree or ruby2ruby are not available.
=== 0.5.0.2 (2008-01-01)
* Fixed String#to_time to raise error correctly for invalid time stamps.
* Improved code coverage - now at 99.2%.
=== 0.5.0.1 (2007-12-31)
* Added a stub for Sequel::Model that auto-loads sequel_model.
* Changed Sequel.method_missing and Database.adapter_class to raise AdapterNotFound if an adapter could not be loaded.
* Fixed behavior of error trap in sequel command line tool.
=== 0.5 (2007-12-30)
* Moved model code into a separate sub-project. Rearranged trunk into core, model and model_plugins.
=== 0.4.5 (2007-12-25)
* Added rdoc for new alter_table functionality (#109).
* Fixed update_sql with array sub-item keys (#110).
* Refactored model specs.
* Added Model#update as alias to #set.
* Refactored validations code. Renamed Model.validations? into Model.has_validations?.
* Added initial Model validations (thanks Lance Carlson).
* Added Database#set_column_default method (thanks Jim Morris.)
* Removed warning on uninitialized @transform value (thanks Jim Morris).
=== 0.4.4.2 (2007-12-20)
* Fixed parsing errors in Ruby 1.9.
* Fixed sync problem in connection_pool_spec.
* Changed String#to_time to raise Error::InvalidValue if Time.parse fails.
* Refactored sequel error classes.
=== 0.4.4.1 (2007-12-19)
* Fixed schema generation code to use field quoting and support adapter-specific literalization of default values (#108).
=== 0.4.4 (2007-12-17)
* Implemented Database#rename_table (#104).
* Fixed drop_index in mysql adapter (#103).
* Added ALTER TABLE specs for postgres, sqlite and mysql adapters. Added custom alter_table behavior for sqlite and mysql adapters (#101, #102).
* Added direct Database API for altering tables.
* Added Database#alter_table method with support for adding, dropping, renaming, modifying columns and adding and dropping indexes.
* Added #unique schema method for defining unique indexes (thanks Dado).
* Implemented unfolding of #each calls inside sequelizer blocks (thanks Jim Morris).
=== 0.4.3 (2007-12-15)
* Fixed Dataset#update to accept strings (#98).
* Fixed Model.[] to raise for boolean argument (#97).
* Added Database#add_index method (thanks coda.hale).
* Added error reporting for filtering on comparison not in a block (thanks Jim Morris).
* Added support for inline index definition (thanks Dado).
* Added Database#create_table! method for forcibly creating a table (thanks Dado).
* Added support for using Dataset#update with block.
* Changed subscript access to use | operator.
* Fixed subscript access in sequelizer.
* Added support for subscript access using Symbol#/ operator.
=== 0.4.2.2 (2007-12-10)
* Improved code coverage.
* Fixed Dataset#count to work properly with datasets with fixed SQL (when using #fetch).
* Added Model.create_with_params method that filters the given parameters according to the model's columns (thanks Aman Gupta).
=== 0.4.2.1 (2007-12-09)
* Refactored and fixed Dataset#reverse_order to work with field quoting (thanks Christian).
* Fixed problem with field quoting in insert statements.
* Changed sequelizer code to silently fail on any error when requiring parsetree and ruby2ruby.
* Added Database#create_view, #create_or_replace_view and #drop_view methods. Also implemented Dataset#create_view and #create_or_replace_view convenience methods.
* Keep DRY by re-using Model#[]= from method_missing.
* Added Model.fetch alias for DB.fetch.set_model(Model).
=== 0.4.2 (2007-12-07)
* Implemented Model#save_changes.
* Extended Model#save to accept specific columns to update.
* Implemented experimental JDBC adapter.
* Added adapter skeleton as starting point for new adapters.
* Cleaned-up adapters and moved automatic requiring of 'sequel' to adapter stubs.
=== 0.4.1.3 (2007-12-05)
* Better plugin conventions.
* Added experimental OpenBase adapter.
* Fixed Sequel. methods to accept options hash as well as database name. Fixed Sequel.connect to accept options hash as well as URI (Wayne).
=== 0.4.1.2 (2007-12-04)
* Added release rake task (using RubyForge).
* Changed Model.is to accept variable arity.
* Implemented plugin loading for model classes.
* Fixed odbc-mssql and odbc adapters (thanks Dusty.)
* Implemented odbc-mssql adapter (thanks Dusty.)
=== 0.4.1.1 (2007-11-27)
* Fixed #first and #last functionality in Informix::Dataset (thanks Gerardo Santana).
=== 0.4.1 (2007-11-25)
* Put adapter files in lib/sequel/adapters. Requiring sequel/ is now deprecated. Users can now just require 'sequel' and adapters are automagically loaded (#93).
=== 0.4.0 (2007-11-24)
* Reorganized lib directory structure.
* Added support for dbi-xxx URI schemes (#86).
* Fixed problem in Database#uri where setting the password would raise an error (#87).
* Improved Dataset#insert_sql to correctly handle string keys (#92).
* Improved error-handling for worker threads. Errors are saved to an array and are accessible through #errors (#91).
* Dataset#uniq/distinct can now accept a column list for DISTINCT ON clauses.
* Fixed Model.all.
* Fixed literalization of strings with escape sequences in postgres adapter (#90).
* Added support for literalizing BigDecimal values (#89).
* Fixed column qualification for joined datasets (thanks Christian).
* Implemented experimental informix adapter.
=== 0.3.4.1 (2007-11-10)
* Changed Dataset#select_sql to support queries without a FROM clause.
=== 0.3.4 (2007-11-10)
* Fixed MySQL adapter to allow calling stored procedures (thanks Sebastian).
* Changed Dataset#each to always return self.
* Fixed SQL functions without arguments in block filters.
* Implemented super-cool Symbol#cast_as method.
* Fixed error message in command-line tool if failed to load adapter (#85).
* Refactored code relating to column references for better extensibility (#88).
* Tiny fix to Model#run_hooks.
=== 0.3.3 (2007-11-04)
* Revised code to generate SQL statements without trailing semicolons.
* Added Sequel::Worker implementation of a simple worker thread for asynchronous execution.
* Added spec for Oracle adapter.
* Fixed Oracle adapter to format INSERT statements without semicolons (thanks Liming Lian).
* Renamed the Array#keys alias to Array#columns instead of Array#fields.
* Renamed FieldCompositionMethods as ColumnCompositionMethods.
* Implemented Sequel::NumericExtensions to provide stuff like 30.days.ago.
=== 0.3.2 (2007-11-01)
* Added #to_column_name as alias to #to_field_name, #column_title as alias to #field_title.
* Added Dataset#interval method for getting interval between minimum/maximum values for a column.
* Fixed Oracle::Database#execute (#84).
* Added group_and_count as general implementation for count_by_xxx.
* Added count_by magic method.
* Added Dataset#range method for getting the minimum/maximum values for a column.
* Fixed timestamp translation in SQLite adapter (#83).
* Experimental DB2 adapter.
* Added Dataset#set as alias to Dataset#update.
* Removed long deprecated expressions.rb code.
* Better documentation.
* Implemented Dataset magic methods: order_by_xxx, group_by_xxx, filter_by_xxx, all_by_xxx, first_by_xxx, last_by_xxx.
* Changed Model.create and Model.new to accept a block.
=== 0.3.1 (2007-10-30)
* Typo fixes (#79).
* Added require 'yaml' to dataset.rb (#78).
* Changed postgres adapter to use the ruby-postgres library's type conversion if available (#76).
* Fixed string literalization in mysql adapter for strings with comment backslashes in them (#75).
* Fixed ParseTree dependency to work with version 2.0.0 and later (#74).
* foreign_key definitions now accept :key option for specifying the remote key (#73).
* Fixed Model#method_missing to not raise error for columns not in the table but for which a value exists (#77).
* New documentation for Model.
* Implemented Oracle adapter based on ruby-oci8 library.
* Implemented Model#pk_hash. Is it really necessary?
* Deprecated Model#pkey. Implemented better Model#pk method.
* Specs and docs for Model.one_to_one, Model.one_to_many macros.
=== 0.3.0.1 (2007-10-20)
* Changed Database#fetch to return a modified dataset.
=== 0.3 (2007-10-20)
* Added stock transforms to Dataset#transform. Refactored Model.serialize.
* Added Database#logger= method for setting the database logger object.
* Fixed Model.[] to act as shortcut to Model.find when a hash is given (#71).
* Added support for old and new decimal types in MySQL adapter, and updated MYSQL_TYPES with MySQL 5.0 constants (#72).
* Implemented Database#disconnect method for all adapters.
* Fixed small bug in ArrayKeys module.
* Implemented model caching by primary key.
* Separated Model.find and Model.[] functionality. Model.find takes a filter. Model.[] is strictly for finding by primary keys.
* Enhanced Dataset#first to accept a filter block. Model#find can also now accept a filter block.
* Changed Database#[] to act as shortcut to #fetch if a string is given.
* Renamed Database#each to #fetch. If no block is given, the method returns an enumerator.
* Changed Dataset#join methods to correctly literalize values in join conditions (#70).
* Fixed #filter with ranges to correctly literalize field names (#69).
* Implemented Database#each method for quickly retrieving records with arbitrary SQL (thanks Aman Gupta).
* Fixed bug in postgres adapter where a LiteralString would be literalized as a regular String.
* Fixed SQLite insert with subquery (#68).
* Reverted back to hashes as default mode. Added Sequel.use_array_tuples and Sequel.use_hash_tuples methods.
* Fixed problem with arrays with keys when using #delete.
* Implemented ArrayKeys as substitute for ArrayFields.
* Added Dataset#each_hash method.
* Rewrote SQLite::Database#transaction to use sqlite3-ruby library implementation of transactions.
* Fixed Model.destroy_all to work correctly in cases where no before_destroy hook is defined and an after_destroy hook is defined.
* Restored Model.has_hooks? implementation.
* Changed Database#<< to strip comments and whitespace only when an array is given.
* Changed Schema::Generator#primary_key to accept calls with the type argument omitted.
* Hooks can now be prepended or appended by choice.
* Changed Model.subset to define filter method on the underlying dataset instead of the model class.
* Fixed Dataset#transform to work with array fields.
* Added Dataset#to_csv method.
* PrettyTable can now extract column names from arrayfields.
* Converted ado, dbi, odbc adapters to use arrayfields instead of hashes.
* Fixed composite key support.
* Fixed Dataset#insert_sql, update_sql to support array fields.
* Converted sqlite, mysql, postgres adapters to use arrayfields instead of hashes.
* Extended Dataset#from to auto alias sub-queries.
* Extended Dataset#from to accept hash for aliasing tables.
* Added before_update, after_update hooks.
=== 0.2.1.1 (2007-10-07)
* Added Date literalization to sqlite adapter (#60).
* Changed Model.serialize to allow calling it after the class is defined (#59).
* Fixed after_create hooks to allow calling save inside the hook (#58).
* Fixed MySQL quoting of sql functions (#57).
* Implemented rollback! global method for cancelling transactions in progress.
* Fixed =~ operator in Sequelizer.
* Fixed ODBC::Dataset#fetch_rows (thanks Dusty).
* Renamed Model.recreate_table to create_table!. recreate_table is deprecated and will issue a warning (#56).
=== 0.2.1 (2007-09-24)
* Added default implementation of Model.primary_key_hash.
* Fixed Sequel::Model() to set dataset for inherited classes.
* Rewrote Model.serialize to use Dataset#transform.
* Implemented Dataset#transform.
* Added gem spec for Windows (without ParseTree dependency).
* Added support for dynamic strings in Sequelizer (#49).
* Query branch merged into trunk.
* Implemented self-changing methods.
* Added support for ternary operator to Sequelizer.
* Fixed sequelizer to evaluate expressions if they don't involve symbols or literal strings.
* Added protection against using #each, #delete, #insert, #update inside query blocks.
* Improved Model#method_missing to deal with invalid attributes.
* Implemented Dataset#query.
* Added Dataset#group_by as alias for Dataset#group.
* Added Dataset#order_by as alias for Dataset#order.
* More model refactoring. Added support for composite keys.
* Added Dataset#empty? method (#46).
* Fixed Symbol#to_field_name to support names with numbers and upper-case characters (#45).
* Added install_no_doc rake task.
* Partial refactoring of model code.
* Refactored dataset-model association and added Dataset#set_row_filter method.
* Added support for case-sensitive regexps to mysql adapter.
* Changed mysql adapter to support encoding option as well.
* Added charset/encoding option to postgres adapter.
* Implemented Model.serialize (thanks Aman Gupta.)
* Changed Model.create to INSERT DEFAULT VALUES instead of (id) VALUES (null) (brings back #41.)
* Fixed Model.new to work without arguments.
* Added Model.no_primary_key method to allow models without primary keys.
* Added Model#this method (#42 thanks Duane Johnson).
* Fixed Dataset#insert_sql to use DEFAULT VALUES clause if argument is an empty hash.
* Fixed Model.create to work correctly when no argument is passed (#41).
=== 0.2.0.2 (2007-09-07)
* Dataset#insert can now accept subqueries.
* Changed Migrator.apply to return the version.
* Changed Sequel::Model() to cache intermediate classes so descendant classes can be reopened (#39).
* Added :charset option to MySQL adapter (#40).
* Fixed Dataset#exclude to add parens around NOT expression (#38).
* Fixed use of sub-queries with all comparison operators in block filters (#38).
* Fixed arithmetic expressions in block filters to not be literalized.
* Changed Symbol#method_missing to return LiteralString.
* Changed PrettyTable to right-align numbers.
* Fixed Model.create_table (thanks Duane Johnson.)
=== 0.2.0.1 (2007-09-04)
* Improved support for invoking methods with inline procs inside block filters.
=== 0.2.0 (2007-09-02)
* Fixed Model.drop_table (thanks Duane Johnson.)
* Dataset#each can now return rows for arbitrary SQL by specifying :sql option.
* Added spec for postgres adapter.
* Fixed Model.method_missing to work with new SQL generation.
* Fixed #compare_expr to support regexps.
* Fixed postgres, mysql adapters to support regexps.
* More specs for block filters. Updated README.
* Added support for globals and $X macros in block filters.
* Fixed Sequelizer to not fail if ParseTree or Ruby2Ruby gems are missing.
* Renamed String#expr into String#lit (#expr should be deprecated in future versions).
* Renamed Sequel::ExpressionString into LiteralString.
* Fixed Symbol#[] to return an ExpressionString, so as not to be literalized.
* Renamed Dataset::Expressions to Dataset::Sequelizer.
* Renamed Expressions#format_re_expression to match_expr.
* Renamed Expressions#format_eq_expression to compare_expr.
* Added support for Regexp in MySQL adapter.
* Refactored Regexp expressions into a separate #format_re_expression method.
* Added support for arithmetic in proc filters.
* Added support for nested proc expressions, more specs.
* Added support for SQL function using symbols, e.g. :sum[:x].
* Fixed deadlock bug in ConnectionPool.
* Removed deprecated old expressions.rb.
* Rewrote Proc filter feature using ParseTree.
* Added support for additional functions on columns using Symbol#method_missing.
* Added support for supplying filter block to DB#[] method, to allow stuff like DB[:nodes] {:path =~ /^icex1/}.
=== 0.1.9.12 (2007-08-26)
* Added spec for PrettyTable.
* Added specs for Schema::Generator and Model (#36 thanks technoweenie).
* Fixed Sequel::Model.set_schema (#36 thanks technoweenie.)
* Added support for no options on Schema::Generator#foreign_key (#36 thanks technoweenie.)
* Implemented (restored?) Schema::Generator#primary_key_name (#36 thanks technoweenie.)
* Better spec code coverage.
=== 0.1.9.11 (2007-08-24)
* Changed Dataset#set_model to allow supplying additional arguments to the model's initialize method (#35). Thanks Sunny Hirai.
=== 0.1.9.10 (2007-08-22)
* Changed schema generation code to generate separate statements for CREATE TABLE and each CREATE INDEX (#34).
* Refactored Dataset::SQL#field_name for better support of different field quoting standards by specific adapters.
* Added #current_page_record_count for paginated datasets.
* Removed Database#literal and included Dataset::SQL instead.
* Sequel::Dataset::SQL#field_name can now take a hash (as well as #select and any method that uses #field_name) for aliasing column names. E.g. DB[:test].select(:_qqa => 'Date').sql #=> 'SELECT _qqa AS Date FROM test'.
* Moved SingleThreadedPool to lib/sequel/connection_pool.rb.
* Changed SQLite::Dataset to return affected rows for #delete and #update (#33).
* ADO adapter: Added use of Enumerable for Recordset#Fields, playing it safe and moving to the first row before getting results, and changing the auto_increment constant to work for MSSQL.
=== 0.1.9.9 (2007-08-18)
* New ADO adapter by cdcarter (#31).
* Added automatic column aliasing to #avg, #sum, #min and #max (#30).
* Fixed broken Sequel::DBI::Dataset#fetch_rows (#29 thanks cdcarter.)
=== 0.1.9.8 (2007-08-15)
* Fixed DBI adapter.
=== 0.1.9.7 (2007-08-15)
* Added support for executing batch statements in sqlite adapter.
* Changed #current_page_record_range to return 0..0 for an invalid page.
* Fixed joining of aliased tables.
* Improved Symbol#to_field_name to prevent false positives.
* Implemented Dataset#multi_insert with :commit_every option.
* More docs for Dataset#set_model.
* Implemented automatic creation of convenience methods for each adapter (e.g. Sequel.sqlite etc.)
=== 0.1.9.6 (2007-08-13)
* Refactored schema definition code. Gets rid of famous primary_key problem as well as other issues (e.g. issue #22).
* Added #pagination_record_count, #page_range and #current_page_record_range for paginated datasets.
* Changed MySQL adapter to automatically reconnect (issue #26).
* Changed Sequel() to accept variable arity.
* Added :elements option to column definition, in order to support ENUM and SET types.
=== 0.1.9.5 (2007-08-12)
* Fixed migration docs.
* Removed dependency on PGconn in Schema class.
=== 0.1.9.4 (2007-08-11)
* Added Sequel.dbi convenience method for using DBI connection strings to open DBI databases.
=== 0.1.9.3 (2007-08-10)
* Added support for specifying field size in schema definitions (thanks Florian Assmann.)
* Added migration code based on work by Florian Assmann.
* Reintroduced metaid dependency. No need to keep a local copy of it.
=== 0.1.9.2 (2007-07-24)
* Removed metaid dependency. Re-factored requires in lib/sequel.rb.
=== 0.1.9.1 (2007-07-22)
* Improved robustness of MySQL::Dataset#field_name.
* Added Sequel.single_threaded= convenience method.
=== 0.1.9 (2007-07-21)
* Fixed #update_sql and #insert_sql to support field quoting by calling #field_name.
* Implemented automatic data type conversion in mysql adapter.
* Added support for boolean literals in mysql adapter.
* Added support for ORDER and LIMIT clauses in UPDATE statements in mysql adapter.
* Implemented correct field quoting (using back-ticks) in mysql adapter.
* Wrote basic MySQL spec.
* Fixed MySQL::Dataset to return correct data types with symbols as hash keys.
* Removed dysfunctional MySQL::Database#transaction.
* Added support for single threaded operation.
* Fixed bug in Dataset#format_eq_expression where Range objects would not be literalized correctly.
* Added parens around postgres LIKE expressions using regexps.
=== 0.1.8 (2007-07-10)
* Implemented Dataset#columns for retrieving the columns in the result set.
* Updated Model with changes to how model-associated datasets work.
* Beefed-up specs. Coverage is now at 95.0%.
* Added support for polymorphic datasets.
* The adapter dataset interface was simplified and standardized. Only four methods need be overridden: fetch_rows, update, insert and delete.
* The Dataset class was refactored. The bulk of the dataset code was moved into separate modules.
* Renamed Dataset#hash_column to Dataset#to_hash.
* Added some common pragmas to sqlite adapter.
* Added Postgres::Dataset#analyze for EXPLAIN ANALYZE queries.
* Fixed broken Postgres::Dataset#explain.
=== 0.1.7
* Removed db.synchronize wrapping calls in sqlite adapter.
* Implemented Model.join method to restrict returned columns to the model table (thanks Pedro Gutierrez).
* Implemented Dataset#paginate method.
* Fixed after_destroy hook.
* Improved Dataset#first and #last to accept a filter hash.
* Added Dataset#[]= method.
* Added Sequel() convenience method.
* Fixed Dataset#first to include a LIMIT clause for a single record.
* Small fix to Postgres driver to return a primary_key value for the inserted record if it is specified in the insertion values (thanks Florian Assmann and Pedro Gutierrez).
* Fixed Symbol#DESC to support qualified notation (thanks Pedro Gutierrez).
=== 0.1.6
* Fixed Model#method_missing to raise for an invalid attribute.
* Fixed PrettyTable to print model objects (thanks snok.)
* Fixed ODBC timestamp conversion to return DateTime rather than Time object (thanks snok.)
* Fixed Model.method_missing (thanks snok.)
* Model.method_missing now creates stubs for calling Model.dataset methods. Methods like Model.each etc. are removed.
* Changed default join type to INNER JOIN (thanks snok.)
* Added support for literal expressions, e.g. DB[:items].filter(:col1 => 'col2 - 10'.expr).
* Added Dataset#and.
* SQLite adapter opens a memory DB if no database is specified, e.g. Sequel.open 'sqlite:/'.
* Added Dataset#or, pretty nifty.
=== 0.1.5
* Fixed Dataset#join to support multiple joins. Added #left_outer_join, #right_outer_join, #full_outer_join, #inner_join methods.
=== 0.1.4
* Added String#split_sql.
* Implemented Array#to_sql and String#to_sql. Database#to_sql can now take an array of strings and convert into an SQL string. Comments and excessive white-space are removed.
* Improved Schema generator to support data types as method names:
DB.create_table :test do
integer :abc
text :def
...
end
* Implemented ODBC adapter.
=== 0.1.3
* Implemented DBI adapter.
* Refactored database connection code. Now handled through Database#connect.
=== 0.1.2
* The first opened database is automatically assigned to Model.db.
* Removed SequelConnectionError. Exception class errors are converted to RuntimeError.
* Added support for UNION, INTERSECT and EXCEPT set operations.
* Fixed Dataset#single_record to return nil if no record is found.
* Updated specs to conform to RSpec 1.0.
* Added Model#find_or_create method.
* Fixed MySQL::Dataset#query_single (thanks Dries Harnie.)
* Added Model.subset method. Fixed Model.filter and Model.exclude to accept blocks.
* Added Database#uri method.
* Refactored and removed deprecated code in postgres adapter.
=== 0.1.1
* More documentation for Dataset.
* Added Dataset#size as alias to Dataset#count.
* Changed Database#<< to call execute (instead of being an alias). Thus it will work for descendants as well.
* Fixed Sequel.open to accept variable arity.
* Refactored Model#refresh, Model.create. Removed Model#reload.
* Refactored Model hooks.
* Cleaned up Dataset API.
=== 0.1.0
* Changed Database#create_table to only accept a block. Nobody's gonna use the other way.
* Removed Dataset#[]= method. Too confusing and not really useful.
* Fixed ConnectionPool#hold to wrap exceptions only once.
* Renamed Dataset#where_list to Dataset#expression_list.
* Added support for qualified fields in Proc expressions (e.g. filter {items.id == 1}.)
* Added like? and in? Proc expression operators.
* Added require 'date' in dataset.rb. Is this a 1.8.5 thing?
* Refactored Dataset to use literal strings instead of format strings (slight performance improvement and better readability.)
* Added support for literalizing Date objects.
* Refactored literalization of Time objects.
=== 0.0.20
* Refactored Dataset where clause construction to use expressions.
* Implemented Proc expressions (adapted from a great idea by Sam Smoot.)
* Fixed Model#map.
* Documentation for ConnectionPool.
* Specs for Database.
=== 0.0.19
* More specs for Dataset.
* Fixed Dataset#invert_order to work correctly with strings.
* Fixed Model#== to check equality of values.
* Added Model#exclude and Model#order.
* Fixed Dataset#order and Dataset#group to behave correctly when supplied with qualified field name symbols.
* Removed Database#literal. Shouldn't have been there.
* Added SQLite::Dataset#explain. Returns an array of opcode hashes.
* Specs for ConnectionPool.
=== 0.0.18
* Implemented SequelError and SequelConnectionError classes. ConnectionPool#hold now catches any connection errors and reraises them as SequelConnectionError.
* Removed duplication in Database#[].
* :from and :select options are now always arrays (patch by Alex Bradbury.)
* Fixed Dataset#exclude to work correctly (patch and specs by Alex Bradbury.)
=== 0.0.17
* Fixed Postgres::Database#tables to return table names as symbols (caused problem when using Database#table_exists?).
* Fixed Dataset#from to have variable arity, like Dataset#select and Dataset#where (patch by Alex Bradbury.)
* Added support for GROUP BY and HAVING clauses (patches by Alex Bradbury.) Refactored Dataset#filter.
* More specs.
* Refactored Dataset#where for better composability.
* Added Dataset#[]= method.
* Added support for DISTINCT and OFFSET clauses (patches by Alex Bradbury.) Dataset#limit now accepts ranges. Added Dataset#uniq and distinct methods.
=== 0.0.16
* More documentation.
* Added support for subqueries in Dataset#literal.
* Added support for Model.all_by_XXX methods through Model.method_missing.
* Added basic SQL logging to Database.
* Added Enumerable#send_each convenience method.
* Changed Dataset#destroy to return the number of deleted records.
=== 0.0.15
* Improved Dataset#insert_sql to allow arrays as well as hashes.
* Database#drop_table now accepts a list of table names.
* Added Model#id to return the id column.
=== 0.0.14
* Fixed Model's attribute accessors (hopefully for the last time).
* Changed Model.db and Model.db= to allow different databases for different model classes.
* Fixed bug in aggregate methods (max, min, etc.) for datasets using record classes.
=== 0.0.13
* Fixed Model#method_missing to handle find, filter and attribute accessors. duh.
* Fixed bug in Dataset#literal when quoting arrays of strings (thanks Douglas Koszerek.)
=== 0.0.12
* Model#save now correctly performs an INSERT for new objects.
* Added Model#reload for reloading an object from the database.
* Added Dataset#naked method for getting a version of a dataset that fetches records as hashes.
* Implemented attribute accessors for column values ala ActiveRecord models.
* Fixed filtering using nil values (e.g. dataset.filter(:parent_id => nil)).
=== 0.0.11
* Renamed Model.schema to Model.set_schema and Model.get_schema to Model.schema.
* Improved Model class to allow descendants of model classes (thanks Pedro Gutierrez.)
* Removed require 'postgres' in schema.rb (thanks Douglas Koszerek.)
=== 0.0.10
* Added some examples.
* Added Dataset#print method for pretty-printing tables.
=== 0.0.9
* Fixed Postgres::Database#tables and #locks methods.
* Added PGconn#last_insert_id method that should support all 7.x and 8.x versions of PostgreSQL.
* Added Dataset#exists method for EXISTS where clauses.
* Changed behavior of Dataset#literal to regard symbols as field names.
* Refactored and DRY'd Dataset#literal and overrides thereof. Added support for subqueries in where clause.
=== 0.0.8
* Fixed Dataset#reverse_order to provide chainability. This method can be called without arguments to invert the current order or with arguments to provide a descending order.
* Fixed literal representation of literals in SQLite adapter (thanks Christian Neukirchen!)
* Refactored insert code in Postgres adapter (in preparation for fetching the last insert id for pre-8.1 versions).
=== 0.0.7
* Fixed bug in Model.schema, duh!
=== 0.0.6
* Added Dataset#sql as alias to Dataset#select_sql.
* Dataset#where and Dataset#exclude can now be used for refining dataset conditions, enabling stuff like posts.where(:title => 'abcdef').exclude(:user_id => 3).
* Implemented Dataset#exclude method.
* Added Sequel::Schema#auto_primary_key method for setting an automatic primary key to be added to every table definition. Changed the schema generator to not define a primary key by default.
* Changed Sequel::Database#table_exists? to rely on the tables method if it is available.
* Implemented SQLite::Database#tables.
=== 0.0.5
* Added Dataset#[] method. Refactored Model#find and Model#[].
* Renamed Pool#conn_maker to Pool#connection_proc.
* Added automatic require 'sequel' to all adapters for convenience.
=== 0.0.4
* Added preliminary MySQL support.
* Code cleanup.
=== 0.0.3
* Added Dataset#sum method.
* Added support for exclusive ranges (thanks Christian Neukirchen.)
* Added sequel console for quick'n'dirty access to databases.
* Fixed small bug in Dataset#qualified_field_name for better join support.
=== 0.0.2
* Added Sequel.open as alias to Sequel.connect.
* Refactored Dataset#where_equal_condition into Dataset#where_condition, allowing arrays and ranges, e.g. posts.filter(:stamp => (3.days.ago)..(1.day.ago)), or posts.filter(:category => ['ruby', 'postgres', 'linux']).
* Added Model#[]= method for changing column values and Model#save method for saving them.
* Added Dataset#destroy for deleting each record individually as support for models. Renamed Model#delete to Model#destroy (and Model#destroy_all) ala ActiveRecord.
* Refactored Dataset#first and Dataset#last code. These methods can now accept the number of records to fetch.
=== 0.0.1
* More documentation for Dataset.
* Renamed Database#query to Database#dataset.
* Added Dataset#insert_multiple for inserting multiple records.
* Added Dataset#<< as shorthand for inserting records.
* Added Database#<< method for executing arbitrary SQL.
* Imported Sequel code.
== Sequel::Model CHANGELOG 0.1 - 0.5.0.2
=== 0.5.0.2 (2008-03-12)
* More fixes for Model.associate to accept strings and symbols as class references.
=== 0.5.0.1 (2008-03-09)
* Fixed Model.associate to accept class and class name in :class option.
=== 0.5 (2008-03-08)
* Merged new associations branch into trunk.
* Rewrote RDoc for associations.
* Added has_and_belongs_to_many alias for many_to_many.
* Added support for optional dataset block.
* Added :order option to order association datasets.
* Added :cache option to return and cache array of objects for association.
* Changed one_to_many, many_to_many associations to return dataset by default.
* Added has_many, belongs_to aliases.
* Refactored associations code.
* Added deprecations for old-style relations.
* Completed specs for new associations code.
* New associations code by Jeremy Evans (replaces relations code.)
=== 0.4.2 (2008-02-29)
* Fixed one_to_many implicit key to work correctly for namespaced classes (#167).
* Fixed Model.db= to affect the underlying dataset (#183).
* Fixed Model.implicit_table_name to disregard namespaces.
=== 0.4.1 (2008-02-10)
* Implemented Model#inspect (#151).
* Changed Model#method_missing to short-circuit and bypass checking #columns if the values hash already contains the relevant column (#150).
* Updated to reflect changes in sequel_core (Dataset#clone_merge renamed to Dataset#clone).
=== 0.4 (2008-02-05)
* Fixed Model#set to work with string keys (#143).
* Fixed Model.create to correctly initialize instances marked as new (#135).
* Fixed Model#initialize to convert string keys into symbol keys. This also fixes problem with validating objects initialized with string keys (#136).
=== 0.3.3 (2008-01-25)
* Finalized support for virtual attributes.
=== 0.3.2.1 (2008-01-24)
* Fixed Model.dataset to correctly set the dataset if using implicit naming or inheriting the superclass dataset (thanks celldee).
=== 0.3.2 (2008-01-24)
* Added Model#update_with_params method with support for virtual attributes and auto-filtering of unrelated parameters, and changed Model.create_with_params to support virtual attributes (#128).
* Cleaned up gem spec (#132).
* Removed validations code. Now relying on validations in assistance gem.
=== 0.3.1 (2008-01-21)
* Changed Model.dataset to use inflector to pluralize the class name into the table name. Works in similar fashion to table names in AR or DM.
=== 0.3 (2008-01-18)
* Implemented Validatable::Errors class.
* Added Model#reload as alias to Model#refresh.
* Changed Model.create to accept a block (#126).
* Rewrote validations.
* Fixed Model#initialize to accept nil values (#115).
=== 0.2 (2008-01-02)
* Removed deprecated Model.recreate_table method.
* Removed deprecated :class and :on options from one_to_many macro.
* Removed deprecated :class option from one_to_one macro.
* Removed deprecated Model#pkey method.
* Changed dependency to sequel_core.
* Removed examples from sequel core.
* Additional specs. We're now at 100% coverage.
* Refactored hooks code. Hooks are now inheritable, and can be defined by supplying a block or a method name, or by overriding the hook instance method. Hook chains can now be broken by returning false (#111, #112).
=== 0.1 (2007-12-30)
* Moved model code from sequel into separate model sub-project.
sequel-5.63.0/doc/advanced_associations.rdoc 0000664 0000000 0000000 00000107764 14342141206 0021061 0 ustar 00root root 0000000 0000000 = Advanced Associations
Sequel::Model's association support is powerful and flexible, but it can be difficult for
new users to understand what the support enables. This guide shows off some of the more
advanced Sequel::Model association features.
You should probably review the {Model Associations Basics and Options guide}[rdoc-ref:doc/association_basics.rdoc]
before reviewing this guide.
== Sequel::Model Eager Loading
Sequel::Model offers two different ways to perform eager loading, +eager+ and
+eager_graph+. +eager+ uses an SQL query per association, +eager_graph+ uses a single
SQL query containing JOINs.
Assuming the following associations:
Artist.one_to_many :albums
Album.one_to_many :tracks
Track.many_to_one :lyric
Let's say you wanted to load all artists and eagerly load the related albums, tracks, and lyrics.
Artist.eager(albums: {tracks: :lyric})
# 4 Queries:
# SELECT * FROM artists;
# SELECT * FROM albums WHERE (artist_id IN (...));
# SELECT * FROM tracks WHERE (album_id IN (...));
# SELECT * FROM lyrics WHERE (id IN (...));
Artist.eager_graph(albums: {tracks: :lyric})
# 1 Query:
# SELECT artists.id, artists.name, ...
# albums.id AS albums_id, albums.name AS albums_name, ...
# tracks.id AS tracks_id, tracks.name AS tracks_name, ...
# lyric.id AS lyric_id, ...
# FROM artists
# LEFT OUTER JOIN albums ON (albums.artist_id = artists.id)
# LEFT OUTER JOIN tracks ON (tracks.album_id = albums.id)
# LEFT OUTER JOIN lyrics AS lyric ON (lyric.id = tracks.lyric_id);
In general, the recommendation is to use +eager+ unless you have a reason to use +eager_graph+.
+eager_graph+ is needed when you want to reference columns in an associated table. For example,
if you want to order the loading of returned artists based on the names of the albums, you cannot
do:
Artist.eager(albums: {tracks: :lyric}).order{albums[:name]}
because the initial query Sequel will use would be:
# SELECT * FROM artists ORDER BY albums.name;
and +albums+ is not a valid qualifier in such a query. In this situation, you must use +eager_graph+:
Artist.eager_graph(albums: {tracks: :lyric}).order{albums[:name]}
Whether +eager+ or +eager_graph+ performs better is association and database dependent. If
you are concerned about performance, you should try benchmarking both cases with appropriate
data to see which performs better.
=== Mixing eager and eager_graph
Sequel offers the ability to mix +eager+ and +eager_graph+ when loading results. This can
be done at the main level by calling both +eager+ and +eager_graph+ on the same dataset:
Album.eager(:artist).eager_graph(:tracks)
# 2 Queries:
# SELECT albums.id, albums.name, ...
# artist.id AS artist_id, artist.name AS artist_name, ...
# FROM albums
# LEFT OUTER JOIN artists AS artist ON (artist.id = albums.artist_id);
# SELECT * FROM artists WHERE (id IN (...));
You can also use +eager+ to load initial associations, and +eager_graph+ to load
remaining associations, by using +eager_graph+ in an eager load callback:
Artist.eager(albums: {tracks: proc{|ds| ds.eager_graph(:lyric)}})
# 3 Queries:
# SELECT * FROM artists;
# SELECT * FROM albums WHERE (artist_id IN (...));
# SELECT tracks.id, tracks.name, ...
# lyric.id AS lyric_id, ...
# FROM tracks
# LEFT OUTER JOIN lyrics AS lyric ON (lyric.id = tracks.lyric_id)
# WHERE (tracks.album_id IN (...));
Using the +eager_graph_eager+ plugin, you can use +eager_graph+ to load the
initial associations, and +eager+ to load the remaining associations. When
you call +eager_graph_eager+, you must specify the dependency chain at
which to start the eager loading via +eager+:
Artist.plugin :eager_graph_eager
Artist.eager_graph(albums: :tracks).eager_graph_eager([:albums, :tracks], :lyric)
# 2 Queries:
# SELECT artists.id, artists.name, ...
# albums.id AS albums_id, albums.name AS albums_name, ...
# tracks.id AS tracks_id, tracks.name AS tracks_name, ...
# FROM artists
# LEFT OUTER JOIN albums ON (albums.artist_id = artists.id)
# LEFT OUTER JOIN tracks ON (tracks.album_id = albums.id);
# SELECT * FROM lyrics WHERE (id IN (...));
These two approaches can also be nested, with +eager+ -> +eager_graph+ -> +eager+:
Album.plugin :eager_graph_eager
Artist.eager(albums: proc{|ds| ds.eager_graph(:tracks).eager_graph_eager([:tracks], :lyric)})
# 3 Queries:
# SELECT * FROM artists;
# SELECT albums.id, albums.name, ...
# tracks.id AS tracks_id, tracks.name AS tracks_name, ...
# FROM albums
# LEFT OUTER JOIN tracks ON (tracks.album_id = albums.id)
# WHERE (albums.artist_id IN (...));
# SELECT * FROM lyrics WHERE (id IN (...));
Or with 2 separate +eager_graph+ queries:
Artist.eager_graph(:albums).eager_graph_eager([:albums], tracks: proc{|ds| ds.eager_graph(:lyric)})
# 2 Queries:
# SELECT artists.id, artists.name, ...
# albums.id AS albums_id, albums.name AS albums_name, ...
# FROM artists
# LEFT OUTER JOIN albums ON (albums.artist_id = artists.id);
# SELECT tracks.id, tracks.name, ...
# lyric.id AS lyric_id, ...
# FROM tracks
# LEFT OUTER JOIN lyrics AS lyric ON (lyric.id = tracks.lyric_id)
# WHERE (tracks.album_id IN (...));
== Sequel::Model Association Loading Options
There are a bunch of advanced association options that are available to
handle more complex cases. First we'll go over some of the simpler ones:
All associations take a block that can be used to further filter/modify the
default dataset:
Artist.one_to_many :gold_albums, class: :Album do |ds|
ds.where{copies_sold > 500000}
end
There's also an :eager_block option if you want to use a different block when
eager loading via Dataset#eager.
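For example, here's a minimal sketch (the best_albums association name and both
thresholds are hypothetical) that filters lazy loads with the association block,
but applies a stricter filter when eager loading via :eager_block:
# Lazy loads return albums with over 500,000 copies sold; eager loads
# only return albums with over 1,000,000 copies sold.
Artist.one_to_many :best_albums, class: :Album,
  eager_block: proc{|ds| ds.where{copies_sold > 1000000}} do |ds|
    ds.where{copies_sold > 500000}
  end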
There are many options for changing how the association is eagerly
loaded via Dataset#eager_graph:
:graph_join_type :: The type of join to do (:inner, :left, :right)
:graph_conditions :: Additional conditions to put on the join (needs to be a
hash or an array of two-element pairs). Unqualified symbols, or the first
element of each pair, are assumed to be columns of the associated model, and
unqualified symbols in the second element of each pair are assumed to be
columns of the current model.
:graph_block :: A block passed to +join_table+, allowing you to specify
conditions other than equality, or to use OR, or set up any arbitrary
condition. The block is passed the associated table alias, current table
alias, and an array of previous join clause objects.
:graph_only_conditions :: Use these conditions instead of the standard
association conditions. This is necessary when you don't want to have an
equal condition between the foreign key and primary key of the tables.
You can also use this to have a JOIN USING (array of symbols), or a NATURAL
or CROSS JOIN (nil, with the appropriate :graph_join_type).
These can be used like this:
# Makes Artist.eager_graph(:required_albums).all not return artists that
# don't have any albums
Artist.one_to_many :required_albums, class: :Album, graph_join_type: :inner
# Makes sure all returned albums have the active flag set
Artist.one_to_many :active_albums, class: :Album, graph_conditions: {active: true}
# Only returns albums that have sold more than 500,000 copies
Artist.one_to_many :gold_albums, class: :Album,
graph_block: proc{|j,lj,js| Sequel[j][:copies_sold] > 500000}
# Handles the case where the tables are associated by a case insensitive name string
Artist.one_to_many :albums, key: :artist_name,
graph_only_conditions: nil,
graph_block: proc{|j,lj,js| {Sequel.function(:lower, Sequel[j][:artist_name])=>Sequel.function(:lower, Sequel[lj][:name])}}
# Handles the case where both key columns have the name artist_name, and you want to use
# a JOIN USING
Artist.one_to_many :albums, key: :artist_name, graph_only_conditions: [:artist_name]
One advantage of using +eager_graph+ is that you can easily filter/order
on columns in an associated table on a per-query basis, using regular
Sequel dataset methods. For example, if you only want to retrieve artists
who have albums that start with A, and eager load just those albums,
ordered by the albums name, you can do:
albums = Artist.
eager_graph(:albums).
where{Sequel.like(albums[:name], 'A%')}.
order{albums[:name]}.
all
For lazy loading (e.g. Model[1].association), the :dataset option can be used
to specify an arbitrary dataset (one that uses different keys, multiple keys,
joins to other tables, etc.).
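For example, here's a minimal sketch of the :dataset option, assuming a
hypothetical albums.artist_name column that matches artists.name
case-insensitively (as in the examples later in this guide, the proc is
evaluated in the context of the model instance):
# The tables are related by a name string rather than a foreign key,
# so a custom dataset is used for lazy loading.
Artist.one_to_many :albums_by_name, class: :Album,
  dataset: (proc do
    Album.where(Sequel.function(:lower, :artist_name) => name.downcase)
  end)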
== Custom Eager Loaders
For eager loading via +eager+, the :eager_loader option can be used to specify
how to eagerly load a complex association. This is an extremely powerful
option. Though it can often be verbose (compared to other things in Sequel),
it allows you complete control over how to eagerly load associations for a
group of objects.
:eager_loader should be a proc that takes a single hash argument, which will
have at least the following keys:
:id_map :: A mapping of key values to arrays of current model instances,
usage described below
:rows :: An array of model objects
:associations :: A hash of dependent associations to eagerly load
:self :: The dataset that is doing the eager loading
:eager_block :: A dynamic callback for this eager load.
Since you are given all of the records, you can do things like filter on
associations that are specified by multiple keys, or do multiple
queries depending on the content of the records (which would be
necessary for polymorphic associations). Inside the :eager_loader
proc, you should get the related objects and populate the
associations cache for all objects in the array of records. The hash
of dependent associations is available for you to cascade the eager
loading down multiple levels, but it is up to you to use it.
The id_map is a performance enhancement that is used by the default
association loaders and is also available to you. It is a hash with keys
foreign/primary key values, and values being arrays of current model
objects having the foreign/primary key value associated with the key.
This may be hard to visualize, so I'll give an example. Let's say you
have the following associations
Album.many_to_one :artist
Album.one_to_many :tracks
and the following three albums in the database:
album1 = Album.create(artist_id: 3) # id: 1
album2 = Album.create(artist_id: 3) # id: 2
album3 = Album.create(artist_id: 2) # id: 3
If you try to eager load this dataset:
Album.eager(:artist, :tracks).all
Then the id_map provided to the artist :eager_loader proc would be:
{3=>[album1, album2], 2=>[album3]}
The artist id_map contains a mapping of artist_id values to arrays of
album objects. Since both album1 and album2 have the same artist_id,
they are both in the array related to that key. album3 has a different
artist_id, so it is in a different array. Eager loading of artists is
done by looking for any artist having one of the keys in the hash:
artists = Artist.where(id: id_map.keys).all
When the artists are retrieved, you can iterate over them, find entries
with matching keys, and manually associate them to the albums:
artists.each do |artist|
# Find related albums using the artist_id_map
if albums = id_map[artist.id]
# Iterate over the albums
albums.each do |album|
# Manually set the artist association for each album
album.associations[:artist] = artist
end
end
end
The id_map provided to the tracks :eager_loader proc would be:
{1=>[album1], 2=>[album2], 3=>[album3]}
Now the id_map contains a mapping of id values to arrays of album objects (in this
case each array only has a single object, because id is the primary key). So when
looking for tracks to eagerly load, you only need to look for ones that have an
album_id with one of the keys in the hash:
tracks = Track.where(album_id: id_map.keys).all
When the tracks are retrieved, you can iterate over them, find entries with matching
keys, and manually associate them to the albums:
tracks.each do |track|
if albums = id_map[track.album_id]
albums.each do |album|
album.associations[:tracks] << track
end
end
end
=== Two basic example eager loaders
Putting the code in the above examples together, you almost have enough for a basic
working eager loader. The main piece that is missing is setting initial values
for the eagerly loaded associations. For the artist association, you
need to initialize the values to nil:
# rows here is the :rows entry in the hash passed to the eager loader
rows.each{|album| album.associations[:artist] = nil}
For the tracks association, you set the initial value to an empty array:
rows.each{|album| album.associations[:tracks] = []}
These are done so that if an album currently being loaded doesn't have an associated
artist or any associated tracks, the lack of them will be cached, so calling the
artist or tracks method on the album will not do another database lookup.
So putting everything together, the artist eager loader looks like:
Album.many_to_one :artist, eager_loader: (proc do |eo_opts|
eo_opts[:rows].each{|album| album.associations[:artist] = nil}
id_map = eo_opts[:id_map]
Artist.where(id: id_map.keys).all do |artist|
if albums = id_map[artist.id]
albums.each do |album|
album.associations[:artist] = artist
end
end
end
end)
and the tracks eager loader looks like:
Album.one_to_many :tracks, eager_loader: (proc do |eo_opts|
eo_opts[:rows].each{|album| album.associations[:tracks] = []}
id_map = eo_opts[:id_map]
Track.where(album_id: id_map.keys).all do |track|
if albums = id_map[track.album_id]
albums.each do |album|
album.associations[:tracks] << track
end
end
end
end)
Now, these are both overly simplistic eager loaders that don't respect cascaded
associations or any of the association options. But hopefully they both
provide simple examples that you can more easily build and learn from, as
the custom eager loaders described later in this page are more complex.
Basically, the eager loading steps can be broken down into:
1. Set default association values (nil/[]) for each of the current objects
2. Return just related associated objects by filtering the associated class
to include only rows with keys present in the id_map.
3. Iterating over the returned associated objects, indexing into the id_map
using the foreign/primary key value in the associated object to get
current values associated to that specific object.
4. For each of those current values, updating the cached association value to
include that specific object.
Using the :eager_loader proc, you should be able to eagerly load all associations
that can be eagerly loaded, even if Sequel doesn't natively support such eager
loading.
== Limited Associations
Sequel supports specifying limits and/or offsets for associations:
Artist.one_to_many :first_10_albums, class: :Album, order: :release_date, limit: 10
For retrieving the associated objects for a single object, this just uses
a LIMIT:
artist.first_10_albums
# SELECT * FROM albums WHERE (artist_id = 1) LIMIT 10
=== Eager Loading via eager
However, if you want to eagerly load an association, you must use a different
approach. Sequel has 4 separate strategies for dealing with such cases.
The default strategy used on all databases is a UNION-based approach, which
will submit multiple subqueries in a UNION query:
Artist.where(id: [1,2]).eager(:first_10_albums).all
# SELECT * FROM (SELECT * FROM albums WHERE (artist_id = 1) LIMIT 10) UNION ALL
# SELECT * FROM (SELECT * FROM albums WHERE (artist_id = 2) LIMIT 10)
This is the fastest way to load the associated objects on most databases, as long as
there is an index on albums.artist_id. Without an index it is probably the slowest
approach, so make sure you have an index on the key columns. If you cannot add an
index, you'll want to manually specify the :eager_limit_strategy option as shown below.
On PostgreSQL, for *_one associations that don't use an offset, you can
choose to use the distinct on strategy:
Artist.one_to_one :first_album, class: :Album, order: :release_date,
eager_limit_strategy: :distinct_on
Artist.where(id: [1,2]).eager(:first_album).all
# SELECT DISTINCT ON (albums.artist_id) *
# FROM albums
# WHERE (albums.artist_id IN (1, 2))
# ORDER BY albums.artist_id, release_date
Otherwise, if the database supports window functions, you can choose to use
the window function strategy:
Artist.one_to_many :first_10_albums, class: :Album, order: :release_date, limit: 10,
eager_limit_strategy: :window_function
Artist.where(id: [1,2]).eager(:first_10_albums).all
# SELECT * FROM (
# SELECT *, row_number() OVER (PARTITION BY albums.artist_id ORDER BY release_date) AS x_sequel_row_number_x
# FROM albums
# WHERE (albums.artist_id IN (1, 2))
# ) AS t1
# WHERE (x_sequel_row_number_x <= 10)
Alternatively, you can use the :ruby strategy, which will fall back to
retrieving all records, and then will slice the resulting array to get
the first 10 after retrieval.
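For example, with the :ruby strategy the eager load runs a plain unlimited
query, and the limit is applied in ruby afterwards:
Artist.one_to_many :first_10_albums, class: :Album, order: :release_date,
  limit: 10, eager_limit_strategy: :ruby
Artist.where(id: [1,2]).eager(:first_10_albums).all
# SELECT * FROM albums WHERE (albums.artist_id IN (1, 2)) ORDER BY release_date
# (the first 10 albums per artist are then sliced out in ruby)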
=== Dynamic Eager Loading Limits
If you need to eager load variable numbers of records (with limits that aren't
known at the time of the association definition), Sequel supports an
:eager_limit dataset option that can be defined in an eager loading callback:
Artist.one_to_many :albums
Artist.where(id: [1, 2]).eager(albums: lambda{|ds| ds.order(:release_date).clone(eager_limit: 3)}).all
# SELECT * FROM (
# SELECT *, row_number() OVER (PARTITION BY albums.artist_id ORDER BY release_date) AS x_sequel_row_number_x
# FROM albums
# WHERE (albums.artist_id IN (1, 2))
# ) AS t1
# WHERE (x_sequel_row_number_x <= 3)
You can also customize the :eager_limit_strategy on a case-by-case basis by passing in that option in the same way:
Artist.where(id: [1, 2]).eager(albums: lambda{|ds| ds.order(:release_date).clone(eager_limit: 3, eager_limit_strategy: :ruby)}).all
# SELECT * FROM albums WHERE (albums.artist_id IN (1, 2)) ORDER BY release_date
The :eager_limit and :eager_limit_strategy options currently only work when
eager loading via #eager, not with #eager_graph.
=== Eager Loading via eager_graph_with_options
When eager loading an association via eager_graph (which uses JOINs), the
situation is similar. While the UNION-based strategy cannot be used as
you don't know the records being eagerly loaded in advance, Sequel can use
a variant of the other 3 strategies. By default it retrieves all records
and then does the array slice in ruby. As eager_graph does not support
options, to use an eager_graph limit strategy you have to use the
eager_graph_with_options method with the :limit_strategy option.
The :distinct_on strategy uses DISTINCT ON in a subquery and JOINs that
subquery:
Artist.eager_graph_with_options(:first_album, limit_strategy: :distinct_on).all
# SELECT artists.id, artists.name, first_album.id AS first_album_id,
# first_album.name AS first_album_name, first_album.artist_id,
# first_album.release_date
# FROM artists
# LEFT OUTER JOIN (
# SELECT DISTINCT ON (albums.artist_id) *
# FROM albums
# ORDER BY albums.artist_id, release_date
# ) AS first_album ON (first_album.artist_id = artists.id)
The :window_function approach JOINs to a nested subquery using a window
function:
Artist.eager_graph_with_options(:first_10_albums, limit_strategy: :window_function).all
# SELECT artists.id, artists.name, first_10_albums.id AS first_10_albums_id,
# first_10_albums.name AS first_10_albums_name, first_10_albums.artist_id,
# first_10_albums.release_date
# FROM artists
# LEFT OUTER JOIN (
# SELECT id, name, artist_id, release_date
# FROM (
# SELECT *, row_number() OVER (PARTITION BY albums.artist_id ORDER BY release_date) AS x_sequel_row_number_x
# FROM albums
# ) AS t1 WHERE (x_sequel_row_number_x <= 10)
# ) AS first_10_albums ON (first_10_albums.artist_id = artists.id)
The :correlated_subquery approach JOINs to a nested subquery using a correlated
subquery:
Artist.eager_graph_with_options(:first_10_albums, limit_strategy: :correlated_subquery).all
# SELECT artists.id, artists.name, first_10_albums.id AS first_10_albums_id,
# first_10_albums.name AS first_10_albums_name, first_10_albums.artist_id,
# first_10_albums.release_date
# FROM artists
# LEFT OUTER JOIN (
# SELECT *
# FROM albums
# WHERE albums.id IN (
# SELECT t1.id
# FROM albums AS t1
# WHERE (t1.artist_id = albums.artist_id)
# ORDER BY release_date
# LIMIT 10
# )
# ) AS first_10_albums ON (first_10_albums.artist_id = artists.id)
The reason that Sequel does not automatically use the :distinct_on, :window_function
or :correlated_subquery strategy for eager_graph is that it can perform much worse than the
default of just doing the array slicing in ruby. If you are only using eager_graph to
return a few records, it may be cheaper to get all of their associated records and filter
them in ruby as opposed to computing the set of limited associated records for all rows.
It's recommended to only use an eager_graph limit strategy if you have benchmarked
it against the default behavior and found it is faster for your use case.
=== Filtering By Associations
In order to return correct results, Sequel automatically uses a limit strategy when
using filtering by associations with limited associations, if the database supports
it. As in the eager_graph case, the UNION-based strategy doesn't work. Unlike
in the eager and eager_graph cases, the array slicing in ruby approach does not work,
you must use an SQL-based strategy. Sequel will select an appropriate default
strategy based on the database you are using, and you can override it using the
:filter_limit_strategy option.
The :distinct_on strategy:
Artist.where(first_album: Album[1]).all
# SELECT *
# FROM artists
# WHERE (artists.id IN (
# SELECT albums.artist_id
# FROM albums
# WHERE ((albums.artist_id IS NOT NULL) AND (albums.id IN (
# SELECT DISTINCT ON (albums.artist_id) albums.id
# FROM albums
# ORDER BY albums.artist_id, release_date
# )) AND (albums.id = 1))))
The :window_function strategy:
Artist.where(first_10_albums: Album[1]).all
# SELECT *
# FROM artists
# WHERE (artists.id IN (
# SELECT albums.artist_id
# FROM albums
# WHERE ((albums.artist_id IS NOT NULL) AND (albums.id IN (
# SELECT id FROM (
# SELECT albums.id, row_number() OVER (PARTITION BY albums.artist_id ORDER BY release_date) AS x_sequel_row_number_x
# FROM albums
# ) AS t1
# WHERE (x_sequel_row_number_x <= 10)
# )) AND (albums.id = 1))))
The :correlated_subquery strategy:
Artist.where(first_10_albums: Album[1]).all
# SELECT *
# FROM artists
# WHERE (artists.id IN (
# SELECT albums.artist_id
# FROM albums
# WHERE ((albums.artist_id IS NOT NULL) AND (albums.id IN (
# SELECT t1.id
# FROM albums AS t1
# WHERE (t1.artist_id = albums.artist_id)
# ORDER BY release_date
# LIMIT 10
# )) AND (albums.id = 1))))
Note that filtering by limited associations does not work on MySQL, as MySQL does not support
any of the strategies. It's also not supported when using composite keys on databases
that don't support window functions and don't support multiple columns in IN.
=== Additional Association Types
While the above examples for limited associations showed one_to_many and one_to_one associations,
it's just because those are the simplest examples. Sequel supports all of the same features for
many_to_many and one_through_one associations that are enabled by default, as well as the
many_through_many and one_through_many associations that are added by the many_through_many
plugin.
== More advanced association examples
=== Association extensions
All associations come with an association_dataset method that can be further filtered or
otherwise modified:
class Author < Sequel::Model
one_to_many :authorships
end
Author.first.authorships_dataset.where{number < 10}.first
You can extend a dataset with a module using the :extend association option. You can reference
the model object that created the association dataset via the dataset's
+model_object+ method, and the related association reflection via the dataset's
+association_reflection+ method:
module FindOrCreate
def find_or_create(vals)
first(vals) || model.create(vals.merge(association_reflection[:key]=>model_object.id))
end
end
class Author < Sequel::Model
one_to_many :authorships, extend: FindOrCreate
end
Author.first.authorships_dataset.find_or_create(name: 'Blah', number: 10)
=== many_to_many associations through model tables
The many_to_many association can be used even when the join table is a table used for a
model. The only requirement is the join table has foreign keys to both the current
model and the associated model. Anytime there is a one_to_many association from model A to
model B, and model B has a many_to_one association to model C, you can use a many_to_many
association from model A to model C.
class Author < Sequel::Model
one_to_many :authorships
many_to_many :books, join_table: :authorships
end
class Authorship < Sequel::Model
many_to_one :author
many_to_one :book
end
@author = Author.first
@author.books
=== many_to_many for three-level associations
You can even use a many_to_many association between model A and model C if model A has a
one_to_many association to model B, and model B has a one_to_many association to model C.
You just need to use the appropriate :right_key and :right_primary_key options. And in
the reverse direction from model C to model A, you can use a one_through_one association
using the :left_key and :left_primary_key options.
class Firm < Sequel::Model
one_to_many :clients
many_to_many :invoices, join_table: :clients, right_key: :id, right_primary_key: :client_id
end
class Client < Sequel::Model
many_to_one :firm
one_to_many :invoices
end
class Invoice < Sequel::Model
many_to_one :client
one_through_one :firm, join_table: :clients, left_key: :id, left_primary_key: :client_id
end
Firm.first.invoices
Invoice.first.firm
To handle cases where there are multiple join tables, you can use the many_through_many
plugin that ships with Sequel.
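For example, here's a minimal sketch of the plugin's basic usage, assuming
hypothetical albums_artists and albums_tags join tables connecting artists to
albums and albums to tags:
class Artist < Sequel::Model
  plugin :many_through_many
  # Join through albums_artists, then albums_tags, to reach tags
  many_through_many :tags, [[:albums_artists, :artist_id, :album_id],
                            [:albums_tags, :album_id, :tag_id]]
end
Artist.first.tags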
=== Polymorphic Associations
Sequel discourages the use of polymorphic associations, which is the reason they
are not supported by default. All polymorphic associations can be made non-polymorphic
by using additional tables and/or columns instead of having a column
containing the associated class name as a string.
Polymorphic associations break referential integrity and are significantly more
complex than non-polymorphic associations, so their use is not recommended unless
you are stuck with an existing design that uses them.
If you must use them, look for the sequel_polymorphic external plugin, as it makes using
polymorphic associations in Sequel about as easy as it is in ActiveRecord. However,
here's how they can be done using Sequel's custom associations (the sequel_polymorphic
external plugin is just a generic version of this code):
class Asset < Sequel::Model
many_to_one :attachable, reciprocal: :assets,
setter: (lambda do |attachable|
self[:attachable_id] = (attachable.pk if attachable)
self[:attachable_type] = (attachable.class.name if attachable)
end),
dataset: (proc do
klass = attachable_type.constantize
klass.where(klass.primary_key=>attachable_id)
end),
eager_loader: (lambda do |eo|
id_map = {}
eo[:rows].each do |asset|
asset.associations[:attachable] = nil
((id_map[asset.attachable_type] ||= {})[asset.attachable_id] ||= []) << asset
end
id_map.each do |klass_name, id_map|
klass = klass_name.constantize
klass.where(klass.primary_key=>id_map.keys).all do |attach|
id_map[attach.pk].each do |asset|
asset.associations[:attachable] = attach
end
end
end
end)
end
class Post < Sequel::Model
one_to_many :assets, key: :attachable_id, reciprocal: :attachable, conditions: {attachable_type: 'Post'},
adder: lambda{|asset| asset.update(attachable_id: pk, attachable_type: 'Post')},
remover: lambda{|asset| asset.update(attachable_id: nil, attachable_type: nil)},
clearer: lambda{assets_dataset.update(attachable_id: nil, attachable_type: nil)}
end
class Note < Sequel::Model
one_to_many :assets, key: :attachable_id, reciprocal: :attachable, conditions: {attachable_type: 'Note'},
adder: lambda{|asset| asset.update(attachable_id: pk, attachable_type: 'Note')},
remover: lambda{|asset| asset.update(attachable_id: nil, attachable_type: nil)},
clearer: lambda{assets_dataset.update(attachable_id: nil, attachable_type: nil)}
end
@asset.attachable = @post
@asset.attachable = @note
=== Joining on multiple keys
Let's say you have two tables that are associated with each other with multiple
keys. This can be handled using Sequel's built in composite key support for
associations:
# Both of these models have album_id, number, and disc_number fields.
# All FavoriteTracks have an associated track, but not all tracks have an
# associated favorite track
class Track < Sequel::Model
many_to_one :favorite_track, key: [:disc_number, :number, :album_id], primary_key: [:disc_number, :number, :album_id]
end
class FavoriteTrack < Sequel::Model
one_to_one :track, key: [:disc_number, :number, :album_id], primary_key: [:disc_number, :number, :album_id]
end
=== Tree - All Ancestors and Descendants
Let's say you want to store a tree relationship in your database, it's pretty
simple:
class Node < Sequel::Model
many_to_one :parent, class: self
one_to_many :children, key: :parent_id, class: self
end
You can easily get a node's parent with node.parent, and a node's children with
node.children. You can even eager load the relationship up to a certain depth:
# Eager load three generations of children for a given node
Node.where(id: 1).eager(children: {children: :children}).all.first
# Load parents and grandparents for a group of nodes
Node.where{id < 10}.eager(parent: :parent).all
What if you want to get all ancestors up to the root node, or all descendants,
without knowing the depth of the tree?
class Node < Sequel::Model
many_to_one :ancestors, class: self,
eager_loader: (lambda do |eo|
# Handle cases where the root node has the same parent_id as primary_key
# and also when it is NULL
non_root_nodes = eo[:rows].reject do |n|
if [nil, n.pk].include?(n.parent_id)
# Make sure root nodes have their parent association set to nil
n.associations[:parent] = nil
true
else
false
end
end
unless non_root_nodes.empty?
id_map = {}
# Create a map of parent_ids to nodes that have that parent id
non_root_nodes.each{|n| (id_map[n.parent_id] ||= []) << n}
# Doesn't cause an infinite loop, because when only the root node
# is left, this is not called.
Node.where(id: id_map.keys).eager(:ancestors).all do |node|
# Populate the parent association for each node
id_map[node.pk].each{|n| n.associations[:parent] = node}
end
end
end)
many_to_one :descendants, eager_loader: (lambda do |eo|
id_map = {}
eo[:rows].each do |n|
# Initialize an empty array of child associations for each parent node
n.associations[:children] = []
# Populate identity map of nodes
id_map[n.pk] = n
end
# Doesn't cause an infinite loop, because the :eager_loader is not called
# if no records are returned. Exclude id = parent_id to avoid infinite loop
# if the root node is one of the returned records and it has parent_id = id
# instead of parent_id = NULL.
Node.where(parent_id: id_map.keys).exclude(id: :parent_id).eager(:descendants).all do |node|
# Get the parent from the identity map
parent = id_map[node.parent_id]
# Set the child's parent association to the parent
node.associations[:parent] = parent
# Add the child association to the array of children in the parent
parent.associations[:children] << node
end
end)
end
Note that Sequel ships with an rcte_tree plugin that does all of the above and more:
class Node < Sequel::Model
plugin :rcte_tree
end
=== Joining multiple keys to a single key, through a third table
Let's say you have a database of songs, lyrics, and artists. Each song
may or may not have a lyric (most songs are instrumental). The lyric can be
associated to an artist in each of four ways: composer, arranger, vocalist,
or lyricist. These may all be the same, or they could all be different, and
none of them are required. The songs table has a lyric_id field to associate
it to the lyric, and the lyric table has four fields to associate it to the
artist (composer_id, arranger_id, vocalist_id, and lyricist_id).
What if you want to get all songs for a given artist, ordered by the song's
name, with no duplicates?
class Artist < Sequel::Model
one_to_many :songs, order: Sequel[:songs][:name],
dataset: proc{Song.select_all(:songs).join(:lyrics, id: :lyric_id, id=>[:composer_id, :arranger_id, :vocalist_id, :lyricist_id])},
eager_loader: (lambda do |eo|
h = eo[:id_map]
ids = h.keys
eo[:rows].each{|r| r.associations[:songs] = []}
Song.select_all(:songs).
select_append{[lyrics[:composer_id], lyrics[:arranger_id], lyrics[:vocalist_id], lyrics[:lyricist_id]]}.
join(:lyrics, id: :lyric_id){Sequel.or(composer_id: ids, arranger_id: ids, vocalist_id: ids, lyricist_id: ids)}.
order{songs[:name]}.all do |song|
[:composer_id, :arranger_id, :vocalist_id, :lyricist_id].each do |x|
recs = h[song.values.delete(x)]
recs.each{|r| r.associations[:songs] << song} if recs
end
end
eo[:rows].each{|r| r.associations[:songs].uniq!}
end)
end
=== Statistics Associations (Sum of Associated Table Column)
In addition to getting associated records, you can use Sequel's association support
to get aggregate information for columns in associated tables (sums, averages, etc.).
Let's say you have a database with projects and tickets. A project can have many
tickets, and each ticket has a number of hours associated with it. You can use the
association support to create a Project association that gives the sum of hours for all
associated tickets.
class Project < Sequel::Model
one_to_many :tickets
many_to_one :ticket_hours, read_only: true, key: :id,
dataset: proc{Ticket.where(project_id: id).select{sum(hours).as(hours)}},
eager_loader: (lambda do |eo|
eo[:rows].each{|p| p.associations[:ticket_hours] = nil}
Ticket.where(project_id: eo[:id_map].keys).
select_group(:project_id).
select_append{sum(hours).as(hours)}.
all do |t|
p = eo[:id_map][t.values.delete(:project_id)].first
p.associations[:ticket_hours] = t
end
end)
# The association method returns a Ticket object with a single aggregate
# sum-of-hours value, but you want it to return an Integer/Float of just the
# sum of hours, so you call super and return just the sum-of-hours value.
# This works for both lazy loading and eager loading.
def ticket_hours
if s = super
s[:hours]
end
end
end
class Ticket < Sequel::Model
many_to_one :project
end
Note that it is often better to use a sum cache instead of this approach. You can implement
a sum cache using +after_create+, +after_update+, and +after_destroy+ hooks, or preferably using a database trigger.
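For example, here's a minimal sketch of a hook-based sum cache. The
ticket_hours_sum column on the projects table is an assumption for
illustration, and an +after_update+ hook handling changes to hours is
omitted for brevity:

  class Ticket < Sequel::Model
    many_to_one :project

    def after_create
      super
      # Add this ticket's hours to the hypothetical cache column
      Project.where(id: project_id).update(ticket_hours_sum: Sequel[:ticket_hours_sum] + hours)
    end

    def after_destroy
      super
      # Subtract this ticket's hours from the cache column
      Project.where(id: project_id).update(ticket_hours_sum: Sequel[:ticket_hours_sum] - hours)
    end
  end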
= Association Basics
This guide is based on http://guides.rubyonrails.org/association_basics.html
== Why Associations?
Associations exist to simplify code that deals with related rows in separate
database tables. Without associations, if you had classes such as:
class Artist < Sequel::Model
end
class Album < Sequel::Model
end
And you wanted to get all of the albums for a given artist (assuming each
album was associated with only one artist):
Album.where(artist_id: @artist.id).all
Or maybe you want to add an album for a given artist:
Album.create(artist_id: @artist.id, name: 'RF')
With associations, you can make the above code simpler, by setting up associations
between the two models:
class Artist < Sequel::Model
one_to_many :albums
end
class Album < Sequel::Model
many_to_one :artist
end
Then, the code to retrieve albums related to the artist is simpler:
@artist.albums
As is the code to add a related album to an artist:
@artist.add_album(name: 'RF')
It also makes it easier to create queries that use joins based on the association:
Artist.association_join(:albums)
# SELECT * FROM artists
# INNER JOIN albums ON (albums.artist_id = artists.id)
== The Types of Associations
Sequel has five different association types built in:
* many_to_one
* one_to_many
* one_to_one
* many_to_many
* one_through_one
It ships with additional association types via plugins.
=== many_to_one
The many_to_one association is used when the table for the current class
contains a foreign key that references the primary key in the table for the
associated class. It is named 'many_to_one' because there can be many rows
in the current table for each row in the associated table.
# Database schema:
# albums artists
# :id /--> :id
# :artist_id --/ :name
# :name
class Album
# Uses singular form of associated model name
many_to_one :artist
end
=== one_to_many and one_to_one
The one_to_many association is used when the table for the associated class
contains a foreign key that references the primary key in the table for the
current class. It is named 'one_to_many' because for each row in the
current table there can be many rows in the associated table.
The one_to_one association can be thought of as a subset of the one_to_many association,
but where there can only be either 0 or 1 records in the associated table. This is
useful if there is a unique constraint on the foreign key field in the associated table.
It's also useful if you want to impose an order on the association and just want the
first record returned.
# Database schema:
# artists albums
# :id <----\ :id
# :name \----- :artist_id
# :name
class Artist
# Uses plural form of associated model name
one_to_many :albums
# Uses singular form of associated model name
one_to_one :album
end
=== many_to_many and one_through_one
The many_to_many association allows each row in the current table to be associated
to many rows in the associated table, and each row in the associated table to
many rows in the current table, by using a join table to associate the two tables.
The one_through_one association can be thought of as a subset of the many_to_many
association, but where there can only be 0 or 1 records in the associated table.
This is useful if there is a unique constraint on the foreign key in the join table
that references the current table. It's also useful if you want to impose an order
on the association and just want the first record returned. The one_through_one
association is so named because it sets up a one-to-one association through a
single join table.
# Database schema:
# albums
# :id <----\
# :name \ albums_artists
# \---- :album_id
# artists /---- :artist_id
# :id <-----/
# :name
class Artist
# Uses plural form of associated model name
many_to_many :albums
# Uses singular form of associated model name
one_through_one :album
end
=== Differences Between many_to_one and one_to_one
If you want to set up a 1-1 relationship between two models, where the
foreign key in one table references the associated table directly, you have to use
many_to_one in one model, and one_to_one in the other model. How do you
know which to use in which model?
The simplest way to remember is that the model whose table has the foreign
key uses many_to_one, and the other model uses one_to_one:
# Database schema:
# artists albums
# :id <----\ :id
# :name \----- :artist_id
# :name
class Artist
one_to_one :album
end
class Album
many_to_one :artist
end
== Most Common Options
=== :key
The :key option must be used if the default column symbol that Sequel would use is not
the correct column. For example:
class Album
# Assumes :key is :artist_id, based on association name of :artist
many_to_one :artist
end
class Artist
# Assumes :key is :artist_id, based on class name of Artist
one_to_many :albums
end
However, if your schema looks like:
# Database schema:
# artists albums
# :id <----\ :id
# :name \----- :artistid # Note missing underscore
# :name
Then the default :key option will not be correct. To fix this, you need to
specify an explicit :key option:
class Album
many_to_one :artist, key: :artistid
end
class Artist
one_to_many :albums, key: :artistid
end
For many_to_many associations, the :left_key and :right_key options can be
used to specify the column names in the join table, and the :join_table
option can be used to specify the name of the join table:
# Database schema:
# albums
# :id <----\
# :name \ albumsartists
# \---- :albumid
# artists /---- :artistid
# :id <-----/
# :name
class Artist
# Note that :left_key refers to the foreign key pointing to the
# current table, and :right_key the foreign key pointing to the
# associated table.
many_to_many :albums, left_key: :artistid, right_key: :albumid,
join_table: :albumsartists
end
class Album
many_to_many :artists, left_key: :albumid, right_key: :artistid,
join_table: :albumsartists
end
=== :class
If the class of the association cannot be guessed directly by looking at
the association name, you need to specify it via the :class option. For
example, if you have two separate foreign keys in the albums table that
both point to the artists table, maybe to indicate one artist is the
vocalist and one is the composer, you'd have to use the :class option:
# Database schema:
# artists albums
# :id <----\ :id
# :name \----- :vocalist_id
# \---- :composer_id
# :name
class Album
many_to_one :vocalist, class: :Artist
many_to_one :composer, class: :Artist
end
class Artist
one_to_many :vocalist_albums, class: :Album, key: :vocalist_id
one_to_many :composer_albums, class: :Album, key: :composer_id
end
== Self-referential Associations
Self-referential associations are easy to handle in Sequel. The simplest
example is a tree structure:
# Database schema:
# nodes
# :id <--\
# :parent_id ---/
# :name
class Node
many_to_one :parent, class: self
one_to_many :children, key: :parent_id, class: self
end
For many_to_many self-referential associations, it's fairly similar. Here's
an example of a directed graph:
# Database schema:
# nodes edges
# :id <----------- :successor_id
# :name \----- :predecessor_id
class Node
many_to_many :direct_predecessors, left_key: :successor_id,
right_key: :predecessor_id, join_table: :edges, class: self
many_to_many :direct_successors, right_key: :successor_id,
left_key: :predecessor_id, join_table: :edges, class: self
end
== Methods Added
When you create an association, it's going to add instance methods to
the class related to the association.
All associations are going to have an instance method added with the
same name as the association:
@artist.albums
@album.artists
many_to_one and one_to_one associations will also have a setter method
added to change the associated object:
@album.artist = Artist.create(name: 'YJM')
many_to_many and one_to_many associations will have three methods added:
add_* :: to associate an object to the current object
remove_* :: to disassociate an object from the current object
remove_all_* :: to disassociate all currently associated objects
Examples:
@artist.add_album(@album)
@artist.remove_album(@album)
@artist.remove_all_albums
Note that the remove_all_* method does not call remove hooks defined on
the association, it just issues a single query to the database. If you
want to remove all associated objects and call remove hooks, iterate
over the array of associated objects and call remove_* for each:
@artist.albums.each do |album|
@artist.remove_album(album)
end
== Caching
Associations are cached after being retrieved:
@artist.album # Not cached - Database Query
@artist.album # Cached - No Database Query
@album.artists # Not cached - Database Query
@album.artists # Cached - No Database Query
You can choose to ignore the cached versions and do a database query to
retrieve results by passing a reload: true option to the association method:
@album.artists # Not cached - Database Query
@album.artists # Cached - No Database Query
@album.artists(reload: true) # Ignore cache - Database Query
If you reload/refresh the object, it will automatically clear the
associations cache for the object:
@album.artists # Not cached - Database Query
@album.artists # Cached - No Database Query
@album.reload
@album.artists # Not Cached - Database Query
If you want direct access to the associations cache, use the associations
instance method:
@album.associations # {}
@album.associations[:artists] # nil
@album.artists # [#<Artist ...>, ...]
@album.associations[:artists] # [#<Artist ...>, ...]
=== Code Reloading
When declaring associations, Sequel caches association metadata in the association reflection. If you're doing any code reloading that doesn't involve restarting the related process, you should disable caching of the association reflection, to avoid stale model classes still being referenced after reloading:
Sequel::Model.cache_associations = false
== Dataset Method
In addition to the above methods, associations also add an instance method
ending in +_dataset+ that returns a dataset representing the objects in the associated table:
@album.artist_id
# 10
@album.artist_dataset
# SELECT * FROM artists WHERE (id = 10) LIMIT 1
@artist.id
# 20
@artist.albums_dataset
# SELECT * FROM albums WHERE (artist_id = 20)
The association dataset is just like any other Sequel dataset, in that
it can be further filtered, ordered, etc.:
@artist.albums_dataset.
where(Sequel.like(:name, 'A%')).
order(:copies_sold).
limit(10)
# SELECT * FROM albums
# WHERE ((artist_id = 20) AND (name LIKE 'A%' ESCAPE '\'))
# ORDER BY copies_sold LIMIT 10
Records retrieved using the +_dataset+ method are not cached in the
associations cache.
@album.artists_dataset.all # [#<Artist ...>, ...]
@album.associations[:artists] # nil
== Dynamic Association Modification
Similar to the +_dataset+ method, you can provide a block to the association
method to customize the dataset that will be used to retrieve the records. So
you can apply a filter in either of these two ways:
@artist.albums_dataset.where(Sequel.like(:name, 'A%'))
@artist.albums{|ds| ds.where(Sequel.like(:name, 'A%'))}
While they both apply the same filter, using the +_dataset+ method does not
apply any of the association callbacks or handle association reciprocals (see
below for details about callbacks and reciprocals). Using a block instead handles
all those things, and also caches its results in the associations cache (ignoring
any previously cached value).
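For example, a sketch showing the caching difference:

  @artist.albums{|ds| ds.where(Sequel.like(:name, 'A%'))}
  @artist.associations[:albums] # the filtered albums, cached

  @artist.albums_dataset.where(Sequel.like(:name, 'A%')).all
  @artist.associations[:albums] # unchanged by the dataset method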
== Filtering By Associations
In addition to using the association method to get associated objects, you
can also use associated objects in filters. For example, to get
all albums for a given artist, you would usually do:
@artist.albums
# or @artist.albums_dataset for a dataset
You can also do the following:
Album.where(artist: @artist).all
# or leave off the .all for a dataset
For filtering by a single association, this isn't very useful. However, unlike
using the association method, using a filter allows you to filter by multiple
associations:
Album.where(artist: @artist, publisher: @publisher)
This will return all albums by that artist and published by that publisher.
This isn't possible using just the association method approach, though you
can combine the approaches:
@artist.albums_dataset.where(publisher: @publisher)
This doesn't just work for +many_to_one+ associations, it also works for
the other associations:
Album.one_to_one :album_info
# The album related to that AlbumInfo instance
Album.where(album_info: AlbumInfo[2])
Album.one_to_many :tracks
# The album related to that Track instance
Album.where(tracks: Track[3])
Album.many_to_many :tags
# All albums related to that Tag instance
Album.where(tags: Tag[4])
Album.one_through_one :tag
# All albums related to that Tag instance
Album.where(tag: Tag[4])
Note that for +one_to_many+ and +many_to_many+ associations, you still
use the plural form even though only a single model object is given.
You can also exclude by associations:
Album.exclude(artist: @artist).all
This will return all albums not by that artist.
You can also provide an array with multiple model objects:
Album.where(artist: [@artist1, @artist2]).all
Similar to using an array of integers or strings, this will return
all albums whose artist is one of those two artists. You can also
use +exclude+ if you want all albums not by either of those artists:
Album.exclude(artist: [@artist1, @artist2]).all
If you are using a +one_to_many+ or +many_to_many+ association, you
may want to return records that match all of multiple associated
records, instead of matching any of them. For example:
Album.where(tags: [@tag1, @tag2])
This matches albums that are associated with either @tag1 or @tag2 or
both. If you only want ones that are associated with both, you can
use separate filter calls:
Album.where(tags: @tag1).where(tags: @tag2)
Or the array form of condition specifiers:
Album.where([[:tags, @tag1], [:tags, @tag2]])
These will return albums associated with both @tag1 and @tag2.
You can also provide a dataset value when filtering by associations:
Album.where(artist: Artist.where(Sequel.like(:name, 'A%'))).all
This will return all albums whose artist starts with 'A'. Like
the other forms, this can be inverted:
Album.exclude(artist: Artist.where(Sequel.like(:name, 'A%'))).all
This will return all albums whose artist does not start with 'A'.
Filtering by associations even works for associations that have
conditions added via the :conditions option or a block:
Album.one_to_many :popular_tags, clone: :tags do |ds|
ds.where{times_used > 1000}
end
Album.where(popular_tags: [@tag1, @tag2])
This will return all albums whose popular tags would include
at least one of those tags.
Note that filtering by associations does not work for associations
that use blocks with instance-specific code.
== Name Collisions
Because associations create instance methods, it's possible to override
existing instance methods if you name an association the same as an
existing method. For example, values and associations
would be bad association names.
== Database Schema
Creating an association doesn't modify the database schema. Sequel
assumes your associations reflect the existing database schema. If not,
you should modify your schema before creating the associations.
=== many_to_one/one_to_many
For example, for the following model code:
class Album
many_to_one :artist
end
class Artist
one_to_many :albums
end
You probably want the following database schema:
# albums artists
# :id /--> :id
# :artist_id --/ :name
# :name
Which could be created using the following Sequel code:
DB.create_table(:artists) do
# Primary key must be set explicitly
primary_key :id
String :name, null: false, unique: true
end
DB.create_table(:albums) do
primary_key :id
# Table that foreign key references needs to be set explicitly
# for a database foreign key reference to be created.
foreign_key :artist_id, :artists, null: false
String :name, null: false, unique: true
end
If you already had a schema such as:
# Database schema:
# albums artists
# :id :id
# :name :name
Then you just need to add the column:
DB.alter_table(:albums) do
add_foreign_key :artist_id, :artists, null: false
end
=== many_to_many
With many_to_many associations, the default join table for the association
uses the sorted underscored names of both model classes. For example, with
the following model code:
class Album
many_to_many :artists
end
class Artist
many_to_many :albums
end
The default join table name would be albums_artists, not
artists_albums, because:
["artists", "albums"].sort.join('_')
# "albums_artists"
Assume you already had the albums and artists tables created, and you just
wanted to add an albums_artists join table to create the following schema:
# Database schema:
# albums
# :id <----\
# :name \ albums_artists
# \---- :album_id
# artists /---- :artist_id
# :id <-----/
# :name
You could use the following Sequel code:
DB.create_join_table(album_id: :albums, artist_id: :artists)
# or
DB.create_table(:albums_artists) do
foreign_key :album_id, :albums, null: false
foreign_key :artist_id, :artists, null: false
primary_key [:album_id, :artist_id]
index [:artist_id, :album_id]
end
== Association Scope
If you nest your Sequel::Model classes inside modules, then you should know
that Sequel will only look in the same module for associations by default.
So the following code will work fine:
module App
class Artist < Sequel::Model
one_to_many :albums
end
class Album < Sequel::Model
many_to_one :artist
end
end
However, if you enclose your model classes inside two different modules,
things will not work by default:
module App1
class Artist < Sequel::Model
one_to_many :albums
end
end
module App2
class Album < Sequel::Model
many_to_one :artist
end
end
To fix this, you need to specify the full model class name using the
:class option:
module App1
class Artist < Sequel::Model
one_to_many :albums, class: "App2::Album"
end
end
module App2
class Album < Sequel::Model
many_to_one :artist, class: "App1::Artist"
end
end
If both classes are in the same module, but the default class name
used is not correct, you need to specify the full class name with the
:class option:
module App1
class AlbumArtist < Sequel::Model
one_to_many :albums
end
class Album < Sequel::Model
many_to_one :artist, class: "App1::AlbumArtist"
end
end
== Method Details
In all of these methods, _association_ is replaced by the symbol you
pass to the association.
=== _association_(opts={}) (e.g. albums)
For +many_to_one+ and +one_to_one+ associations, the _association_ method
returns either the single object associated, or nil if no object is
associated.
@artist = @album.artist
For +one_to_many+ and +many_to_many+ associations, the _association_ method
returns an array of associated objects, which may be empty if no objects
are currently associated.
@albums = @artist.albums
=== _association_=(object_to_associate) (e.g. artist=) [+many_to_one+ and +one_to_one+]
The _association_= method sets up an association of the passed object to
the current object. For +many_to_one+ associations, this sets the
foreign key for the current object to point to the associated
object's primary key.
@album.artist = @artist
For +one_to_one+ associations, this sets the foreign key of the
associated object to the primary key value of the current object.
For +many_to_one+ associations, this does not save the current object.
For +one_to_one+ associations, this does save the associated object.
=== add_association(object_to_associate) (e.g. add_album) [+one_to_many+ and +many_to_many+]
The add_association method associates the passed object to the current
object. For +one_to_many+ associations, it sets the foreign key of the
associated object to the primary key value of the current object, and
saves the associated object. For +many_to_many+ associations, this inserts
a row into the join table with the foreign keys set to the primary key values
of the current and associated objects. Note that the singular form of the
association name is used in this method.
@artist.add_album(@album)
In addition to passing an actual associated object, you can pass a hash,
and a new associated object will be created from it:
@artist.add_album(name: 'RF') # creates Album object
The add_association method returns the new associated object:
@album = @artist.add_album(name: 'RF')
Note that the add_* methods for +one_to_many+ persist the changes by
saving the passed in (or newly created) object. However, to avoid
silent failures of these methods, they explicitly raise exceptions
even when raise_on_save_failure is false for the associated model.
You can disable this behavior (i.e. return nil instead of raising
exceptions on a save failure) by setting the raise_on_save_failure: false
option for the association.
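For example:

  # add_album will return nil instead of raising an exception if
  # saving the album fails
  Artist.one_to_many :albums, raise_on_save_failure: false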
=== remove_association(object_to_disassociate) (e.g. remove_album) [+one_to_many+ and +many_to_many+]
The remove_association method disassociates the passed object from
the current object. For +one_to_many+ associations, it sets the foreign key of
the associated object to NULL, and saves the associated object. For
+many_to_many+ associations, this deletes the matching row in the join table.
Similar to the add_association method, the singular form of the
association name is used in this method.
@artist.remove_album(@album)
Note that this does not delete @album from the database, it only
disassociates it from the @artist. To delete @album from the
database:
@album.destroy
The add_association and remove_association methods should be
thought of as adding and removing from the association, not from the database.
In addition to passing the object directly to remove_association, you
can also pass the associated object's primary key:
@artist.remove_album(10)
This will look up the associated object using the key, and remove that
album.
The remove_association method returns the now disassociated object:
@album = @artist.remove_album(10)
=== remove_all_association (e.g. remove_all_albums) [+one_to_many+ and +many_to_many+]
The remove_all_association method disassociates all currently associated
objects. For +one_to_many+ associations, it sets the foreign key of
all associated objects to NULL in a single query. For +many_to_many+
associations, this deletes all matching rows in the join table.
Unlike the add_association and remove_association methods, the
plural form of the association name is used in this method.
The remove_all_association method returns the previously cached associated
records, or nil if there were no cached associated records.
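For example:

  @artist.albums                      # loads and caches the association
  @albums = @artist.remove_all_albums # returns the previously cached albums
  Artist.first.remove_all_albums      # nil, as nothing was cached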
=== association_dataset (e.g. albums_dataset)
The association_dataset method returns a dataset that represents
all associated objects. This dataset is like any other Sequel dataset,
in that it can be filtered, ordered, etc.:
ds = @artist.albums_dataset.where(Sequel.like(:name, 'A%')).order(:copies_sold)
Unlike most other Sequel datasets, association datasets have a couple of
added methods:
ds.model_object # @artist
ds.association_reflection # same as Artist.association_reflection(:albums)
For more info on Sequel's reflection capabilities, see the {Reflection page}[rdoc-ref:doc/reflection.rdoc].
== Overriding Method Behavior
Sequel is designed to be very flexible. If the default behavior of the
association modification methods isn't what you desire, you can override
the methods in your classes. However, you should be aware that for each
of the association modification methods described, there is a private
method that is preceded by an underscore that does the actual
modification. The public method without the underscore handles caching
and callbacks, and shouldn't be overridden by the user.
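For example, here's a sketch of overriding the private method behind
add_album (the file_under column is reused from the examples below):

  class Artist < Sequel::Model
    one_to_many :albums

    private

    # The public add_album method still handles caching and callbacks,
    # and calls this private method to do the actual modification.
    def _add_album(album, *args)
      album.file_under = "#{name}-#{album.name}"
      super
    end
  end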
In addition to overriding the private method in your class, you can also
use association options to change which method Sequel defines. The
only difference between the two is that if you use an association option
to change the method Sequel defines, you cannot call super to get the
default behavior.
=== :setter (_association= method)
Let's say you want to set a specific field whenever associating an object
using the association setter method. For example, let's say you have
a file_under column for each album to tell you where to file it. If the
album is associated with an artist, it should be filed under the artist's
name and the album's name, otherwise it should just use the album's name.
class Album < Sequel::Model
many_to_one :artist, setter: (lambda do |artist|
if artist
self.artist_id = artist.id
self.file_under = "#{artist.name}-#{name}"
else
self.artist_id = nil
self.file_under = name
end
end)
end
The above example is contrived, as you would generally use a before_save model
hook to handle such a modification. However, if you only modify the album's
artist using the artist= method, this approach may perform better.
=== :adder (\_add_association method)
Continuing with the same example, here's how you would handle the same case if
you also wanted to handle the Artist#add_album method:
class Artist < Sequel::Model
one_to_many :albums, adder: (lambda do |album|
album.update(artist_id: id, file_under: "#{name}-#{album.name}")
end)
end
You can set this to +nil+ to not create an add_association method.
=== :remover (\_remove_association method)
Continuing with the same example, here's how you would handle the same case if
you also wanted to handle the Artist#remove_album method:
class Artist < Sequel::Model
one_to_many :albums, remover: (lambda do |album|
album.update(artist_id: nil, file_under: album.name)
end)
end
You can set this to +nil+ to not create a remove_association method.
=== :clearer (\_remove_all_association method)
Continuing with the same example, here's how you would handle the same case if
you also wanted to handle the Artist#remove_all_albums method:
class Artist < Sequel::Model
one_to_many :albums, clearer: (lambda do
# This is Dataset#update, not Model#update, so the file_under: :name
# ends up being "SET file_under = name" in SQL.
albums_dataset.update(artist_id: nil, file_under: :name)
end)
end
You can set this to +nil+ to not create a remove_all_association method.
=== :no_dataset_method
Setting this to true will result in the association_dataset method
not being defined. This can save memory if you only use the association
method and do not call the association_dataset method directly or
indirectly.
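For example:

  # No albums_dataset method will be defined
  Artist.one_to_many :albums, no_dataset_method: true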
=== :no_association_method
Setting this to true will result in the association method
not being defined. This can save memory if you only use the
association_dataset method and do not call the association method
directly or indirectly.
== Association Options
Sequel's associations mostly share the same options. For ease of understanding,
they are grouped here by section.
The defaults for any of these options can be set at the class level using
Sequel::Model.default_association_options. To make
associations read only by default:
Sequel::Model.default_association_options[:read_only] = true
Many of these options are specific to particular association types, and
the defaults can be set on a per association type basis. To make one_to_many
associations read only by default:
Sequel::Model.default_association_type_options[:one_to_many] = {read_only: true}
=== Association Dataset Modification Options
==== block
All association defining methods take a block that is passed the
default dataset and should return a modified copy of the dataset to
use for the association. For example, if you wanted an association
that returns all albums of an artist that went gold (sold at least
500,000 copies):
Artist.one_to_many :gold_albums, class: :Album do |ds|
ds.where{copies_sold > 500000}
end
The result of the block is cached as an optimization. One of the side
effects of that is that if your block depends on external state, it won't
work correctly unless you set up a delayed evaluation. For example:
Artist.one_to_many :gold_albums, class: :Album do |ds|
ds.where{copies_sold > $gold_limit}
end
In this case if you change $gold_limit later, the changes won't
affect the association. If you want to pick up changes to $gold_limit,
you need to set up a delayed evaluation:
Artist.one_to_many :gold_albums, class: :Album do |ds|
ds.where{copies_sold > Sequel.delay{$gold_limit}}
end
==== :class
This is the class of the associated objects that will be used. It's
one of the most commonly used options. If it is not given, it guesses
based on the name of the association, including considering the namespace
of the current model. If a *_to_many association is used, this uses the
singular form of the association name. For example:
Album.many_to_one :artist # guesses Artist
Artist.one_to_many :albums # guesses Album
Foo::Artist.one_to_many :albums # guesses Foo::Album
However, for more complex associations, especially ones that add
additional filters beyond the foreign/primary key relationships, the
default class guessed will be wrong:
# guesses GoldAlbum
Artist.one_to_many :gold_albums do |ds|
ds.where{copies_sold > 500000}
end
You can specify the :class option using the class itself, a Symbol,
or a String:
Album.many_to_one :artist, class: Artist # Class
Album.many_to_one :artist, class: :Artist # Symbol
Album.many_to_one :artist, class: "Artist" # String
If you are namespacing your models, and you need to specify the :class
option, the path you give to the :class option should be the full path
to the associated class including any namespaces:
Foo::Album.many_to_one :artist # Uses Foo::Artist
Foo::Album.many_to_one :artist, class: "Artist" # Uses Artist
Foo::Album.many_to_one :artist, class: "Foo::Artist" # Uses Foo::Artist
==== :key
For +many_to_one+ associations, this is the foreign key in the current model's
table that references the associated model's primary key, as a symbol.
Defaults to :"#{association_name}_id".
Album.many_to_one :artist, key: :artistid
For +one_to_one+ and +one_to_many+ associations, this is the foreign key in
associated model's table that references current model's primary key, as a
symbol. Defaults to :"#{self.name.underscore}_id".
Artist.one_to_many :albums, key: :artistid
In both cases an array of symbols can be used for a composite key association:
Apartment.many_to_one :building, key: [:city, :address]
==== :conditions
The conditions to use to filter the association, can be any argument passed to +where+.
If you use a hash or an array of two element arrays, this will also be used as a
filter when using eager_graph or association_join to load the association.
If you do not use a hash or array of two element arrays, you should use the
:graph_conditions, :graph_only_conditions, or :graph_block option or you will not
be able to use eager_graph or association_join with the association.
Artist.one_to_many :good_albums, class: :Album, conditions: {good: true}
@artist.good_albums
# SELECT * FROM albums WHERE ((artist_id = 1) AND (good IS TRUE))
==== :order
The column(s) by which to order the association dataset. Can be a
singular column or an array.
Artist.one_to_many :albums_by_name, class: :Album, order: :name
Artist.one_to_many :albums_by_num_tracks, class: :Album, order: [:num_tracks, :name]
==== :select
The columns to SELECT when loading the association. For most associations,
it defaults to nil, so * is used. For +many_to_many+ associations, it
defaults to the associated class's table_name.*, which means it doesn't include
the columns from the join table. This is to prevent the common issue where the
join table includes columns with the same name as columns in the associated
table, in which case the joined table's columns would usually end up clobbering
the values in the associated table. If you want to include the join table
attributes, you can use this option, but beware that the join table columns
can clash with columns from the associated table, so you should alias any
columns that have the same name in both the join table and the associated
table. Example:
Artist.one_to_many :albums, select: [:id, :name]
Album.many_to_many :tags, select: [Sequel[:tags].*, Sequel[:albums_tags][:number]]
==== :limit
Limit the number of records to the provided value:
Artist.one_to_many :best_selling_albums, class: :Album, order: :copies_sold, limit: 5
Use an array with two arguments for the value to specify a limit and an offset.
Artist.one_to_many :next_best_selling_albums, class: :Album, order: :copies_sold, limit: [10, 5]
# LIMIT 10 OFFSET 5
This probably doesn't make a lot of sense for *_to_one associations, though you
could use it to specify an offset.
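For example, a sketch using only an offset to return the second best
selling album (assuming a copies_sold column):

  Artist.one_to_one :second_best_selling_album, class: :Album,
    order: Sequel.desc(:copies_sold), limit: [nil, 1]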
==== :join_table [+many_to_many+, +one_through_one+]
Name of table that includes the foreign keys to both the current model and the
associated model, as a symbol. Defaults to the name of current model and name
of associated model, pluralized, underscored, sorted, and joined with '_'.
Here's an example of the defaults:
Artist.many_to_many :albums, join_table: :albums_artists
Album.many_to_many :artists, join_table: :albums_artists
Person.many_to_many :colleges, join_table: :colleges_people
==== :left_key [+many_to_many+, +one_through_one+]
Foreign key in join table that points to current model's primary key, as a
symbol. Defaults to :"#{model_name.underscore}_id".
Album.many_to_many :tags, left_key: :album_id
Can use an array of symbols for a composite key association.
==== :right_key [+many_to_many+, +one_through_one+]
Foreign key in join table that points to associated model's primary key, as a
symbol. Defaults to :"#{association_name.singularize}_id" for +many_to_many+
and :"#{association_name}_id" for +one_through_one+.
Album.many_to_many :tags, right_key: :tag_id
Can use an array of symbols for a composite key association.
==== :distinct
Use the DISTINCT clause when selecting associated objects, both when lazy
loading and eager loading via eager (but not when using eager_graph).
This is most useful for many_to_many associations that use join tables that
contain more than just the foreign keys, where you are storing additional
information. For example, if you have a database of people, degree types, and
colleges, and you want to return all people from a given college, you may want
to use :distinct so that if a person has two separate degrees from the same
college, they won't show up twice.
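For example, assuming a degrees_received join table (as in the examples
later in this guide):

  Person.many_to_many :colleges, join_table: :degrees_received, distinct: true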
==== :clone
The :clone option clones an existing association, taking the options
you specified for that association, and making a copy of them for this
association. Other options provided by this association are then merged
into the cloned options.
This is commonly used if you have a bunch of similar associations that
you want to DRY up:
one_to_many :english_verses, class: :LyricVerse, key: :lyricsongid,
order: :number, conditions: {languageid: 1}
one_to_many :romaji_verses, clone: :english_verses, conditions: {languageid: 2}
one_to_many :japanese_verses, clone: :english_verses, conditions: {languageid: 3}
Note that for the final two associations, you didn't have to specify the :class,
:key, or :order options, as they were copied by the :clone option. By specifying
the :conditions option for the final two associations, it overrides the :conditions
option of the first association, it doesn't attempt to merge them.
In addition to the options hash, the :clone option will copy a block argument
from the existing association. If you want a cloned association to not have the
same block as the association you are cloning from, specify the block: nil option
in addition to the :clone option.
==== :dataset
This is generally only specified for custom associations that aren't based on
primary/foreign key relationships. It should be a proc that is instance_execed
to get the base dataset to use before the other options are applied.
If the proc accepts an argument, it is passed the related association reflection.
For best performance, it's recommended that custom associations call the
+associated_dataset+ method on the association reflection as the starting point
for the dataset to return. The +associated_dataset+ method will return a
dataset based on the associated class with most of the association options
already applied, and the proc should return a modified copy of this dataset.
Here's an example of an association of songs to artists through lyrics, where
the artist can perform any one of four tasks for the lyric:
Artist.one_to_many :songs, dataset: (lambda do |r|
r.associated_dataset.select_all(:songs).
join(:lyrics, id: :lyric_id, id=>[:composer_id, :arranger_id, :vocalist_id, :lyricist_id])
end)
Artist.first.songs_dataset
# SELECT songs.* FROM songs
# INNER JOIN lyrics ON ((lyrics.id = songs.lyric_id)
# AND (1 IN (composer_id, arranger_id, vocalist_id, lyricist_id)))
==== :extend
A module or array of modules to extend the dataset with. These are used to
set up association extensions. For more information, please see the
{Advanced Associations page}[rdoc-ref:doc/advanced_associations.rdoc].
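A minimal sketch (the ByYear module and the year column are hypothetical):

  module ByYear
    # Dataset method for filtering the association by year
    def by_year(year)
      where(year: year)
    end
  end

  Artist.one_to_many :albums, extend: ByYear
  @artist.albums_dataset.by_year(2020)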
==== :primary_key [+many_to_one+, +one_to_one+, +one_to_many+]
The column that the :key option references, as a symbol. For +many_to_one+
associations, this column is in the associated table. For +one_to_one+ and
+one_to_many+ associations, this column is in the current table. In both cases,
it defaults to the primary key of the table. Can use an
array of symbols for a composite key association.
Artist.set_primary_key :arid
Artist.one_to_many :albums, primary_key: :arid
Album.many_to_one :artist, primary_key: :arid
==== :left_primary_key [+many_to_many+, +one_through_one+]
Column in current table that :left_key option points to, as a symbol.
Defaults to primary key of current table.
Album.set_primary_key :alid
Album.many_to_many :tags, left_primary_key: :alid
Can use an array of symbols for a composite key association.
==== :right_primary_key [+many_to_many+, +one_through_one+]
Column in associated table that :right_key points to, as a symbol.
Defaults to primary key of the associated table.
Tag.set_primary_key :tid
Album.many_to_many :tags, right_primary_key: :tid
Can use an array of symbols for a composite key association.
==== :join_table_block [+many_to_many+, +one_through_one+]
A proc that can be used to modify the dataset used in the add/remove/remove_all
methods. It's separate from the association block, as that is called on a
join of the join table and the associated table, whereas this option just
applies to the join table. It can be used to make sure that filters are used
when deleting.
Artist.many_to_many :lead_guitar_albums, class: :Album, join_table_block: (lambda do |ds|
ds.where(instrument_id: 5)
end)
==== :join_table_db [+many_to_many+, +one_through_one+]
A Sequel::Database to use for the join table. Specifying this option switches the
loading to use a separate query for the join table. This is useful if the
join table is not located in the same database as the associated table, or
if the database account with access to the associated table doesn't have
access to the join table.
For example, if the Album class uses a different Sequel::Database than the Artist
class, and the join table is in the database that the Artist class uses:
Artist.many_to_many :lead_guitar_albums, class: :Album, join_table_db: Artist.db
This option also affects the add/remove/remove_all methods, by changing
which database is used for inserts/deletes from the join table (add/remove/remove_all
defaults to use the current model's database instead of the associated model's database).
=== Callback Options
All callbacks can be specified as a Symbol, Proc, or array of both/either
specifying a callback to call. Symbols are interpreted as instance methods
that are called with the associated object. Procs are called with the receiver
as the first argument and the associated object as the second argument. If
an array is given, all of them are called in order.
Before callbacks are often used to check preconditions; they can call Model#cancel_action
to signal Sequel to abort the modification. If any before callback
calls cancel_action, the remaining before callbacks are not called and the modification
is aborted.
==== :before_add [+one_to_many+, +many_to_many+]
Called before adding an object to the association:
class Artist
# Don't allow adding an album to an artist if it has no tracks
one_to_many :albums, before_add: lambda{|ar, al| ar.cancel_action if al.num_tracks == 0}
end
==== :after_add [+one_to_many+, +many_to_many+]
Called after adding an object to the association:
class Artist
# Log all associations of albums to an audit logging table
one_to_many :albums, after_add: :log_add_album
private
def log_add_album(album)
DB[:audit_logs].insert(log: "Album #{album.inspect} associated to #{inspect}")
end
end
==== :before_remove [+one_to_many+, +many_to_many+]
Called before removing an object from the association using remove_association:
class Artist
# Don't allow removing a self-titled album
one_to_many :albums, before_remove: lambda{|ar, al| ar.cancel_action if al.name == ar.name}
end
This is not called when using remove_all_association.
==== :after_remove [+one_to_many+, +many_to_many+]
Called after removing an object from the association using remove_association:
class Artist
# Log all disassociations of albums to an audit logging table
one_to_many :albums, after_remove: :log_remove_album
private
def log_remove_album(album)
DB[:audit_logs].insert(log: "Album #{album.inspect} disassociated from #{inspect}")
end
end
This is not called when using remove_all_association.
==== :before_set [+many_to_one+, +one_to_one+]
Called before the _association= method is called to modify the objects:
class Album
# Don't associate the album with an artist if the year the album was
# released is less than the year the artist/band started.
many_to_one :artist, before_set: lambda{|al, ar| al.cancel_action if al.year < ar.year_started}
end
==== :after_set [+many_to_one+, +one_to_one+]
Called after the _association= method is called to modify the objects:
class Album
# Log all artist changes for albums to an audit logging table
many_to_one :artist, after_set: :log_artist_set
private
def log_artist_set(artist)
DB[:audit_logs].insert(log: "Artist for album #{inspect} set to #{artist.inspect}")
end
end
==== :after_load
Called after retrieving the associated records from the database.
class Artist
# Cache all album names to a single string when retrieving the albums.
one_to_many :albums, after_load: :cache_album_names
attr_reader :album_names
private
def cache_album_names(albums)
@album_names = albums.map(&:name).join(", ")
end
end
Generally used if you know you will always want a certain action done
when retrieving the association.
For +one_to_many+ and +many_to_many+ associations, both the argument to
symbol callbacks and the second argument to proc callbacks will be an
array of associated objects instead of a single object.
==== :uniq [+many_to_many+]
Adds a after_load callback that makes the array of objects unique. In many
cases, using the :distinct option is a better approach.
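For example:

  Album.many_to_many :artists, uniq: true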
=== Eager Loading via eager (query per association) Options
==== :eager
The associations to eagerly load via eager when loading the associated object(s).
This is useful for example if you always want to eagerly load dependent
associations when loading this association.
For example, if you know that any time that you want to load an artist's
albums, you are also going to want access to the album's tracks as well:
# Eager load tracks when loading the albums
Artist.one_to_many :albums, eager: :tracks
You can also use a hash or array to specify multiple dependent associations
to eagerly load:
# Eager load the albums' tracks and the tracks' tags when loading the albums
Artist.one_to_many :albums, eager: {tracks: :tags}
# Eager load the albums' tags and tracks when loading the albums
Artist.one_to_many :albums, eager: [:tags, :tracks]
# Eager load the albums' tags, tracks, and tracks' tags when loading the albums
Artist.one_to_many :albums, eager: [:tags, {tracks: :tags}]
==== :eager_loader
A custom loader to use when eagerly loading associated objects via eager.
For many details and examples of custom eager loaders, please see the
{Advanced Associations guide}[rdoc-ref:doc/advanced_associations.rdoc].
==== :eager_loader_key
A symbol for the key column to use to populate the key hash for the eager
loader. Generally does not need to be set manually, defaults to the key
method used. Can be set to nil to not populate the key hash (better for
performance if a custom eager loader does not use the key_hash).
==== :eager_block
If given, should be a proc to use instead of the association method block
when eagerly loading. To not use a block when eager loading when one is
used normally, set to nil. It's very uncommon to need this option.
=== Eager Loading via eager_graph (one query with joins) Options
==== :eager_graph
The associations to eagerly load via eager_graph when loading the associated
object(s). This is useful for example if you always want to eagerly load dependent
associations when loading this association, but you want to filter or order the
association based on dependent associations:
Artist.one_to_many :albums_with_short_tracks, class: :Album, eager_graph: :tracks do |ds|
ds.where{tracks[:seconds] < 120}
end
Artist.one_to_many :albums_by_track_name, class: :Album, eager_graph: :tracks do |ds|
ds.order{tracks[:name]}
end
You can also use a hash or array of arguments for :eager_graph, similar to
what the :eager option accepts.
==== :graph_conditions
The additional conditions to use on the SQL join when eagerly loading the
association via eager_graph. Should be a hash or an array of two element
arrays. If not specified, the :conditions option is used if it is a hash or
array of two element arrays.
Artist.one_to_many :active_albums, class: :Album, graph_conditions: {active: true}
Note that these conditions on the association are in addition to the default
conditions specified by the foreign/primary keys. If you want to replace
the conditions specified by the foreign/primary keys, you need the
:graph_only_conditions options.
==== :graph_block
The block to pass to Dataset#join_table when eagerly loading the association
via eager_graph. This is useful to specify conditions that can't be specified
in a hash or array of two element arrays.
Artist.one_to_many :gold_albums, class: :Album,
graph_block: proc{|j,lj,js| Sequel[j][:copies_sold] > 500000}
==== :graph_join_type
The type of SQL join to use when eagerly loading the association via
eager_graph. Defaults to :left_outer. This is useful if you want to
ensure that only artists that have albums are returned:
Artist.one_to_many :albums, graph_join_type: :inner
# Will exclude artists without an album
Artist.eager_graph(:albums).all
==== :graph_select
A column or array of columns to select from the associated table
when eagerly loading the association via eager_graph. Defaults to all
columns in the associated table.
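For example:

  # Only select the id and name columns from albums when eager graphing
  Artist.one_to_many :albums, graph_select: [:id, :name]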
==== :graph_only_conditions
The conditions to use on the SQL join when eagerly loading the association via
eager_graph, instead of the default conditions specified by the
foreign/primary keys. This option causes the :graph_conditions option to be
ignored. This can be useful if the keys you are using are strings and you
want to do a case insensitive comparison. For example, let's say that instead
of integer keys, you used string keys based on the album or artist name, and
that the album was associated to the artist by name. However, you weren't
enforcing case sensitivity between the keys, so you still want to return albums
where the artist's name differs in case:
Artist.one_to_many :albums, key: :artist_name,
graph_only_conditions: nil,
graph_block: (proc do |j,lj,js|
{Sequel.function(:lower, Sequel[j][:artist_name])=> Sequel.function(:lower, Sequel[lj][:name])}
end)
Note how :graph_only_conditions is set to nil to ignore any existing conditions,
and :graph_block is used to set up the case insensitive comparison.
Another case where :graph_only_conditions may be used is if you want to use
a JOIN USING or NATURAL JOIN for the graph:
# JOIN USING
Artist.one_to_many :albums, key: :artist_name, graph_only_conditions: [:artist_name]
# NATURAL JOIN
Artist.one_to_many :albums, key: :artist_name, graph_only_conditions: nil, graph_join_type: :natural
==== :graph_alias_base
The base name to use for the table alias when eager graphing. Defaults to the name
of the association. If the alias name has already been used in the query, Sequel will create
a unique alias by appending a numeric suffix (e.g. alias_0, alias_1, ...) until the alias is
unique.
This is mostly useful if you have associations with the same name in many models, and you want
to be able to easily tell which table alias corresponds to which association when eagerly
graphing multiple associations with the same name.
You can override this option on a per-eager_graph basis by specifying the association as an
SQL::AliasedExpression instead of a symbol:
Album.eager_graph(Sequel.as(:artist, :a))
==== :eager_grapher
Sets up a custom grapher to use when eager loading the objects via eager_graph.
This is the eager_graph analogue to the :eager_loader option. This isn't generally
needed, as one of the other eager_graph related association options is usually sufficient.
If specified, should be a proc that accepts a single hash argument, which will contain
at least the following keys:
:callback :: A callback proc used to dynamically modify the dataset to graph into the
current dataset, before such graphing is done. This is nil if no callback
proc is used.
:implicit_qualifier :: The alias that was used for the current table (since you can cascade associations).
:join_type :: Override the join type to use when graphing.
:limit_strategy :: The limit strategy symbol to use when graphing (for limited associations only)
:self :: The dataset that is doing the eager loading
:table_alias :: An alias to use for the table to graph for this association.
Example:
Artist.one_to_many :self_title_albums, class: :Album,
eager_grapher: (lambda do |eo|
eo[:self].graph(:albums, {artist_id: :id, name: :name},
table_alias: eo[:table_alias], implicit_qualifier: eo[:implicit_qualifier])
end)
==== :order_eager_graph
Whether to add the order to the dataset's order when graphing via eager_graph.
Defaults to true, so set to false to disable.
Sequel has to do some guess work when attempting to add the association's
order to an eager_graphed dataset. In most cases it does so correctly, but
if it has problems, you'll probably want to set this option to false.
==== :graph_order
Override the order added when using eager_graph, instead of using the one
defined in :order. This is useful if :order contains qualified identifiers,
as the qualifiers may not match the aliases automatically used by eager_graph.
This should contain unqualified identifiers, and eager_graph will automatically
qualify them with the appropriate alias.
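For example:

  # :order uses a qualified identifier for lazy loading; :graph_order
  # lets eager_graph qualify name with the alias it chooses
  Artist.one_to_many :albums, order: Sequel[:albums][:name], graph_order: :name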
==== :graph_join_table_conditions [+many_to_many+, +one_through_one+]
The additional conditions to use on the SQL join for the join table when
eagerly loading the association via eager_graph. Should be a hash or an array
of two element arrays.
Let's say you have a database of people, colleges, and a table called
degrees_received that includes a string field specifying the name of the
degree, and you want to eager load all colleges for people where the person
has received a specific degree:
Person.many_to_many :bs_degree_colleges, class: :College,
join_table: :degrees_received,
graph_join_table_conditions: {degree: 'BS'}
==== :graph_join_table_block [+many_to_many+, +one_through_one+]
The block to pass to join_table for the join table when eagerly loading the
association via eager_graph. This is used for similar reasons as :graph_block,
but is only used for +many_to_many+ associations when graphing the join
table into the dataset. It's used in the same place as
:graph_join_table_conditions but like :graph_block, is needed for situations
where the conditions can't be specified as a hash or array of two element
arrays.
Let's say you have a database of people, colleges, and a table called
degrees_received that includes a string field specifying the name of the
degree, and you want to eager load all colleges for people where the person
has received a bachelor's degree (degree starting with B):
Person.many_to_many :bachelor_degree_colleges, class: :College,
join_table: :degrees_received,
graph_join_table_block: proc{|j,lj,js| Sequel[j][:degree].like('B%')}
This should be done when graphing the join table, instead of when graphing the
final table, as :degree is a column of the join table.
==== :graph_join_table_join_type [+many_to_many+, +one_through_one+]
The type of SQL join to use for the join table when eagerly loading the
association via eager_graph. Defaults to the :graph_join_type option or
:left_outer. This exists mainly for consistency in the unlikely case that
you want to use a different join type when JOINing to the join table than
you want to use when JOINing to the final table.
==== :graph_join_table_only_conditions [+many_to_many+, +one_through_one+]
The conditions to use on the SQL join for the join table when eagerly loading
the association via eager_graph, instead of the default conditions specified
by the foreign/primary keys. This option causes the
:graph_join_table_conditions option to be ignored. This is only useful if
you want to replace the default foreign/primary key conditions that Sequel
would use when eagerly graphing.
=== Associations Based on SQL Expressions Options
Sequel's associations can work not just with columns, but also with
arbitrary SQL expressions. For example, on PostgreSQL, you can store
foreign keys to other tables in hstore, json, or jsonb columns, and Sequel
can work with such constructs, including full support for
eager loading.
There are actually two parts to supporting associations based on SQL
expressions. First, you must have an instance method in the model
that returns the value that the SQL expression would return. Second,
you must have an SQL expression object. If Sequel has access to
a model instance and needs to get the value of the expression, it
calls the method to get the value. If Sequel does not have access
to a model instance, but needs to use the SQL expression in a query,
it will use the SQL expression object.
Below is an example storing foreign keys to other tables in a
PostgreSQL jsonb column, using the +pg_json+ and +pg_json_ops+
extensions.
# Example schema:
# albums artists
# :id /---> :id
# :meta ---/ :name
# :name
class Album < Sequel::Model
many_to_one :artist, key_column: Sequel.pg_jsonb(:meta)['artist_id'].cast(String).cast(Integer)
def artist_id
meta['artist_id'].to_i
end
end
class Artist < Sequel::Model
one_to_many :albums, key: Sequel.pg_jsonb(:meta)['artist_id'].cast(String).cast(Integer), key_method: :artist_id
end
# Example schema:
# albums albums_artists artists
# :id <----- :meta -------> :id
# :name :name
class Album < Sequel::Model
many_to_many :artists, left_key: Sequel.pg_jsonb(:meta)['album_id'].cast(String).cast(Integer),
right_key: Sequel.pg_jsonb(:meta)['artist_id'].cast(String).cast(Integer)
end
class Artist < Sequel::Model
many_to_many :albums, left_key: Sequel.pg_jsonb(:meta)['artist_id'].cast(String).cast(Integer),
right_key: Sequel.pg_jsonb(:meta)['album_id'].cast(String).cast(Integer)
end
==== :key_column [+many_to_one+]
Like the :key option, but :key references the method name, while
:key_column references the underlying column/expression.
==== :primary_key_method [+many_to_one+]
Like the :primary_key option, but :primary_key references the column/expression
name, while :primary_key_method references the method name.
==== :primary_key_column [+one_to_many+, +one_to_one+]
Like the :primary_key option, but :primary_key references the method name, while
:primary_key_column references the underlying column/expression.
==== :key_method [+one_to_many+, +one_to_one+]
Like the :key option, but :key references the column/expression
name, while :key_method references the method name.
==== :left_primary_key_column [+many_to_many+, +one_through_one+]
Like the :left_primary_key option, but :left_primary_key references the method name, while
:left_primary_key_column references the underlying column/expression.
==== :right_primary_key_method [+many_to_many+, +one_through_one+]
Like the :right_primary_key option, but :right_primary_key references the column/expression
name, while :right_primary_key_method references the method name.
=== Advanced Options
==== :reciprocal
The symbol name of the reciprocal association, if it exists. By default,
Sequel will try to determine it by looking at the associated model's
associations for an association that matches the current association's key(s).
Set to nil to not use a reciprocal.
Reciprocals are used in Sequel to modify the matching cached associations
in associated objects when calling association methods on the current object.
For example, when you retrieve objects in a one_to_many association, Sequel will
automatically set the matching many_to_one association in the associated
objects. The result of this is that code that does this:
@artist.albums.each{|album| album.artist.name}
only does one database query, because when the @artist's albums are retrieved,
the cached artist association for each album is set to @artist.
In addition to the one_to_many retrieval case, the association modification
methods affect the reciprocals as well:
# Sets the cached artist association for @album to @artist
@artist.add_album(@album)
# Sets the cached artist association for @album to nil
@artist.remove_album(@album)
# Sets the cached artist association to nil for the @artist's
# cached albums association
@artist.remove_all_albums
# Remove @album from @artist1's cached albums association, and add @album
# to @artist2's cached albums association.
@album.artist # @artist1
@album.artist = @artist2
Sequel can usually guess the correct reciprocal, but if you have multiple
associations to the same associated class that use the same keys, you may
want to specify the :reciprocal option manually to ensure the correct
one is used.
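For example (a hypothetical sketch), if Album has two many_to_one
associations to Artist that share a key, the reciprocal can be given
explicitly:
  class Album < Sequel::Model
    many_to_one :artist
    many_to_one :featured_artist, class: :Artist, key: :artist_id
  end
  class Artist < Sequel::Model
    one_to_many :albums, reciprocal: :artist
  end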
==== :read_only
For +many_to_one+ and +one_to_one+ associations, do not add a setter method.
For +one_to_many+ and +many_to_many+, do not add the add_association,
remove_association, or remove_all_association methods.
If you are not using the association modification methods, setting this
value to true will save memory.
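For example:
  Album.many_to_one :artist, read_only: true   # no Album#artist= setter
  Artist.one_to_many :albums, read_only: true  # no Artist#add_album, Artist#remove_album,
                                               # or Artist#remove_all_albums methods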
==== :validate
Set to false to not validate when implicitly saving any associated object.
When using the +one_to_many+ association modification methods, the +one_to_one+
setter method, or creating a new object by passing a hash to the
add_association method, Sequel will automatically save the object.
If you don't want to validate objects when these implicit saves are done,
the validate option should be set to false.
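For example:
  Artist.one_to_many :albums, validate: false
  # The implicit save of the new album skips validation:
  @artist.add_album(name: 'A')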
==== :raise_on_save_failure [+one_to_many+ associations]
Set to false to not raise an exception when validation or a before hook
fails when implicitly saving an associated object in the add_* or remove_*
methods. This mirrors the raise_on_save_failure model setting, which these
methods do not respect (by design).
If you use this option, you must explicitly check all add_* and remove_* return
values to see if they were successful.
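A sketch of such checking:
  Artist.one_to_many :albums, raise_on_save_failure: false
  if @artist.add_album(name: 'A')
    # album saved successfully
  else
    # validation or a before hook failed; handle the failure here
  end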
==== :allow_eager
If set to false, you cannot load the association eagerly via eager or
eager_graph.
Artist.one_to_many :albums, allow_eager: false
Artist.eager(:albums) # Raises Sequel::Error
Artist.eager_graph(:albums) # Raises Sequel::Error
This is usually used if the association dataset depends on specific values in
the model instance that would not be valid when eager loading for multiple
instances.
==== :allow_eager_graph
If set to false, you cannot load the association eagerly via eager_graph.
Artist.one_to_many :albums, allow_eager_graph: false
Artist.eager(:albums) # Allowed
Artist.eager_graph(:albums) # Raises Sequel::Error
This is useful if you still want to allow loading via eager, but do not want
to allow loading via eager graph, possibly because the association does not
support joins.
==== :allow_filtering_by
If set to false, you cannot use the association when filtering.
Artist.one_to_many :albums, allow_filtering_by: false
Artist.where(albums: Album.where(name: 'A')).all # Raises Sequel::Error
This is useful if such filtering cannot work, such as when a subquery cannot
be used because the necessary tables are not in the same database.
==== :instance_specific
This allows you to override the setting of whether the dataset contains instance
specific code. If you are passing a block to the association,
Sequel sets this to true by default, which disables some optimizations that
would be invalid if the association is instance specific. If you know that the
block does not contain instance specific code, you can set this to false to
reenable the optimizations. Instance specific code is most commonly calling
model instance methods inside an association block, but also
includes cases where the association block can return different values based
on the runtime environment, such as calls to Time.now in the block.
Associations that use the :dataset option are always considered instance specific,
even if explicitly specified otherwise.
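For example (a sketch; the block below depends only on constant values,
not on the model instance or the runtime environment):
  Artist.one_to_many :popular_albums, class: :Album, instance_specific: false do |ds|
    ds.where{copies_sold > 100000}
  end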
==== :cartesian_product_number
The number of joins completed by this association that could cause more
than one row for each row in the current table (default: 0 for *_one
associations, 1 for *_to_many associations).
This should only be modified in specific cases. For example, if you have
a one_to_one association that can actually return more than one row
(where the default association method will just return the first), or
a many_to_many association where there is a unique index in the join table
so that you know only one object will ever be associated through the
association.
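For example (a sketch, assuming the join table has a unique index
guaranteeing at most one associated row per album):
  Album.many_to_many :artists, cartesian_product_number: 0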
==== :class_namespace
If the :class option is specified as a symbol or string, the default namespace
in which to look up the class. If the :class option is not specified as a
symbol or string, this option is ignored. This namespace can be overridden
by starting the string or symbol with :::
Foo::Album.many_to_one :artist, class: "Artist" # Uses Artist
Foo::Album.many_to_one :artist, class: "Artist", class_namespace: 'Foo' # Uses Foo::Artist
Foo::Album.many_to_one :artist, class: "Foo::Artist", class_namespace: 'Foo' # Uses Foo::Foo::Artist
Foo::Album.many_to_one :artist, class: "::Artist", class_namespace: 'Foo' # Uses Artist
Foo::Album.many_to_one :artist, class: "::Foo::Artist", class_namespace: 'Foo' # Uses Foo::Artist
==== :methods_module
The module that the methods created by the association will be placed
into. Defaults to the module containing the model's columns. Any module
given to this option is not included in the model's class automatically,
so you are responsible for doing that manually.
This is only useful in rare cases, such as when a plugin that adds
associations depends on another plugin that defines instance methods of
the same name. In that case, the instance methods of the dependent
plugin would override the association methods created by the main
plugin.
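A sketch of manual usage (the module name is hypothetical, and it must be
included manually):
  AlbumAssociationMethods = Module.new
  Album.many_to_one :artist, methods_module: AlbumAssociationMethods
  Album.include(AlbumAssociationMethods)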
==== :eager_limit_strategy
This setting determines what strategy to use for eager loading the associations
that use the :limit setting to limit the number of returned records. You
can't use LIMIT directly, since you want a limit for each group of
associated records, not a LIMIT on the total number of records returned
by the dataset.
In general, Sequel picks an appropriate strategy, so it is not usually
necessary to specify a strategy. You can specify true for this option to
have Sequel choose which strategy to use (this is the default). You can
specify a symbol to manually choose a strategy. The available strategies are:
:union :: Uses one or more UNION queries with a subquery for each record
you are eagerly loading for (this is the default strategy).
:distinct_on :: Uses DISTINCT ON to ensure only the first matching record
is loaded (only used for one_*_one associations without
offsets on PostgreSQL).
:window_function :: Uses a ROW_NUMBER window function to ensure the
correctly limited/offset records are returned.
:ruby :: Uses ruby array slicing to emulate database limiting/offsetting.
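For example (hypothetical association and column names):
  Artist.one_to_many :best_albums, class: :Album, order: Sequel.desc(:copies_sold),
    limit: 5, eager_limit_strategy: :window_function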
==== :subqueries_per_union
The number of subqueries per union query to use when eager loading for a
limited association using a union strategy. This defaults to 40, but the
optimum number depends on the database in use and the latency between the
database and the application.
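For example (the number here is hypothetical; benchmark against your own
database to find a good value):
  Artist.one_to_many :best_albums, class: :Album, order: Sequel.desc(:copies_sold),
    limit: 5, subqueries_per_union: 10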
==== :filter_limit_strategy
The strategy to use when filtering by limited associations. In general
Sequel will choose either a :distinct_on, :window_function, or
:correlated_subquery strategy based on the association type and what
the database supports, but you can override that if necessary using
this option.
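For example (a sketch with hypothetical names):
  Artist.one_to_many :newest_albums, class: :Album, order: Sequel.desc(:release_date),
    limit: 1, filter_limit_strategy: :correlated_subquery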
= bin/sequel
bin/sequel is the name used to refer to the "sequel" command line tool that ships with the sequel gem. By default, bin/sequel provides an IRB shell with the +DB+ constant set to a Sequel::Database object created using the database connection string provided on the command line. For example, to connect to a new in-memory SQLite database using the sqlite adapter, you can use the following:
sequel sqlite:/
This is very useful for quick testing of ideas, and does not affect the environment, since the in-memory SQLite database is destroyed when the program exits.
== Running from a git checkout
If you've installed the sequel gem, then just running "sequel" should load the program, since rubygems should place the sequel binary in your load path. However, if you want to run bin/sequel from the root of a repository checkout, you should probably do:
ruby bin/sequel
== Choosing the Database to Connect to
=== Connection String
In general, you probably want to provide a connection string argument to bin/sequel, indicating the adapter and database connection information you want to use. For example:
sequel sqlite:/
sequel postgres://user:pass@host/database_name
sequel mysql2://user:pass@host/database_name
See the {Connecting to a database guide}[rdoc-ref:doc/opening_databases.rdoc] for more details about and examples of connection strings.
=== YAML Connection File
Instead of specifying the database connection using a connection string, you can provide the path to a YAML configuration file containing the connection information. This YAML file can contain a single options hash, or it can contain a nested hash, where the top-level hash uses environment keys with hash values for
each environment. Using the -e option with a YAML connection file, you can choose which environment to use if using a nested hash.
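A sketch of a nested configuration (the adapter and names are hypothetical):
  development:
    adapter: postgres
    host: localhost
    database: blog_development
  production:
    adapter: postgres
    host: db.example.com
    database: blog_production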
sequel -e production config/database.yml
Note that bin/sequel does not directly support ActiveRecord YAML configuration files, as they use different names for some options.
=== Mock Connection
If you don't provide a connection string or YAML connection file, Sequel will start with a mock database. The mock database allows you to play around with Sequel without any database at all, and can be useful if you just want to test things out and generate SQL without actually getting results from a database.
sequel
Sequel also has the ability to use the mock adapter with database-specific syntax, allowing you to pretend you are connecting to a specific type of database without actually connecting to one. To do that, you need to use a connection string:
sequel mock://postgres
== Not Just an IRB shell
bin/sequel is not just an IRB shell; it can do far more.
=== Execute Code
bin/sequel can also be used to execute other ruby files with +DB+ preset to the database given on the command line:
sequel postgres://host/database_name path/to/some_file.rb
On modern versions of Linux, this means that you can use bin/sequel in a shebang line:
#!/path/to/bin/sequel postgres://host/database_name
If you want to quickly execute a small piece of ruby code, you can use the -c option:
sequel -c "p DB.tables" postgres://host/database_name
Similarly, if data is piped into bin/sequel, it will be executed:
echo "p DB.tables" | sequel postgres://host/database_name
=== Migrate Databases
With the -m option, Sequel will migrate the given database using the provided migration directory:
sequel -m /path/to/migrations/dir postgres://host/database
You can use the -M option to set the version to migrate to:
sequel -m /path/to/migrations/dir -M 3 postgres://host/database
See the {migration guide}[rdoc-ref:doc/migration.rdoc] for more details about migrations.
=== Dump Schemas
Using the -d or -D options, Sequel will dump the database's schema in Sequel migration format to the standard output:
sequel -d postgres://host/database
To save this information to a file, use a standard shell redirection:
sequel -d postgres://host/database > /path/to/migrations/dir/001_base_schema.rb
The -d option dumps the migration in database-independent format, the -D option dumps it in database-specific format.
Note that the support for dumping schema is fairly limited. It doesn't handle database views, functions, triggers, schemas, partial indexes, functional indexes, and many other things. You should probably use the database specific tools to handle those.
The -S option dumps the schema cache for all tables in the database, which can speed up the usage of Sequel with models when using the schema_caching extension. You should provide this option with the path to which to dump the schema:
sequel -S /path/to/schema_cache.db postgres://host/database
=== Copy Databases
Using the -C option, Sequel can copy the contents of one database to another, even between different database types. Using this option, you provide two connection strings on the command line:
sequel -C mysql://host1/database postgres://host2/database2
This copies the table structure, table data, indexes, and foreign keys from the MySQL database to the PostgreSQL database.
Note that the support for copying is fairly limited. It doesn't handle database views, functions, triggers, schemas, partial indexes, functional indexes, and many other things. Also, the data type conversion may not be exactly what you want. It is best designed for quick conversions and testing. For serious production use, use the database's tools to copy databases for the same database type, and for different database types, use the Sequel API.
== Other Options
Other options not mentioned above are explained briefly here.
=== -E
-E logs all SQL queries to the standard output, so you can see all SQL that Sequel is sending the database.
=== -I include_directory
-I is similar to ruby -I, and specifies an additional $LOAD_PATH directory.
=== -l log_file
-l is similar to -E, but logs all SQL queries to the given file.
=== -L load_directory
-L loads all *.rb files under the given directory. This is usually used to load Sequel::Model classes into bin/sequel.
=== -N
-N skips testing the connection when creating the Database object. This is rarely needed.
=== -r require_lib
-r is similar to ruby -r, requiring the given library.
=== -t
-t tells bin/sequel to output full backtraces in the case of an error, which can aid in debugging.
=== -h
-h prints the usage information for bin/sequel.
=== -v
-v prints the Sequel version in use.
= Cheat Sheet
== Open a database
require 'sequel'
DB = Sequel.sqlite('my_blog.db')
DB = Sequel.connect('postgres://user:password@localhost/my_db')
DB = Sequel.postgres('my_db', user: 'user', password: 'password', host: 'localhost')
DB = Sequel.ado('mydb')
== Open an SQLite memory database
Without a filename argument, the sqlite adapter will set up a new SQLite database in memory.
DB = Sequel.sqlite
== Logging SQL statements
require 'logger'
DB = Sequel.sqlite(loggers: [Logger.new($stdout)])
# or
DB.loggers << Logger.new($stdout)
== Using raw SQL
DB.run "CREATE TABLE users (name VARCHAR(255) NOT NULL, age INT(3) NOT NULL)"
dataset = DB["SELECT age FROM users WHERE name = ?", name]
dataset.map(:age)
DB.fetch("SELECT name FROM users") do |row|
p row[:name]
end
== Create a dataset
dataset = DB[:items]
dataset = DB.from(:items)
== Most dataset methods are chainable
dataset = DB[:managers].where(salary: 5000..10000).order(:name, :department)
== Insert rows
dataset.insert(name: 'Sharon', grade: 50)
== Retrieve rows
dataset.each{|r| p r}
dataset.all # => [{...}, {...}, ...]
dataset.first # => {...}
dataset.last # => {...}
== Update/Delete rows
dataset.exclude(:active).delete
dataset.where{price < 100}.update(active: true)
dataset.where(:active).update(price: Sequel[:price] * 0.90)
== Merge rows
dataset.
merge_using(:table, col1: :col2).
merge_insert(col3: :col4).
merge_delete{col5 > 30}.
merge_update(col3: Sequel[:col3] + :col4)
== Datasets are Enumerable
dataset.map{|r| r[:name]}
dataset.map(:name) # same as above
dataset.inject(0){|sum, r| sum + r[:value]}
dataset.sum(:value) # better
== Filtering (see also {Dataset Filtering}[rdoc-ref:doc/dataset_filtering.rdoc])
=== Equality
dataset.where(name: 'abc')
=== Inequality
dataset.where{value > 100}
dataset.exclude{value <= 100}
=== Inclusion
dataset.where(value: 50..100)
dataset.where{(value >= 50) & (value <= 100)}
dataset.where(value: [50,75,100])
dataset.where(id: other_dataset.select(:other_id))
=== Subselects as scalar values
dataset.where{price > dataset.select(avg(price) + 100)}
=== LIKE/Regexp
DB[:items].where(Sequel.like(:name, 'AL%'))
DB[:items].where(name: /^AL/)
=== AND/OR/NOT
DB[:items].where{(x > 5) & (y > 10)}
# SELECT * FROM items WHERE ((x > 5) AND (y > 10))
DB[:items].where(Sequel.or(x: 1, y: 2) & Sequel.~(z: 3))
# SELECT * FROM items WHERE (((x = 1) OR (y = 2)) AND (z != 3))
=== Mathematical operators
DB[:items].where{x + y > z}
# SELECT * FROM items WHERE ((x + y) > z)
DB[:items].where{price - 100 < avg(price)}
# SELECT * FROM items WHERE ((price - 100) < avg(price))
=== Raw SQL Fragments
dataset.where(Sequel.lit('id = 1'))
dataset.where(Sequel.lit('name = ?', 'abc'))
dataset.where(Sequel.lit('value IN ?', [50,75,100]))
dataset.where(Sequel.lit('price > (SELECT avg(price) + 100 FROM table)'))
== Ordering
dataset.order(:kind) # kind
dataset.reverse(:kind) # kind DESC
dataset.order(Sequel.desc(:kind), :name) # kind DESC, name
== Limit/Offset
dataset.limit(30) # LIMIT 30
dataset.limit(30, 10) # LIMIT 30 OFFSET 10
dataset.limit(30).offset(10) # LIMIT 30 OFFSET 10
== Joins
DB[:items].left_outer_join(:categories, id: :category_id)
# SELECT * FROM items
# LEFT OUTER JOIN categories ON categories.id = items.category_id
DB[:items].join(:categories, id: :category_id).
join(:groups, id: Sequel[:items][:group_id])
# SELECT * FROM items
# INNER JOIN categories ON categories.id = items.category_id
# INNER JOIN groups ON groups.id = items.group_id
== Aggregate function methods
dataset.count #=> record count
dataset.max(:price)
dataset.min(:price)
dataset.avg(:price)
dataset.sum(:stock)
dataset.group_and_count(:category).all
dataset.select_group(:category).select_append{avg(:price)}
== SQL Functions / Literals
dataset.update(updated_at: Sequel.function(:NOW))
dataset.update(updated_at: Sequel.lit('NOW()'))
dataset.update(updated_at: Sequel.lit("DateValue('1/1/2001')"))
dataset.update(updated_at: Sequel.function(:DateValue, '1/1/2001'))
== Schema Manipulation
DB.create_table :items do
primary_key :id
String :name, unique: true, null: false
TrueClass :active, default: true
foreign_key :category_id, :categories
DateTime :created_at, default: Sequel::CURRENT_TIMESTAMP, index: true
index [:category_id, :active]
end
DB.drop_table :items
== Aliasing
DB[:items].select(Sequel[:name].as(:item_name))
DB[:items].select(Sequel.as(:name, :item_name))
DB[:items].select{name.as(:item_name)}
# SELECT name AS item_name FROM items
DB[Sequel[:items].as(:items_table)].select{items_table[:name].as(:item_name)}
# SELECT items_table.name AS item_name FROM items AS items_table
== Transactions
DB.transaction do
# BEGIN
dataset.insert(first_name: 'Inigo', last_name: 'Montoya')
dataset.insert(first_name: 'Farm', last_name: 'Boy')
end
# COMMIT
Transactions are reentrant:
DB.transaction do
# BEGIN
DB.transaction do
dataset.insert(first_name: 'Inigo', last_name: 'Montoya')
end
end
# COMMIT
Transactions are aborted if an error is raised:
DB.transaction do
# BEGIN
raise "some error occurred"
end
# ROLLBACK issued and the error is re-raised
Transactions can also be aborted by raising Sequel::Rollback:
DB.transaction do
# BEGIN
raise(Sequel::Rollback)
end
# ROLLBACK issued and no error raised
Savepoints can be used if the database supports it:
DB.transaction do
dataset.insert(first_name: 'Farm', last_name: 'Boy') # Inserted
DB.transaction(savepoint: true) do # This savepoint is rolled back
dataset.insert(first_name: 'Inigo', last_name: 'Montoya') # Not inserted
raise(Sequel::Rollback)
end
dataset.insert(first_name: 'Prince', last_name: 'Humperdink') # Inserted
end
== Retrieving SQL
dataset.sql # "SELECT * FROM items"
dataset.insert_sql(a: 1) # "INSERT INTO items (a) VALUES (1)"
dataset.update_sql(a: 1) # "UPDATE items SET a = 1"
dataset.delete_sql # "DELETE FROM items"
== Basic introspection
dataset.columns # => [:id, :name, ...]
DB.tables # => [:items, ...]
DB.views # => [:new_items, ...]
DB.schema(:items) # => [[:id, {:type=>:integer, ...}], [:name, {:type=>:string, ...}], ...]
DB.indexes(:items) # => {:index_name => {:columns=>[:a], :unique=>false}, ...}
DB.foreign_key_list(:items) # => [{:name=>:items_a_fk, :columns=>[:a], :key=>[:id], :table=>:other_table}, ...]
= Code Order
In Sequel, the order in which code is executed during initialization is important. This
guide provides the recommended way to order your Sequel code. Some
of these guidelines are not strictly necessary, but others are, and
this guide will be specific about which are strictly necessary.
== Require Sequel
This is sort of a no-brainer, but you need to require the library
first. This is a strict requirement; none of the other code can
be executed unless the library has been required first. Example:
require 'sequel'
== Add Global Extensions
Global extensions are loaded with Sequel.extension, and affect
other parts of Sequel or the general ruby environment. It's not
necessary to load them first, but it is a recommended practice.
Example:
Sequel.extension :blank
== Add Extensions Applied to All Databases/Datasets
If you want database or datasets extensions applied to all databases
and datasets, you must use Sequel::Database.extension to load the
extension before connecting to a database. If you connect to a
database before using Sequel::Database.extension, it will not have
that extension loaded. Example:
Sequel::Database.extension :columns_introspection
== Connect to Databases
Connecting to a database is required before running any queries against
that database, or creating any datasets or models. You cannot create
model classes without having a database object created first. The
convention for an application with a single Database instance is to
store that instance in a constant named DB. Example:
DB = Sequel.connect('postgres://user:pass@host/database')
== Add Extensions Specific to a Database or All Datasets in that Database
If you want specific databases to use specific extensions, or have all
datasets in that database use a specific extension, you need to load that
extension into the database after creating it using
Sequel::Database#extension. Example:
DB.extension :pg_array
== Configure Global Model Behavior
If you want to change the configuration for all model classes, you must do
so before loading your model classes, as configuration is copied into the
subclass when model subclasses are created. Example:
Sequel::Model.raise_on_save_failure = false
== Add Global Model Plugins
If you want to load a plugin into all model classes, you must do so
before loading your model classes, as plugin specific data may need to be
copied into the subclass when model subclasses are created. Example:
Sequel::Model.plugin :prepared_statements
== Load Model Classes
After you have established a database connection, and configured your
global model configuration and global plugins, you can load your model
classes. It's recommended to have a separate file for each model class,
unless the model classes are very simple. Example:
Dir['./models/*.rb'].each{|f| require f}
== Finalize Associations and Freeze Model Classes and Database
After all the models have been setup, you can finalize the associations.
This can speed up association reflection methods by doing a lookup in
advance to find the associated class, and cache related association
information in the association itself.
Additionally, in production and testing, you should freeze the
model classes and Database instance, so that you can detect
unsafe runtime modification of the configuration:
model_classes.each(&:finalize_associations)
model_classes.each(&:freeze)
DB.freeze
The +subclasses+ plugin can be used to keep track of all model classes
that have been setup in your application. Finalizing their associations
and freezing them can easily be achieved through the plugin:
# Register the plugin before setting up the models
Sequel::Model.plugin :subclasses
# ... setup models
# Now finalize associations & freeze models by calling the plugin:
Sequel::Model.freeze_descendents
= Sequel's Core Extensions
== Background
Historically, Sequel added methods to many of the core classes, and usage of those methods was the primary and recommended way to use Sequel. For example:
DB[:table].select(:column.cast(Integer)). # Symbol#cast
where(:column.like('A%')). # Symbol#like
order({1=>2}.case(0, :a)) # Hash#case
While Sequel never overrode any methods defined by ruby, it is possible that other libraries could define the same methods that Sequel defines, which could cause problems. Also, some rubyists do not like using libraries that add methods to the core classes.
Alternatives for the core extension methods were added to Sequel, so the query above could be written as:
DB[:table].select(Sequel.cast(:column, Integer)).
where(Sequel.like(:column, 'A%')).
order(Sequel.case({1=>2}, 0, :a))
or with virtual rows:
DB[:table].select{column.as(Integer)}.
where{column.like('A%')}.
order(Sequel.case({1=>2}, 0, :a))
Almost all of the core extension methods have a replacement on the Sequel module. So it is now up to the user which style to use. Using the methods on the Sequel module results in slightly more verbose code, but allows the code to work without modifications to the core classes.
== Issues
There is no recommendation on whether the core_extensions should be used or not. It is very rare that any of the methods added by core_extensions actually causes a problem, but some of them can make it more difficult to find other problems. For example, if you type:
do_something if value | other_value
while meaning to type:
do_something if value || other_value
and value is a Symbol, instead of a NoMethodError being raised because Symbol#| is not implemented by default, value | other_value will return a Sequel expression object, which the +if+ statement will treat as true, so do_something will be called.
== Usage
All of Sequel's extensions to the core classes are stored in Sequel's core_extensions extension, which you can load via:
Sequel.extension :core_extensions
== No Internal Dependency
Sequel has no internal dependency on the core extensions. This includes Sequel's core, Sequel::Model, and all plugins and extensions that ship with Sequel. However, it is possible that external plugins and extensions will depend on the core extensions. Such plugins and extensions should be updated so that they no longer depend on the core extensions.
== Refinements
Most of these extensions can be added on a per-file basis using refinements (if you are using Ruby 2.0+). To use refinements, first load them:
Sequel.extension :core_refinements
Then for each file where you want to use the refinements:
using Sequel::CoreRefinements
== Core Extension Methods
This section will briefly describe all of the methods added to the core classes, and what the alternative method is that doesn't require the core extensions.
=== Symbol & String
==== as
Symbol#as and String#as return Sequel aliased expressions using the provided alias:
:a.as(:b) # SQL: a AS b
'a'.as(:b) # SQL: 'a' AS b
Alternatives:
Sequel[:a].as(:b)
Sequel.as(:a, :b)
==== cast
Symbol#cast and String#cast return Sequel cast expressions for typecasting in the database:
:a.cast(Integer) # SQL: CAST(a AS integer)
'a'.cast(Integer) # SQL: CAST('a' AS integer)
Alternatives:
Sequel[:a].cast(Integer)
Sequel.cast(:a, Integer)
==== cast_numeric
Symbol#cast_numeric and String#cast_numeric return Sequel cast expressions for typecasting in the database, defaulting to integers, where the returned expression is treated as a numeric value:
:a.cast_numeric # SQL: CAST(a AS integer)
'a'.cast_numeric(Float) # SQL: CAST('a' AS double precision)
Alternative:
Sequel[:a].cast_numeric
Sequel.cast_numeric(:a)
==== cast_string
Symbol#cast_string and String#cast_string return Sequel cast expressions for typecasting in the database, defaulting to strings, where the returned expression is treated as a string value:
:a.cast_string # SQL: CAST(a AS varchar(255))
'a'.cast_string(:text) # SQL: CAST('a' AS text)
Alternatives:
Sequel[:a].cast_string
Sequel.cast_string(:a)
=== Symbol
==== identifier
Symbol#identifier wraps the symbol in a Sequel identifier object. If symbol splitting is enabled (no longer the default), it also makes sure the symbol will not be split. If symbol splitting is disabled (the default), there is little reason to use this.
:column.identifier # SQL: column
Alternatives:
Sequel[:column]
Sequel.identifier(:column)
==== asc
Symbol#asc is used to define an ascending order on a column. It exists mostly for consistency with #desc, since ascending is the default order:
:a.asc # SQL: a ASC
Alternatives:
Sequel[:a].asc
Sequel.asc(:a)
==== desc
Symbol#desc is used to define a descending order on a column. The returned value is usually passed to one of the dataset order methods.
:a.desc # SQL: a DESC
Alternatives:
Sequel[:a].desc
Sequel.desc(:a)
==== +, -, *, /
The standard mathematical operators are defined on Symbol, and return a Sequel numeric expression object representing the operation:
:a + :b # SQL: a + b
:a - :b # SQL: a - b
:a * :b # SQL: a * b
:a / :b # SQL: a / b
:a ** :b # SQL: power(a, b)
Sequel also supports ruby's coercion protocols on symbols (note that this does not work when using refinements):
1 + :b # SQL: 1 + b
Alternatives:
Sequel[:a] + :b
Sequel[:a] - :b
Sequel[:a] * :b
Sequel[:a] / :b
Sequel[:a] ** :b
Sequel.+(:a, :b)
Sequel.-(:a, :b)
Sequel.*(:a, :b)
Sequel./(:a, :b)
Sequel.**(:a, :b)
==== *
The * operator is overloaded on Symbol such that if it is called with no arguments, it represents a selection of all columns in the table:
:a.* # SQL: a.*
Alternative:
Sequel[:a].*
==== qualify
Symbol#qualify qualifies the identifier (e.g. a column) with another identifier (e.g. a table):
:column.qualify(:table) # SQL: table.column
Alternative:
Sequel[:table][:column]
Note the reversed order of the arguments. For the Symbol#qualify method, the argument is the qualifier, while for Sequel[][], the first [] is the qualifier, and the second [] is the identifier.
==== like
Symbol#like returns a case sensitive LIKE expression between the identifier and the given argument:
:a.like('b%') # SQL: a LIKE 'b%' ESCAPE '\'
Alternatives:
Sequel[:a].like('b%')
Sequel.like(:a, 'b%')
==== ilike
Symbol#ilike returns a case insensitive LIKE expression between the identifier and the given argument:
:a.ilike('b%') # SQL: a ILIKE 'b%' ESCAPE '\'
Alternatives:
Sequel[:a].ilike('b%')
Sequel.ilike(:a, 'b%')
==== sql_subscript
Symbol#sql_subscript returns a Sequel expression representing an SQL array access:
:a.sql_subscript(1) # SQL: a[1]
Alternatives:
Sequel[:a].sql_subscript(1)
Sequel.subscript(:a, 1)
==== extract
Symbol#extract does a datetime part extraction from the receiver:
:a.extract(:year) # SQL: extract(year FROM a)
Alternatives:
Sequel[:a].extract(:year)
Sequel.extract(:year, :a)
==== sql_boolean, sql_number, sql_string
These Symbol methods are used to force the treating of the object as a specific SQL type, instead of as a general SQL type. For example:
:a.sql_boolean + 1 # NoMethodError
:a.sql_number << 1 # SQL: a << 1
:a.sql_string + 'a' # SQL: a || 'a'
Alternatives:
Sequel[:a].sql_boolean
Sequel[:a].sql_number
Sequel[:a].sql_string
==== sql_function
Symbol#sql_function returns an SQL function call expression object:
:now.sql_function # SQL: now()
:sum.sql_function(:a) # SQL: sum(a)
:concat.sql_function(:a, :b) # SQL: concat(a, b)
Alternatives:
Sequel[:sum].function(:a)
Sequel.function(:sum, :a)
=== String
==== lit
String#lit creates a literal string, using placeholders if any arguments are given. Literal strings are not escaped, they are treated as SQL code, not as an SQL string:
'a'.lit # SQL: a
'"a" = ?'.lit(1) # SQL: "a" = 1
Alternatives:
Sequel.lit('a')
Sequel.lit('a = ?', 1)
==== to_sequel_blob
String#to_sequel_blob returns the string wrapped in a Sequel blob object. Often blobs need to be handled differently than regular strings by the database adapters.
"a\0".to_sequel_blob # SQL: X'6100'
Alternative:
Sequel.blob("a\0")
=== Hash, Array, & Symbol
==== ~
Array#~, Hash#~, and Symbol#~ treat the receiver as a conditions specifier, not matching all of the conditions:
~{a: 1, b: [2, 3]} # SQL: a != 1 OR b NOT IN (2, 3)
~[[:a, 1], [:b, [1, 2]]] # SQL: a != 1 OR b NOT IN (1, 2)
Alternatives:
~Sequel[a: 1, b: [2, 3]]
Sequel.~(a: 1, b: [2, 3])
=== Hash & Array
==== case
Array#case and Hash#case return an SQL CASE expression, where the keys are conditions and the values are results:
{{a: [2,3]} => 1}.case(0) # SQL: CASE WHEN a IN (2, 3) THEN 1 ELSE 0 END
[[{a: [2,3]}, 1]].case(0) # SQL: CASE WHEN a IN (2, 3) THEN 1 ELSE 0 END
Alternative:
Sequel.case({{a: [2,3]}=>1}, 0)
==== sql_expr
Array#sql_expr and Hash#sql_expr treat the receiver as a conditions specifier, matching all of the conditions in the array.
{a: 1, b: [2, 3]}.sql_expr # SQL: a = 1 AND b IN (2, 3)
[[:a, 1], [:b, [2, 3]]].sql_expr # SQL: a = 1 AND b IN (2, 3)
Alternative:
Sequel[a: 1, b: [2, 3]]
==== sql_negate
Array#sql_negate and Hash#sql_negate treat the receiver as a conditions specifier, matching none of the conditions in the array:
{a: 1, b: [2, 3]}.sql_negate # SQL: a != 1 AND b NOT IN (2, 3)
[[:a, 1], [:b, [2, 3]]].sql_negate # SQL: a != 1 AND b NOT IN (2, 3)
Alternative:
Sequel.negate(a: 1, b: [2, 3])
==== sql_or
Array#sql_or and Hash#sql_or treat the receiver as a conditions specifier, matching any of the conditions in the array:
{a: 1, b: [2, 3]}.sql_or # SQL: a = 1 OR b IN (2, 3)
[[:a, 1], [:b, [2, 3]]].sql_or # SQL: a = 1 OR b IN (2, 3)
Alternative:
Sequel.or(a: 1, b: [2, 3])
=== Array
==== sql_value_list
Array#sql_value_list wraps the array in an array subclass, which Sequel will always treat as a value list and not a conditions specifier. By default, Sequel treats arrays of two element arrays as a conditions specifier.
DB[:a].where('(a, b) IN ?', [[1, 2], [3, 4]]) # SQL: (a, b) IN ((1 = 2) AND (3 = 4))
DB[:a].where('(a, b) IN ?', [[1, 2], [3, 4]].sql_value_list) # SQL: (a, b) IN ((1, 2), (3, 4))
Alternative:
Sequel.value_list([[1, 2], [3, 4]])
==== sql_string_join
Array#sql_string_join joins all of the elements in the array in an SQL string concatenation expression:
[:a].sql_string_join # SQL: a
[:a, :b].sql_string_join # SQL: a || b
[:a, 'b'].sql_string_join # SQL: a || 'b'
['a', :b].sql_string_join(' ') # SQL: 'a' || ' ' || b
Alternative:
Sequel.join(['a', :b], ' ')
=== Hash & Symbol
==== &
Hash#& and Symbol#& return a Sequel boolean expression, matching the condition specified by the receiver and the condition specified by the given argument:
:a & :b # SQL: a AND b
{a: 1} & :b # SQL: a = 1 AND b
{a: true} & :b # SQL: a IS TRUE AND b
Alternatives:
Sequel[a: 1] & :b
Sequel.&({a: 1}, :b)
==== |
Hash#| and Symbol#| return a Sequel boolean expression, matching the condition specified by the receiver or the condition specified by the given argument:
:a | :b # SQL: a OR b
{a: 1} | :b # SQL: a = 1 OR b
{a: true} | :b # SQL: a IS TRUE OR b
Alternative:
Sequel[a: 1] | :b
Sequel.|({a: 1}, :b)
= Dataset Basics
== Introduction
Datasets are the primary way Sequel accesses the database. While most database libraries have specific support for updating all records or only a single record, Sequel's ability to represent SQL queries themselves as datasets is what gives Sequel most of its power. This document aims to give a basic introduction to datasets and how to use them.
== Basics
The most basic dataset is the simple selection of all columns in a table:
ds = DB[:posts]
# SELECT * FROM posts
Here, DB represents your Sequel::Database object, and ds is your dataset, with the SQL query it represents below it.
One of the core dataset ideas that should be understood is that datasets are frozen and use a functional style of modification, in which methods called on the dataset return modified copies of the dataset rather than modifying the dataset itself:
ds2 = ds.where(id: 1)
ds2
# SELECT * FROM posts WHERE id = 1
ds
# SELECT * FROM posts
Note how ds itself is not modified. This is because ds.where returns a modified copy of ds, instead of modifying ds itself. This makes using datasets both thread safe and easy to chain:
# Thread safe:
100.times do |i|
Thread.new do
ds.where(id: i).first
end
end
# Easy to chain:
ds3 = ds.select(:id, :name).order(:name).where{id < 100}
# SELECT id, name FROM posts WHERE id < 100 ORDER BY name
Thread safety you don't really need to worry about, but chainability is core to how Sequel is generally used. Almost all dataset methods that affect the SQL produced return modified copies of the receiving dataset.
Another important thing to realize is that dataset methods that return modified datasets do not execute the dataset's code on the database. Only dataset methods that return or yield results will execute the code on the database:
# No SQL queries sent:
ds3 = ds.select(:id, :name).order(:name).where{id < 100}
# Until you call a method that returns results
results = ds3.all
One important consequence of this API style is that if you use a method chain that includes both methods that return modified copies and a method that executes the SQL, the method that executes the SQL should generally be the last method in the chain:
# Good
ds.select(:id, :name).order(:name).where{id < 100}.all
# Bad
ds.all.select(:id, :name).order(:name).where{id < 100}
This is because all will return an array of hashes, and +select+, +order+, and +where+ are dataset methods, not array methods.
== Methods
Most Dataset methods that users will use can be broken down into two types:
* Methods that return modified datasets
* Methods that execute code on the database
=== Methods that return modified datasets
Most dataset methods fall into this category, which can be further broken down by the clause they affect:
SELECT:: select, select_all, select_append, select_group, select_more
FROM:: from, from_self
JOIN:: join, left_join, right_join, full_join, natural_join, natural_left_join, natural_right_join, natural_full_join, cross_join, inner_join, left_outer_join, right_outer_join, full_outer_join, join_table
WHERE:: where, filter, exclude, or, grep, invert, unfiltered
GROUP:: group, group_by, group_and_count, group_append, select_group, ungrouped
HAVING:: having, exclude_having, invert, unfiltered
ORDER:: order, order_by, order_append, order_prepend, order_more, reverse, reverse_order, unordered
LIMIT/OFFSET:: limit, offset, unlimited
compounds:: union, intersect, except
locking:: for_update, lock_style
common table expressions:: with, with_recursive
other:: distinct, naked, qualify, server, with_sql
=== Methods that execute code on the database
Most other dataset methods commonly used will execute the dataset's SQL on the database:
SELECT (All Records):: all, each, map, as_hash, to_hash_groups, select_map, select_order_map, select_hash, select_hash_groups
SELECT (First Record):: first, last, [], single_record
SELECT (Single Value):: get, single_value
SELECT (Aggregates):: count, avg, max, min, sum
INSERT:: insert, <<, import, multi_insert
UPDATE:: update
DELETE:: delete
other:: columns, columns!, truncate
=== Other methods
See the Sequel::Dataset RDoc for other methods that are less commonly used.
= Dataset Filtering
Sequel is very flexible when it comes to filtering records. You can specify your conditions as a hash of values to compare against, as ruby code that Sequel translates into SQL expressions, or as an SQL code fragment (with optional parameters).
== Filtering using a hash
If you just need to compare records against values, you can supply a hash:
items.where(category: 'ruby').sql
# "SELECT * FROM items WHERE (category = 'ruby')"
Sequel can check for null values:
items.where(category: nil).sql
# "SELECT * FROM items WHERE (category IS NULL)"
Or compare two columns:
items.where{{x: some_table[:y]}}.sql
# "SELECT * FROM items WHERE (x = some_table.y)"
And also compare against multiple values:
items.where(category: ['ruby', 'perl']).sql
# "SELECT * FROM items WHERE (category IN ('ruby', 'perl'))"
Ranges (both inclusive and exclusive) can also be used:
items.where(price: 100..200).sql
# "SELECT * FROM items WHERE (price >= 100 AND price <= 200)"
items.where(price: 100...200).sql
# "SELECT * FROM items WHERE (price >= 100 AND price < 200)"
== Filtering using an array
If you need to select multiple items from a dataset, you can supply an array:
items.where(id: [1, 38, 47, 99]).sql
# "SELECT * FROM items WHERE (id IN (1, 38, 47, 99))"
== Filtering using expressions
You can pass a block to where (referred to as a virtual row block), which is evaluated in a special context:
items.where{price * 2 < 50}.sql
# "SELECT * FROM items WHERE ((price * 2) < 50)
This works for the standard inequality and arithmetic operators:
items.where{price + 100 < 200}.sql
# "SELECT * FROM items WHERE ((price + 100) < 200)
items.where{price - 100 > 200}.sql
# "SELECT * FROM items WHERE ((price - 100) > 200)
items.where{price * 100 <= 200}.sql
# "SELECT * FROM items WHERE ((price * 100) <= 200)
items.where{price / 100 >= 200}.sql
# "SELECT * FROM items WHERE ((price / 100) >= 200)
items.where{price ** 2 >= 200}.sql
# "SELECT * FROM items WHERE (power(price, 2) >= 200)
You use the overloaded bitwise and (&) and or (|) operators to combine expressions:
items.where{(price + 100 < 200) & (price * 100 <= 200)}.sql
# "SELECT * FROM items WHERE (((price + 100) < 200) AND ((price * 100) <= 200))
items.where{(price - 100 > 200) | (price / 100 >= 200)}.sql
# "SELECT * FROM items WHERE (((price - 100) > 200) OR ((price / 100) >= 200))
To filter by equality, you use the standard hash, which can be combined with other expressions using Sequel.& and Sequel.|:
items.where{Sequel.&({category: 'ruby'}, (price + 100 < 200))}.sql
# "SELECT * FROM items WHERE ((category = 'ruby') AND ((price + 100) < 200))"
You can also use the =~ operator:
items.where{(category =~ 'ruby') & (price + 100 < 200)}.sql
# "SELECT * FROM items WHERE ((category = 'ruby') AND ((price + 100) < 200))"
This works with other hash values, such as arrays and ranges:
items.where{Sequel.|({category: ['ruby', 'other']}, (price - 100 > 200))}.sql
# "SELECT * FROM items WHERE ((category IN ('ruby', 'other')) OR ((price - 100) > 200))"
items.where{(price =~ (100..200)) & :active}.sql
# "SELECT * FROM items WHERE ((price >= 100 AND price <= 200) AND active)"
== Filtering using a custom filter string
If you wish to include an SQL fragment as part of a filter, you need to wrap it with +Sequel.lit+ to mark that it is literal SQL code, and pass it to the #where method:
items.where(Sequel.lit('x < 10')).sql
# "SELECT * FROM items WHERE x < 10"
In order to prevent SQL injection, you can replace literal values with question marks and supply the values as additional arguments to +Sequel.lit+:
items.where(Sequel.lit('category = ?', 'ruby')).sql
# "SELECT * FROM items WHERE category = 'ruby'"
You can also use placeholders with :placeholder and a hash of placeholder values:
items.where(Sequel.lit('category = :category', category: "ruby")).sql
# "SELECT * FROM items WHERE category = 'ruby'"
In order to combine AND and OR together, you have a few options:
items.where(category: nil).or(category: "ruby")
# SELECT * FROM items WHERE (category IS NULL) OR (category = 'ruby')
This won't work if you add other conditions:
items.where(name: "Programming in Ruby").where(category: nil).or(category: 'ruby')
# SELECT * FROM items WHERE ((name = 'Programming in Ruby') AND (category IS NULL)) OR (category = 'ruby')
The OR applies globally and not locally. To fix this, use & and |:
items.where(Sequel[name: "Programming in Ruby"] & (Sequel[category: nil] | Sequel[category: "ruby"]))
# SELECT * FROM items WHERE ((name = 'Programming in Ruby') AND ((category IS NULL) OR (category = 'ruby')))
=== Specifying SQL functions
Sequel also allows you to specify functions by using the Sequel.function method:
items.literal(Sequel.function(:avg, :price)) # "avg(price)"
If you are specifying a filter/selection/order, you can use a virtual row block:
items.select{avg(price)}
=== Negating conditions
You can use the exclude method to exclude whole conditions:
items.exclude(category: 'ruby').sql
# "SELECT * FROM items WHERE (category != 'ruby')"
items.exclude(:active).sql
# "SELECT * FROM items WHERE NOT active"
items.exclude{price / 100 >= 200}.sql
# "SELECT * FROM items WHERE ((price / 100) < 200)
To exclude only parts of conditions, you can use +where+ in combination with Sequel.~ or the ~ method on Sequel expressions:
items.where{Sequel.&(Sequel.~(category: 'ruby'), (price + 100 < 200))}.sql
# "SELECT * FROM items WHERE ((category != 'ruby') AND ((price + 100) < 200))"
items.where{~(category =~ 'ruby') & (price + 100 < 200)}.sql
# "SELECT * FROM items WHERE ((category != 'ruby') AND ((price + 100) < 200))"
You can also use the !~ method:
items.where{(category !~ 'ruby') & (price + 100 < 200)}.sql
# "SELECT * FROM items WHERE ((category != 'ruby') AND ((price + 100) < 200))"
=== Comparing against column references
You can also compare against other columns:
items.where{credit > debit}.sql
# "SELECT * FROM items WHERE (credit > debit)
Or against SQL functions:
items.where{price - 100 < max(price)}.sql
# "SELECT * FROM items WHERE ((price - 100) < max(price))"
== String search functions
You can search SQL strings in a case sensitive manner using the Sequel.like method:
items.where(Sequel.like(:name, 'Acme%')).sql
# "SELECT * FROM items WHERE (name LIKE 'Acme%' ESCAPE '\')"
You can search SQL strings in a case insensitive manner using the Sequel.ilike method:
items.where(Sequel.ilike(:name, 'Acme%')).sql
# "SELECT * FROM items WHERE (name ILIKE 'Acme%' ESCAPE '\')"
You can specify a Regexp as a hash value (or like argument), but this will probably only work
on PostgreSQL and MySQL:
items.where(name: /Acme.*/).sql
# "SELECT * FROM items WHERE (name ~ 'Acme.*')"
Like can also take more than one argument:
items.where(Sequel.like(:name, 'Acme%', /Beta.*/)).sql
# "SELECT * FROM items WHERE ((name LIKE 'Acme%' ESCAPE '\') OR (name ~ 'Beta.*'))"
== String concatenation
You can concatenate SQL strings using Sequel.join:
items.where(Sequel.join([:name, :comment]).like('Jo%nice%')).sql
# "SELECT * FROM items WHERE ((name || comment) LIKE 'Jo%nice%' ESCAPE '\')"
Sequel.join also takes a join argument:
items.where(Sequel.join([:name, :comment], ':').like('John:%nice%')).sql
# "SELECT * FROM items WHERE ((name || ':' || comment) LIKE 'John:%nice%' ESCAPE '\')"
== Filtering using sub-queries
Datasets can be used as subqueries. Subqueries can be very useful for filtering records, and many times provide a simpler alternative to table joins. Subqueries can be used in all forms of filters:
refs = consumer_refs.where(:logged_in).select(:consumer_id)
consumers.where(id: refs).sql
# "SELECT * FROM consumers WHERE (id IN (SELECT consumer_id FROM consumer_refs WHERE logged_in))"
Note that if you are checking for the inclusion of a single column in a subselect, the subselect should only select a single column.
== Using OR instead of AND
By default, if you chain calls to +where+, the conditions get ANDed together. If you want to use an OR for a condition, you can use the +or+ method:
items.where(name: 'Food').or(vendor: 1).sql
# "SELECT * FROM items WHERE ((name = 'Food') OR (vendor = 1))"
= Sequel Extensions
Sequel has an official extension system, for adding global, Database, and Dataset extensions.
== Global Extensions
Global extensions can add or modify the behavior of any part of Sequel. Technically, they are not limited to affecting Sequel, as they can also modify code outside of Sequel (e.g. the blank extension). However, extensions that modify things outside of Sequel generally do so only for backwards compatibility.
Global extensions are loaded via Sequel.extension:
Sequel.extension :named_timezones
All this does is require the relevant extension from sequel/extensions/named_timezones somewhere in the ruby path. Global extensions are just a simpler, consistent way to require code that modifies Sequel.
== Database Extensions
Database extensions should add or modify the behavior of a single Sequel::Database instance. They are loaded via Sequel::Database#extension:
DB.extension :server_block
The first thing that this does is load the relevant extension globally. However, Database extensions should be structured in a way that loading the relevant extension globally just adds a module with the related behavior; it doesn't modify any other state. After loading the extension globally, it modifies the related Sequel::Database object to modify its behavior, usually by extending it with a module.
If you want a Database extension loaded into all future Database instances, you can use Sequel::Database.extension:
Sequel::Database.extension :server_block
All future Sequel::Database instances created afterward will then automatically have the server_block extension loaded.
== Dataset Extensions
Dataset extensions should add or modify the behavior of a single Sequel::Dataset instance. They are loaded via Sequel::Dataset#extension. Sequel::Dataset#extension returns a modified copy of the dataset that includes the extension (similar to how most dataset query methods work):
ds = DB[:a].extension(:columns_introspection)
The first thing loading a Dataset extension does is load the relevant extension globally. Similar to Database extensions, loading a Dataset extension globally should not affect state other than maybe adding a module. After loading the extension globally, it returns a modified copy of the Sequel::Dataset with the extension loaded into it.
If you want to load an extension into all future datasets for a given Sequel::Database instance, you can also load it as a Database extension:
DB.extension :columns_introspection
Likewise, if you want to load an extension into all future datasets for all future databases, you can load it via Sequel::Database.extension:
Sequel::Database.extension :columns_introspection
== Creating Global Extensions
If you want to create a global extension, you just need to store your code so that you can require it via sequel/extensions/extension_name. Then users can load it via:
Sequel.extension :extension_name
It is recommended you only create a global extension if what you want to do would not work as a Database or Dataset extension.
== Creating Database Extensions
Creating Database extensions is similar to global extensions in terms of creating the file. However, somewhere in the file, you need to call Sequel::Database.register_extension. Usually you would call this with the module that will be added to the related Sequel::Database instance when the extension is loaded. For example, the server_block extension uses something like:
Sequel::Database.register_extension(:server_block, Sequel::ServerBlock)
The first argument is the name of the extension as a symbol, and the second is the module.
In some cases, just extending the Sequel::Database instance with a module is not sufficient. So Sequel::Database.register_extension also accepts a proc instead of a second argument. This proc is called with the Sequel::Database instance, and can then run any related code:
Sequel::Database.register_extension(:arbitrary_servers){|db| db.pool.extend(Sequel::ArbitraryServers)}
== Creating Dataset Extensions
Creating Dataset extensions is very similar to creating Database extensions, but instead of calling Sequel::Database.register_extension, you call Sequel::Dataset.register_extension. In general, you would call this with the module that will be added to the related Sequel::Dataset instance when the extension is loaded. For example, the columns_introspection extension uses something like:
Sequel::Dataset.register_extension(:columns_introspection, Sequel::ColumnsIntrospection)
The first argument is the name of the extension as a symbol, and the second is the module. When you call the Sequel::Dataset.register_extension method with a module, it in turn calls Sequel::Database.register_extension and adds a Database extension that loads this Dataset extension into all future Datasets created from the Database.
You can also call Sequel::Dataset.register_extension with a proc:
Sequel::Dataset.register_extension(:extension_name){|ds| }
Note that if you use a proc, a corresponding Database extension will not be created automatically (you can still call Sequel::Database.register_extension manually in this case).
= Fork Safety
If you are forking or using a library that forks after you have created a
Sequel::Database instance, then you must disconnect database connections before forking. If you
don't do this, you can end up with child processes sharing database connections
and all sorts of weird behavior, including crashes. Sequel will automatically create new
connections on an as needed basis in the child processes, so you only need to do the following in
the parent process:
DB.disconnect
Or if you have connections to multiple databases:
Sequel::DATABASES.each(&:disconnect)
== Puma
When using the Puma web server in clustered mode (which is the default behavior in Puma 5+ when
using multiple processes), you should disconnect inside the +before_fork+ hook in your
Puma config:
before_fork do
Sequel::DATABASES.each(&:disconnect)
end
== Unicorn
When using the Unicorn web server and preloading the application (+preload_app true+ in the Unicorn
config), you should disconnect inside the +before_fork+ hook in the Unicorn config:
before_fork do
Sequel::DATABASES.each(&:disconnect)
end
== Passenger
In Passenger web server, you should disconnect inside the
+starting_worker_process+ event hook:
if defined?(PhusionPassenger)
PhusionPassenger.on_event(:starting_worker_process) do |forked|
Sequel::DATABASES.each(&:disconnect) if forked
end
end
Note that this disconnects after forking instead of before forking. Passenger does not
offer a before fork hook.
== Spring
When using the Spring application preloader, you should disconnect inside the +after_fork+ hook:
if defined?(Spring)
Spring.after_fork do
Sequel::DATABASES.each(&:disconnect)
end
end
As the method indicates, this disconnects after forking instead of before forking.
Spring does not offer a before fork hook.
== Resque
In Resque, you should disconnect inside the +before_fork+ hook:
Resque.before_fork do |job|
Sequel::DATABASES.each(&:disconnect)
end
== Parallel
If you're using the Parallel gem with processes, you should disconnect before
calling it:
Sequel::DATABASES.each(&:disconnect)
Parallel.map(['a','b','c'], in_processes: 3) { |one_letter| }
== Other Libraries Calling fork
For any other library that calls fork, you should disconnect before calling
a method that forks:
Sequel::DATABASES.each(&:disconnect)
SomeLibrary.method_that_forks
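The same pattern applies when calling Kernel#fork directly. A minimal sketch (DB and the :albums table are placeholders):

  Sequel::DATABASES.each(&:disconnect)

  pid = fork do
    # The child process creates its own connections on demand
    DB[:albums].count
  end
  Process.wait(pid)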
= Sequel::Model Mass Assignment
Most Model methods that take a hash of attribute keys and values, including Model.new,
Model.create, Model#set and Model#update are subject to Sequel's mass assignment rules.
If you have an instance of a plain Sequel::Model class:
class Post < Sequel::Model
end
post = Post.new
and you call a mass assignment method with a hash:
post.set(title: 'T', body: 'B')
the mass assignment method will go through each key in the hash, append = to it to determine the
setter method, and if the setter method is defined and access to it is not restricted, Sequel will call the
setter method with the hash value. So if we assume that the posts table has title and body columns, what
the above mass assignment call actually does is:
post.title=('T')
post.body=('B')
By default, there are two types of setter methods that are restricted.
The first is methods like typecast_on_assignment= and ==, which don't affect columns.
These methods cannot be enabled for mass assignment.
The second is primary key setters.
So if you do:
post = Post.new(id: 1)
Sequel will raise a Sequel::MassAssignmentRestriction exception, since by default setting the primary key is not allowed.
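If you want to handle this case instead of letting the exception propagate, you can rescue it. A sketch:

  begin
    Post.new(id: 1)
  rescue Sequel::MassAssignmentRestriction
    # handle or report the restricted assignment attempt
  end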
To enable use of primary key setters, you need to call +unrestrict_primary_key+ for that model:
Post.unrestrict_primary_key
If you want to change mass assignment so it ignores attempts to access restricted setter methods, you can do:
# Global default
Sequel::Model.strict_param_setting = false
# Class level
Post.strict_param_setting = false
# Instance level
post.strict_param_setting = false
Since mass assignment by default allows modification of all column values except for primary key columns, it can be a security risk in some cases.
If you are dealing with untrusted input, you are generally going to want to restrict what should be updated.
Sequel has Model#set_fields and Model#update_fields methods, which are designed to be used with untrusted input.
These methods take two arguments, the untrusted hash as the first argument, and a trusted array of field names as the second argument:
post.set_fields({title: 'T', body: 'B'}, [:title, :body])
Instead of looking at every key in the untrusted hash, +set_fields+ will iterate over the trusted field names, looking each up in the hash, and
calling the setter method appropriately with the result. +set_fields+ basically translates the above method call to:
post.title=('T')
post.body=('B')
By using this method, you can be sure that the mass assignment method only sets the fields you expect it to set.
Note that if one of the fields does not exist in the hash:
post.set_fields({title: 'T'}, [:title, :body])
+set_fields+ will, by default, set the value to nil (the default hash value), with behavior equivalent to:
post.title=('T')
post.body=(nil)
You can use the :missing option to +set_fields+ to change the behavior:
post.set_fields({title: 'T'}, [:title, :body], missing: :skip)
# post.title=('T') # only
post.set_fields({title: 'T'}, [:title, :body], missing: :raise)
# raises Sequel::Error
If you want to set a model level default for the +set_fields+ options, you can use the +default_set_fields_options+ class accessor:
# Global default
Sequel::Model.default_set_fields_options[:missing] = :skip
# Class level
Post.default_set_fields_options[:missing] = :skip
Here's a table describing Sequel's default mass assignment methods:
Model.new(hash) :: Creates a new model instance, then calls Model#set(hash)
Model.create(hash) :: Calls Model.new(hash).save
Model#set(hash) :: Calls related setter method (unless access is restricted) for each key in the hash, then returns self
Model#update(hash) :: Calls set(hash).save_changes
Model#set_fields(hash, columns, options) :: For each column in columns, looks up related entry in hash, and calls the related setter method
Model#update_fields(hash, columns, options) :: Calls set_fields(hash, columns, options).save_changes
For backwards compatibility, Sequel also ships with whitelist_security and blacklist_security plugins that offer additional mass assignment
methods, but it is recommended to use +set_fields+ or +update_fields+ for untrusted input, and the other methods for trusted input.
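For example, the whitelist_security plugin supports a per-class whitelist of allowed columns (the column names here are illustrative):

  Post.plugin :whitelist_security
  Post.set_allowed_columns :title, :body

  post.set(title: 'T', body: 'B') # allowed
  post.set(id: 1)                 # raises Sequel::MassAssignmentRestriction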
= Migrations
This guide is based on http://guides.rubyonrails.org/migrations.html
== Overview
Migrations make it easy to alter your database's schema in a systematic manner.
They make it easier to coordinate with other developers and make sure that
all developers are using the same database schema.
Migrations are optional; you don't have to use them. You can always just
create the necessary database structure manually using Sequel's schema
modification methods or another database tool. However, if you are dealing
with other developers, you'll have to send them all of the changes you are
making. Even if you aren't dealing with other developers, you generally have
to make the schema changes in 3 places (development, testing, and
production), and it's probably easier to use the migrations system to apply
the schema changes than it is to keep track of the changes manually and
execute them manually at the appropriate time.
Sequel tracks which migrations you have already run, so to apply migrations
you generally need to run Sequel's migrator with bin/sequel -m:
sequel -m path/to/migrations postgres://host/database
Migrations in Sequel use a DSL via the Sequel.migration
method, and inside the DSL, use the Sequel::Database schema
modification methods such as +create_table+ and +alter_table+.
See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
for details on the schema modification methods you can use.
== A Basic Migration
Here is a fairly basic Sequel migration:
Sequel.migration do
up do
create_table(:artists) do
primary_key :id
String :name, null: false
end
end
down do
drop_table(:artists)
end
end
This migration has an +up+ block which adds an +artists+ table with an integer primary key named +id+,
and a varchar or text column (depending on the database) named +name+ that doesn't accept +NULL+ values.
Migrations should include both +up+ and +down+ blocks, with the +down+ block reversing
the change made by +up+. However, if you never need to be able to migrate down,
you can leave out the +down+ block. In the migration above, the +down+ block just reverses the
changes made by the +up+ block, dropping the table.
You can simplify the migration given above by using a reversible migration with a +change+
block:
Sequel.migration do
change do
create_table(:artists) do
primary_key :id
String :name, null: false
end
end
end
The +change+ block acts exactly like an +up+ block. The only difference is that
it will attempt to create a +down+ block for you, assuming that it knows how to
reverse the given migration. The +change+ block can usually correctly reverse
the following methods:
* +create_table+
* +create_join_table+
* +create_view+
* +add_column+
* +add_index+
* +rename_column+
* +rename_table+
* +alter_table+ (supporting the following methods in the +alter_table+ block):
* +add_column+
* +add_constraint+
* +add_foreign_key+ (with a symbol, not an array)
* +add_primary_key+ (with a symbol, not an array)
* +add_index+
* +add_full_text_index+
* +add_spatial_index+
* +rename_column+
If you use any other methods, you should create your own +down+ block.
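For example, Sequel knows how to reverse the following +change+ block:

  Sequel.migration do
    change do
      add_column :artists, :location, String
    end
  end

  # The generated down block is equivalent to:
  #
  #   down do
  #     drop_column :artists, :location
  #   end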
In normal usage, when Sequel's migrator runs, it runs the +up+ blocks for all
migrations that have not yet been applied. However, you can use the -M
switch to specify the version to which to migrate, and if it is lower than the
current version, Sequel will run the +down+ block on the appropriate migrations.
You are not limited to creating tables inside a migration; you can alter existing tables
as well as modify data. Let's say your artist database originally only included artists
from Sacramento, CA, USA, but now you want to branch out and include artists in any city:
Sequel.migration do
up do
add_column :artists, :location, String
from(:artists).update(location: 'Sacramento')
end
down do
drop_column :artists, :location
end
end
This migration adds a +location+ column to the +artists+ table, and sets the +location+ column
to 'Sacramento' for all existing artists. It doesn't use a default on the column,
because future artists should not be assumed to come from Sacramento. In the +down+ block, it
just drops the +location+ column from the +artists+ table, reversing the actions of the up
block.
Note that when updating the +artists+ table in the migration, a plain dataset is used: from(:artists).
This may look a little strange, but you need to be aware that inside an up or +down+ block in a migration,
self always refers to the Sequel::Database object that the migration is being applied to.
Since Database#from creates datasets, using from(:artists) inside the +up+ block creates
a dataset on the database representing all columns in the +artists+ table, and updates it to set the
+location+ column to 'Sacramento'. You should avoid referencing the Sequel::Database
object directly in your migration, and always use self to reference it, otherwise you may run into problems.
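Here is a sketch contrasting the two approaches inside a migration:

  Sequel.migration do
    up do
      # Avoid: references a Database object defined outside the migration
      # DB[:artists].update(location: 'Sacramento')

      # Prefer: implicitly uses self, the Database the migration is applied to
      from(:artists).update(location: 'Sacramento')
    end
  end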
== The +migration+ extension
The migration code is not technically part of the core of Sequel. It's not loaded by default as it
is only useful in specific cases. It is one of the extensions that ship with Sequel, which receive the same
level of support as Sequel's core.
If you want to play with Sequel's migration tools without using the bin/sequel tool, you
need to load the migration extension manually:
Sequel.extension :migration
== Schema methods
Migrations themselves do not contain any schema modification methods, but they make it easy to call
any of the Sequel::Database modification methods, of which there are many. The main
ones are +create_table+ and +alter_table+, but Sequel also comes with numerous other schema
modification methods, most of which are shortcuts for +alter_table+ (all of these methods are
described in more detail in the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]):
* add_column
* add_index
* create_view
* drop_column
* drop_index
* drop_table
* drop_view
* rename_table
* rename_column
* set_column_default
* set_column_type
These methods handle the vast majority of cross database schema modification SQL. If you
need to drop down to SQL to execute some database specific code, you can use the +run+
method:
Sequel.migration do
up{run 'CREATE TRIGGER ...'}
down{run 'DROP TRIGGER ...'}
end
In this case, we are using { and } instead of do and end to define the blocks. Just as
before, the +run+ methods inside the blocks are called on the +Database+ object,
which executes the given SQL on the underlying database.
== Errors when running migrations
Sequel attempts to run migrations inside of a transaction if the database supports
transactional DDL statements. On the databases that don't support transactional DDL
statements, if there is an error while running a migration, it will not roll back the
previous schema changes made by the migration. In that case, you will
need to update the database by hand.
It's recommended to always run migrations on a test database and ensure they work
before running them on any production database.
== Transactions
You can manually specify to use transactions on a per migration basis. For example,
if you want to force transaction use for a particular migration, call the transaction
method in the Sequel.migration block:
Sequel.migration do
transaction
change do
# ...
end
end
Likewise, you can disable transaction use via no_transaction:
Sequel.migration do
no_transaction
change do
# ...
end
end
This is necessary in some cases, such as when attempting to use CREATE INDEX CONCURRENTLY
on PostgreSQL (which supports transactional schema changes, but not that statement inside a
transaction).
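For example, a PostgreSQL migration that builds an index concurrently might look like the following sketch (the table and index names are illustrative):

  Sequel.migration do
    no_transaction
    up do
      run 'CREATE INDEX CONCURRENTLY artists_name_idx ON artists (name)'
    end
    down do
      run 'DROP INDEX artists_name_idx'
    end
  end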
You can also override the transactions setting at the migrator level, either by forcing
transactions even if no_transaction is set, or by disabling transactions altogether:
# Force transaction use
Sequel::Migrator.run(DB, '/path/to/migrations/dir', use_transactions: true)
# Disable use of transactions
Sequel::Migrator.run(DB, '/path/to/migrations/dir', use_transactions: false)
== Migration files
While you can create migration objects yourself and apply them manually, most of the
benefit to using migrations come from using Sequel's +Migrator+, which is what the
bin/sequel -m switch does. Sequel's +Migrator+ expects that each migration
will be in a separate file in a specific directory. The -m switch requires an
argument be specified that is the path to the directory containing the migration files.
For example:
sequel -m db/migrations postgres://localhost/sequel_test
will look in the db/migrations folder relative to the current directory,
and run unapplied migrations on the PostgreSQL database sequel_test running on localhost.
== Two separate migrators
Sequel actually ships with two separate migrators. One is the +IntegerMigrator+, the other is
the +TimestampMigrator+. They both have plusses and minuses:
=== +IntegerMigrator+
* Simpler, uses migration versions starting with 1
* Doesn't allow duplicate migrations
* Doesn't allow missing migrations by default
* Just stores the version of the last migration run
* Good for single developer or small teams with close communication
* Lower risk of undetected conflicting migrations
* Requires manual merging of simultaneous migrations
=== +TimestampMigrator+
* More complex, uses migration versions where the version should represent a timestamp
* Allows duplicate migrations (since you could have multiple in a given second)
* Allows missing migrations (since you obviously don't have one every second)
* Stores the file names of all applied migrations
* Good for large teams without close communication
* Higher risk of undetected conflicting migrations
* Does not require manual merging of simultaneous migrations
=== Filenames
In order for migration files to work with the Sequel migrator, they must be named as follows:
version_name.rb
where version is an integer and name is a string which should be a very brief
description of what the migration does. Each migration file should contain one and only one
call to Sequel.migration.
=== +IntegerMigrator+ Filenames
These are valid migration names for the +IntegerMigrator+:
1_create_artists.rb
2_add_artist_location.rb
The only problem with this naming format is that if you have more than 9 migrations, the 10th
one will look a bit odd:
1_create_artists.rb
2_add_artist_location.rb
...
9_do_something.rb
10_do_something_else.rb
For this reason, it's often best to start with 001 instead of 1, as that means you don't need
to worry about that issue until the 1000th migration:
001_create_artists.rb
002_add_artist_location.rb
...
009_do_something.rb
010_do_something_else.rb
Migrations start at 1, not 0. The migration version number 0
is important though, as it is used to mean that all migrations should be unapplied (i.e. all
+down+ blocks run). In Sequel, you can do that with:
sequel -m db/migrations -M 0 postgres://localhost/sequel_test
=== +TimestampMigrator+ Filenames
With the +TimestampMigrator+, the version integer should represent a timestamp, though this isn't strictly
required.
For example, for 5/10/2010 12:00:00pm, you could use any of the following formats:
# Date
20100510_create_artists.rb
# Date and Time
20100510120000_create_artists.rb
# Unix Epoch Time Integer
1273518000_create_artists.rb
The important thing is that all migration files should be in the same format, otherwise when you
update, it'll be difficult to make sure migrations are applied in the correct order, as well as
difficult to unapply some of the affected migrations correctly.
The +TimestampMigrator+ will be used if any filename in the migrations directory has a version
greater than 20000101. Otherwise, the +IntegerMigrator+ will be used.
You can force the use of the +TimestampMigrator+ in the API by calling TimestampMigrator.apply
instead of Migrator.apply.
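For example:

  Sequel.extension :migration
  Sequel::TimestampMigrator.apply(DB, 'db/migrations')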
=== How to choose
Basically, unless you need the features provided by the +TimestampMigrator+, stick with the
+IntegerMigrator+, as it is simpler and makes it easier to detect possible errors.
For a single developer, the +TimestampMigrator+ has no real benefits, so I would always recommend
the +IntegerMigrator+. When dealing with multiple developers, it depends on the size of the
development team, the team's communication level, and the level of overlap between developers.
Let's say Alice works on a new feature that requires a migration at the same time Bob works
on a separate feature that requires an unrelated migration. If both developers are committing
to their own private repositories, when it comes time to merge, the +TimestampMigrator+ will not
require any manual changes. That's because Alice will have a migration such as
20100512_do_this.rb and Bob will have one such as 20100512_do_that.rb.
If the +IntegerMigrator+ were used, Alice would have 34_do_this.rb and Bob would have
34_do_that.rb, and the +IntegerMigrator+ would raise an exception due to
the duplicate migration version. The only way to fix it would be to renumber one of the two
migrations, and have the affected developer manually modify their database.
So for unrelated migrations, the +TimestampMigrator+ works fine. However, let's say that the
migrations are related, in such a way that if Bob's is run first, Alice's will fail. In this
case, the +TimestampMigrator+ would not raise an error when Bob merges Alice's changes, since
Bob ran his migration first. However, it would raise an error when Alice runs Bob's migration,
and could leave the database in an inconsistent state if the database doesn't support transactional
schema changes.
With the +TimestampMigrator+, you are trading reliability for convenience. That's possibly a valid
trade, especially if simultaneous related schema changes by separate developers are unlikely, but
you should give it some thought before using it.
== Ignoring missing migrations
In some cases, you may want to allow a migration in the database that does not exist in the filesystem (for example, when deploying an older version of the code whose deploy process auto-migrates, without first running the down blocks of the newer migrations). If required, you can pass allow_missing_migration_files: true as an option. This will stop errors from being raised if there are migrations in the database that do not exist in the filesystem. Note that the migrations themselves can still raise an error when using this option, if the database schema isn't in the state the migrations expect it to be in. In general, the allow_missing_migration_files: true option is very risky to use, and should only be used if it is absolutely necessary.
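When running the migrator via the API, the option is passed to Sequel::Migrator.run:

  Sequel::Migrator.run(DB, 'db/migrations', allow_missing_migration_files: true)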
== Modifying existing migrations
Just don't do it.
In general, you should not modify any migration that has been run on the database and been committed to
the source control repository, unless the migration contains an error that causes data loss. As long
as it is possible to undo the migration without losing data, you should just add another migration
that undoes the actions of the previous bad migration, and does the correct action afterward.
The main problem with modifying existing migrations is that you will have to manually modify any
databases that ran the migration before it was modified. If you are a single developer, that may be
an option, but certainly if you have multiple developers, it's a lot more work.
== Creating a migration
Sequel doesn't come with generators that create migrations for you. However, creating a migration
is as simple as creating a file with the appropriate filename in your migrations directory that
contains a Sequel.migration call. The minimal do-nothing migration is:
Sequel.migration{}
However, the migrations you write should contain an +up+ block that does something, and a +down+ block that
reverses the changes made by the +up+ block:
Sequel.migration do
up{}
down{}
end
or they should use the reversible migrations feature with a +change+ block:
Sequel.migration do
change{}
end
== What to put in your migration's +down+ block
It's usually easy to determine what you should put in your migration's +up+ block,
as it's whatever change you want to make to the database. The +down+ block is
less obvious. In general, it should reverse the changes made by the +up+ block, which means
it should execute the opposite of what the +up+ block does in the reverse order in which
the +up+ block does it. Here's an example where you are switching from having a single
artist per album to multiple artists per album:
Sequel.migration do
up do
# Create albums_artists table
create_join_table(album_id: :albums, artist_id: :artists)
# Insert one row in the albums_artists table
# for each row in the albums table where there
# is an associated artist
from(:albums_artists).insert([:album_id, :artist_id],
from(:albums).select(:id, :artist_id).exclude(artist_id: nil))
# Drop the now unnecessary column from the albums table
drop_column :albums, :artist_id
end
down do
# Add the foreign key column back to the albums table
alter_table(:albums){add_foreign_key :artist_id, :artists}
# If possible, associate each album with one of the artists
# it was associated with. This loses information, but
# there's no way around that.
from(:albums).update(artist_id: from(:albums_artists).
select{max(artist_id)}.
where(album_id: Sequel[:albums][:id])
)
# Drop the albums_artists table
drop_join_table(album_id: :albums, artist_id: :artists)
end
end
Note that the operations performed in the +down+ block are performed in the
reverse order of how they are performed in the +up+ block. Also note how it
isn't always possible to reverse exactly what was done in the +up+ block. You
should try to do so as much as possible, but if you can't, you may want to have
your +down+ block raise a Sequel::Error exception saying why the
migration cannot be reverted.
== Running migrations
You can run migrations using the +sequel+ command line program that
comes with Sequel. If you use the -m switch, +sequel+ will
run the migrator instead of giving you an IRB session. The -m
switch requires an argument that should be a path to a directory of migration
files:
sequel -m relative/path/to/migrations postgres://host/database
sequel -m /absolute/path/to/migrations postgres://host/database
If you do not provide a -M switch, +sequel+ will migrate to the latest
version in the directory. If you provide a -M switch, it should specify
an integer version to which to migrate.
# Migrate all the way down
sequel -m db/migrations -M 0 postgres://host/database
# Migrate to version 10 (IntegerMigrator style migrations)
sequel -m db/migrations -M 10 postgres://host/database
# Migrate to version 20100510 (TimestampMigrator migrations using YYYYMMDD)
sequel -m db/migrations -M 20100510 postgres://host/database
Whether or not migrations use the +up+ or +down+ block depends on the version
to which you are migrating. If you don't provide a -M switch, all
unapplied migrations will be migrated up. If you provide a -M, it will
depend on which migrations have been applied. Applied migrations greater
than that version will be migrated down, while unapplied migrations less than
or equal to that version will be migrated up.
== Running migrations from a Rake task
You can also incorporate migrations into a Rakefile:
namespace :db do
desc "Run migrations"
task :migrate, [:version] do |t, args|
require "sequel/core"
Sequel.extension :migration
version = args[:version].to_i if args[:version]
Sequel.connect(ENV.fetch("DATABASE_URL")) do |db|
Sequel::Migrator.run(db, "db/migrations", target: version)
end
end
end
To migrate to the latest version, run:
rake db:migrate
This Rake task takes an optional argument specifying the target
version. To migrate to version 42, run:
rake db:migrate[42]
== Verbose migrations
By default, sequel -m operates as a well-behaved command line utility
should, printing out nothing if there is no error. If you want to see the SQL
being executed during a migration, as well as the amount of time that each
migration takes, you can use the -E option to +sequel+ to set up a
+Database+ logger that logs to +STDOUT+. You can also log that same output to
a file using the -l option with a log file name.
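For example:

  # Log SQL statements to STDOUT while migrating
  sequel -m db/migrations -E postgres://host/database

  # Log SQL statements to a file while migrating
  sequel -m db/migrations -l db/migration.log postgres://host/database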
If you want to include a logger in the rake task above, add a +:logger+ option
when calling Sequel.connect:
require "logger"
Sequel.connect(ENV.fetch("DATABASE_URL"), logger: Logger.new($stderr))
== Using models in your migrations
Just don't do it.
It can be tempting to use models in your migrations, especially since it's easy
to load them at the same time using the -L option to +sequel+. However,
this ties your migrations to your models, and makes it so that changes in your
models can break old migrations.
With Sequel, it should be easy to use plain datasets to accomplish pretty much
anything you would want to accomplish in a migration. Even if you have to
copy some code from a model method into a migration itself, it's better than
having your migration use models and call model methods.
== Dumping the current schema as a migration
Sequel comes with a +schema_dumper+ extension that dumps the current schema of
the database as a migration to +STDOUT+ (which you can redirect to a file using
>). This is exposed in the +sequel+ command line tool with the -d and
-D switches. -d dumps the schema in database independent
format, while -D dumps the schema using a non-portable format, useful
if you are using nonportable columns such as +inet+ in your database.
Let's say you have an existing database and want to create a migration that
would recreate the database's schema:
sequel -d postgres://host/database > db/migrations/001_start.rb
or using a nonportable format:
sequel -D postgres://host/database > db/migrations/001_start.rb
The main difference between the two is that -d will use the type methods
with the database-independent ruby class types, while -D will use
the +column+ method with string types.
You can take the migration created by the schema dumper to another computer
with an empty database, and attempt to recreate the schema using:
sequel -m db/migrations postgres://host/database
The schema_dumper extension is quite limited in what types of
database objects it supports. In general, it only supports
dumping tables, columns, primary key and foreign key constraints,
and some indexes. It does not support most table options, CHECK
constraints, partial indexes, database functions, triggers,
security grants/revokes, and a wide variety of other useful
database properties. Be aware of the limitations when using the
schema_dumper extension. If you are dumping the schema to restore
to the same database type, it is recommended to use your database's
dump and restore programs instead of the schema_dumper extension.
== Checking for Current Migrations
In your application code, you may want to check that you are up to date with
regard to migrations (i.e. you don't have any unapplied migrations). Sequel
offers two separate methods to do that. The first is Sequel::Migrator.check_current.
This method raises an exception if there are outstanding migrations that need to
be run. The second is Sequel::Migrator.is_current?, which returns true if there
are no outstanding migrations, and false if there are outstanding migrations.
If you want to ensure that your application code is up to date, you may want to
add the following code after connecting to your database:
Sequel.extension :migration
Sequel::Migrator.check_current(DB, '/path/to/migrations')
This will cause your application to raise an error when you start it if you have
any outstanding migrations.
== Old-style migration classes
Before the Sequel.migration DSL was introduced, Sequel used classes
for migrations:
Class.new(Sequel::Migration) do
def up
end
def down
end
end
or:
class DoSomething < Sequel::Migration
def up
end
def down
end
end
This usage is discouraged in new code, but will continue to be supported indefinitely.
It is not recommended to convert old-style migration classes to the Sequel.migration
DSL, but it is recommended to use the Sequel.migration DSL for all new migrations.
== Database-specific migrations
While not a recommended practice, it is sometimes necessary to have parts of migrations be
database-specific. You can use the Sequel::Database#database_type method to check which
database the migration is being run on, and operate accordingly:
Sequel.migration do
up do
if database_type == :mysql
run 'MySQL specific code'
else
run 'Generic SQL code'
end
end
down do
if database_type == :mysql
run 'MySQL specific code'
else
run 'Generic SQL code'
end
end
end
== Using Database Extensions in Migrations
If you need to use database extensions in migrations (e.g. +:pg_enum+), you should load the extension in the up or down block as appropriate:
Sequel.migration do
up do
extension :pg_enum
# migration here
end
down do
extension :pg_enum
# migration here
end
end
= Model Dataset Method Design Guide
How you design your model dataset methods can significantly affect the flexibility of your API for your model classes, as well as the performance. The goal of this guide is to provide an example of how to design your model dataset methods for maximum flexibility and performance.
== Flexibility: Use Single Method Per Task
In general, it is recommended that you have a single method per task for maximum flexibility. For example, let's say you need to retrieve all albums released in a given year, ordered by number of units sold descending, and only care about the id, name and number of units sold. One way to do this is to
call the dataset methods directly in your application code (outside the model):
Album.
select(:id, :name, :copies_sold).
where(release_year: params[:year].to_i).
order(Sequel.desc(:copies_sold)).
all
One issue with this design is that it ties you to your current database schema, and will make it necessary to change your application code if your schema changes. In general, it is better to encapsulate your code into a dataset method (or a class method, but a dataset method is more flexible):
class Album < Sequel::Model
dataset_module do
def all_albums_released_in_year(year)
select(:id, :name, :copies_sold).
where(release_year: year).
order(Sequel.desc(:copies_sold)).
all
end
end
end
Then your application code just needs to call your dataset method:
Album.all_albums_released_in_year(params[:year].to_i)
The advantage of this approach is that you can change your schema at any point in the future, and you should only need to change your model code; you should never need to change other application code.
== Performance
After designing your dataset methods for flexibility, stop. Don't worry about performance until you need to worry about performance. However, assuming you have profiled your application and profiling shows you can benefit from optimizing the above method, you can then consider the performance impact of future design choices.
First, consider that the root cause of the performance issue may not be at the Sequel level; it may be in the database itself. Use +EXPLAIN+ or the equivalent to analyze the query plan for the query in use, and see if there is something you can do to optimize it, such as adding an appropriate index.
Second, assuming the performance issue is at the Sequel level, you need to understand that one of the best ways to improve performance in most ruby code is to reduce the number of objects allocated. Here is the above code with comments showing datasets allocated:
def all_albums_released_in_year(year)
select(:id, :name, :copies_sold). # new dataset allocated
where(release_year: year). # new dataset allocated
order(Sequel.desc(:copies_sold)). # new dataset allocated
all
end
Third, you need to understand that Sequel has optimizations specifically designed to reduce the number of objects allocated, by caching intermediate datasets. Unfortunately, those optimizations do not apply in this case. The reason for this is that +select+, +where+, and +order+ can potentially receive arbitrary arguments, and enabling caching for them could easily lead to unbounded cache size (denial of service due to memory exhaustion).
To allow intermediate dataset caching to work, you need to signal to Sequel that particular arguments to these methods should be cached, and you can do that by calling methods inside +dataset_module+ blocks such as +select+ and +order+. These methods will add dataset methods to the model that can cache the returned dataset to optimize performance. Here is an example using these methods:
class Album < Sequel::Model
dataset_module do
select :with_name_and_units, :id, :name, :copies_sold
order :by_units_sold, Sequel.desc(:copies_sold)
def all_albums_released_in_year(year)
with_name_and_units.
by_units_sold.
where(release_year: year).
all
end
end
end
Performance aside, this does provide a slightly nicer and more readable internal API, though naming such methods can be problematic.
By calling +select+ and +order+ here, Sequel expects that the created dataset methods may be called more than once on the same dataset, and it knows that the arguments to the underlying +select+ and +order+ methods are fixed, so it can cache the resulting datasets. Let's comment the above example with dataset allocations:
def all_albums_released_in_year(year)
with_name_and_units. # cached dataset returned
by_units_sold. # cached dataset returned
where(release_year: year). # new dataset allocated
all
end
Note that the order of methods here is important. If you instead change the method chain to filter the dataset first, then no caching happens:
def all_albums_released_in_year(year)
where(release_year: year). # new dataset allocated
with_name_and_units. # new dataset allocated
by_units_sold. # new dataset allocated
all
end
This is because any time a new, uncached dataset is returned by a dataset method, all subsequent methods in the method chain cannot benefit from caching.
Usually, when you are designing methods to process data based on user input, the user input affects the rows selected, and not the columns selected or the order in which the rows are returned. Sequel is aware of this and has dataset methods that specifically take user input (arguments), interpret them as a filter condition and either:
* Return all matching rows in an array (+where_all+)
* Iterate over all matching rows (+where_each+)
* Return first matching row (+first+)
* Return first column in first matching row, assumes only a single column is selected (+where_single_value+)
After calling these methods on a cached dataset a number of times (currently 3), Sequel will automatically build an optimized loader, cache it, and use it for future loads. So the above example changes to:
def all_albums_released_in_year(year)
with_name_and_units. # cached dataset returned
by_units_sold. # cached dataset returned
where_all(release_year: year) # cached loader used
end
This can significantly improve performance, up to 3x for complex method chains that only return a few rows.
So the general advice on designing dataset methods for performance is:
* Use +dataset_module+ methods to create named dataset methods that return cached datasets
* If any filtering is to be done, have it done last using +where_all+, +where_each+, +first+, or +where_single_value+.
By following this advice, you can significantly increase the performance of your model dataset code.
=== Further Increasing Performance
The best way to further increase performance at the Sequel level is to switch to using prepared statements. This does require more significant changes to the API. Here's an example using prepared statements:
class Album < Sequel::Model
ALBUMS_RELEASED_IN_YEAR = select(:id, :name, :copies_sold).
where(release_year: :$year).
order(Sequel.desc(:copies_sold)).
prepare(:all, :all_albums_released_in_year)
def self.all_albums_released_in_year(year)
ALBUMS_RELEASED_IN_YEAR.call(year: year)
end
end
Note that when using prepared statements, you need to use a class method instead of a dataset method, as the SQL for the prepared statement must be fixed for the class. This limits the flexibility of the method, since you can no longer call it on arbitrary datasets on the class.
= Model Hooks
This guide is based on http://guides.rubyonrails.org/activerecord_validations_callbacks.html
== Overview
Model hooks are used to specify actions that occur at a given point in a model instance's lifecycle, such as before or after the model object is saved, created, updated, destroyed, or validated. There are also around hooks for all types, which wrap the before hooks, the behavior, and the after hooks.
== Basic Usage
Sequel::Model uses instance methods for hooks. To define a hook on a model, you just add an instance method to the model class:
class Album < Sequel::Model
def before_create
self.created_at ||= Time.now
super
end
end
The one important thing to note here is the call to +super+ inside the hook. Whenever you override one of Sequel::Model's methods, you should be calling +super+ to get the default behavior. Many of the plugins that ship with Sequel work by overriding the hook methods and calling +super+. If you use these plugins and override the hook methods but do not call +super+, it's likely the plugins will not work correctly.
== Available Hooks
Sequel calls hooks in the following order when saving/creating a new object (one that does not already exist in the database):
* +around_validation+
* +before_validation+
* +validate+ method called
* +after_validation+
* +around_save+
* +before_save+
* +around_create+
* +before_create+
* INSERT QUERY
* +after_create+
* +after_save+
Sequel calls hooks in the following order when saving an existing object:
* +around_validation+
* +before_validation+
* +validate+ method called
* +after_validation+
* +around_save+
* +before_save+
* +around_update+
* +before_update+
* UPDATE QUERY
* +after_update+
* +after_save+
Note that all of the hook calls are the same, except that +around_create+, +before_create+ and +after_create+ are used for a new object, and +around_update+, +before_update+ and +after_update+ are used for an existing object. Note that +around_save+, +before_save+, and +after_save+ are called in both cases.
Note that the validation hooks are not called if the validate: false option is passed to save. However, if you call Model#valid? manually, then only the validation hooks are called:
* +around_validation+
* +before_validation+
* +validate+ method called
* +after_validation+
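For example, assuming +album+ is an Album instance:

  album.save(validate: false) # skips the validation hooks entirely
  album.valid?                # runs only the validation hooks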
Sequel calls hooks in the following order when destroying an existing object:
* +around_destroy+
* +before_destroy+
* DELETE QUERY
* +after_destroy+
Note that these hooks are only called when using Model#destroy, they are not called if you use Model#delete.
== Transaction-related Hooks
Sequel::Model no longer offers transaction hooks for model instances. However, you can use the database transaction hooks inside model +before_save+ and +after_save+ hooks:
class Album < Sequel::Model
def before_save
db.after_rollback{rollback_action}
super
end
def after_save
super
db.after_commit{commit_action}
end
end
== Running Hooks
Sequel does not provide a simple way to turn off the running of save/create/update hooks. If you attempt to save a model object, the save hooks are always called. All model instance methods that modify the database call save in some manner, so you can be sure that if you define the hooks, they will be called when you save the object.
However, you should note that there are plenty of ways to modify the database without saving a model object. One example is by using plain datasets, or one of the model's dataset methods:
Album.where(name: 'RF').update(copies_sold: Sequel.+(:copies_sold, 1))
# UPDATE albums SET copies_sold = copies_sold + 1 WHERE name = 'RF'
In this case, the +update+ method is called on the dataset returned by Album.where. Even if there is only a single object with the name RF, this will not call any hooks. If you want model hooks to be called, you need to make sure to operate on a model object:
album = Album.first(name: 'RF')
album.update(copies_sold: album.copies_sold + 1)
# UPDATE albums SET copies_sold = 2 WHERE id = 1
For the destroy hooks, you need to make sure you call +destroy+ on the object:
album.destroy # runs destroy hooks
== Skipping Hooks
Sequel makes it easy to skip destroy hooks by calling +delete+ instead of +destroy+:
album.delete # does not run destroy hooks
However, skipping hooks is a bad idea in general and should be avoided. As mentioned above, Sequel doesn't allow you to turn off the running of save hooks. If you know what you are doing and really want to skip them, you need to drop down to the dataset level to do so. This can be done for a specific model object by using the +this+ method for a dataset that represents a single object:
album.this # dataset
The +this+ dataset works just like any other dataset, so you can call +update+ on it to modify the underlying database row:
album.this.update(copies_sold: album.copies_sold + 1)
If you want to insert a row into the model's table without running the creation hooks, you can use Model.insert instead of Model.create:
Album.insert(name: 'RF') # does not run hooks
== Canceling Actions in Hooks
Sometimes you want to cancel an action in a before hook, so the action is not performed. For example, you may not want to allow destroying or saving a record in certain cases. In those cases, you can call +cancel_action+ inside the before_* hook, which will stop processing the hook and will either raise a Sequel::HookFailed exception (the default), or return +nil+ (if +raise_on_save_failure+ is +false+). You can use this to implement validation-like behavior, that will run even if validations are skipped:
class Album < Sequel::Model
def before_save
cancel_action if name == ''
super
end
end
For around hooks, neglecting to call +super+ halts hook processing in the same way as calling +cancel_action+ in a before hook. It's probably a bad idea to use +cancel_action+ in after hooks, or after yielding in around hooks, since by then the main processing has already taken place.
By default, Sequel runs hooks other than validation hooks inside a transaction, so if you cancel the action by calling +cancel_action+ in any hook, Sequel will rollback the transaction. However, note that the implicit use of transactions when saving and destroying model objects is conditional (it depends on the model instance's +use_transactions+ setting and the :transaction option passed to save).
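For example (a sketch):

  album.use_transactions = false
  album.save                    # saved without a wrapping transaction
  album.save(transaction: true) # the explicit option overrides the setting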
== Conditional Hooks
Sometimes you only want to take a certain action in a hook if the object meets a certain condition. For example, let's say you only want to make sure a timestamp is set when updating if the object is at a certain status level:
class Album < Sequel::Model
def before_update
self.timestamp ||= Time.now if status_id > 3
super
end
end
Note how this hook action is made conditional just by using the standard ruby +if+ conditional. Sequel makes it easy to handle conditional hook actions by using standard ruby conditionals inside the instance methods.
== Using Hooks in Multiple Classes
If you want all your model classes to use the same hook, you can just define that hook in Sequel::Model:
class Sequel::Model
def before_create
self.created_at ||= Time.now
super
end
end
Just remember to call +super+ whenever you override the method in a subclass. Note that +super+ is also used when overriding the hook in Sequel::Model itself. This is important: if you add any plugins to Sequel::Model itself, then override a hook in Sequel::Model without calling +super+, the plugin may not work correctly.
If you don't want all classes to use the same hook, but want to reuse hooks in multiple classes, you should use a plugin or a simple module:
=== Plugin
module SetCreatedAt
module InstanceMethods
def before_create
self.created_at ||= Time.now
super
end
end
end
Album.plugin(SetCreatedAt)
Artist.plugin(SetCreatedAt)
=== Simple Module
module SetCreatedAt
def before_create
self.created_at ||= Time.now
super
end
end
Album.send(:include, SetCreatedAt)
Artist.send(:include, SetCreatedAt)
== +super+ Ordering
While it's not enforced anywhere, it's a good idea to make +super+ the last expression when you override a before hook, and the first expression when you override an after hook:
class Album < Sequel::Model
def before_save
self.updated_at ||= Time.now
super
end
def after_save
super
AuditLog.create(log: "Album #{name} created")
end
end
This allows the following general principles to be true:
* before hooks are run in reverse order of inclusion
* after hooks are run in order of inclusion
So if you define the same before and after hooks in both a model and a plugin that the model uses, the hooks will be called in this order:
* model before hook
* plugin before hook
* plugin after hook
* model after hook
Again, Sequel does not enforce that, and you are free to call +super+ in an order other than the recommended one (just make sure that you call it).
== Around Hooks
Around hooks should only be used if you cannot accomplish the same results with before and after hooks. For example, if you want to catch database errors caused by the +INSERT+ or +UPDATE+ query when saving a model object and raise them as validation errors, you cannot use a before or after hook. You have to use an +around_save+ hook:
class Album < Sequel::Model
def around_save
super
rescue Sequel::DatabaseError => e
# parse database error, set error on self, and reraise a Sequel::ValidationFailed
end
end
Likewise, let's say that upon retrieval, you associate an object with a file descriptor, and you want to ensure that the file descriptor is closed after the object is saved to the database. Let's assume you are always saving the object and you are not using validations. You could not use an +after_save+ hook safely, since if the database raises an error, the +after_save+ method will not be called. In this case, an +around_save+ hook is also the correct choice:
class Album < Sequel::Model
def around_save
super
ensure
@file_descriptor.close
end
end
== Hook related plugins
=== +instance_hooks+
Sequel also ships with an +instance_hooks+ plugin that allows you to define before and after hooks on a per instance basis. It's very useful as it allows you to delay action on an instance until before or after saving. This can be important if you want to modify a group of related objects together (which is how the +nested_attributes+ plugin uses +instance_hooks+).
=== +hook_class_methods+
While it's recommended to write your hooks as instance methods, Sequel ships with a +hook_class_methods+ plugin that allows you to define hooks via class methods. It exists mostly for legacy compatibility, but is still supported. However, it does not implement around hooks.
=== +after_initialize+
The after_initialize plugin adds an after_initialize hook that is called for all model instances on creation (both new instances and instances retrieved from the database). It exists mostly for legacy compatibility, but it is still supported.
= Model Plugins
Sequel::Model (and Sequel in general) is designed around the idea of a small core, to which application-specific behavior can easily be added. Sequel::Model implements this design using a plugin system. Plugins are modules that include submodules for model class methods, model instance methods, and model dataset methods. All plugins can override the class, instance, and dataset methods added by earlier plugins, and call super to get the behavior before the plugin was added.
== Default Plugins
The Sequel::Model class is completely empty by default, in that it has no class methods or instance methods. Sequel::Model is itself a plugin, and it is the first plugin loaded, and it is loaded into itself (meta!). So methods in Sequel::Model::ClassMethods become Sequel::Model class methods, methods in Sequel::Model::InstanceMethods become Sequel::Model instance methods, and methods in Sequel::Model::DatasetMethods become Sequel::Model dataset methods. The Sequel::Model plugin is often referred to as the base plugin.
The Sequel::Model class also has the Sequel::Model::Associations plugin loaded by default, though it is possible to disable this.
== Loading Plugins
Loading a plugin into a model class is generally as simple as calling the Sequel::Model.plugin method with the name of the plugin, for example:
Sequel::Model.plugin :subclasses
What this does is require the sequel/plugins/subclasses file, and then assume that that file defines the Sequel::Plugins::Subclasses plugin module.
It's possible to pass module instances to the plugin method to load plugins that are stored in arbitrary files or namespaces:
Sequel::Model.plugin MyApp::Plugins::Foo
In the examples shown above, the plugin is loaded into Sequel::Model, which means it is loaded into all subclasses that are created afterward. With many plugins, you are not going to want to add them to Sequel::Model, but to a specific subclass:
class Node < Sequel::Model
plugin :tree
end
Doing this, only Node and future subclasses of Node will have the tree plugin loaded.
== Plugin Arguments/Options
Some plugins require arguments and/or support options. For example, the single_table_inheritance plugin requires an argument containing the column that specifies the class to use, and options:
class Employee < Sequel::Model
plugin :single_table_inheritance, :type_id, model_map: {1=>:Staff, 2=>:Manager}
end
You should read the documentation for the plugin to determine if it requires arguments and what if any options are supported.
== Creating Plugins
The simplest possible plugin is an empty module in a file stored in sequel/plugins/plugin_name somewhere in ruby's load path:
module Sequel
module Plugins
module PluginName
end
end
end
Well, technically, that's not the simplest possible plugin, but it is the simplest one you can load by name. The absolute simplest plugin would be an empty module:
Sequel::Model.plugin Module.new
== Example Formatting
In general, loading plugins by module instead of by name is not recommended, so this guide will assume that plugins are loaded by name. For simplicity, we'll also use the following format for example plugin code (and assume a plugin named Foo stored in sequel/plugins/foo):
module Sequel::Plugins::Foo
end
This saves 4 lines per example. However, it's recommended that you use the nested example displayed earlier for production code.
The examples also assume that the following model class exists:
class Bar < Sequel::Model
end
== Adding Class Methods
If you want your plugin to add class methods to the model class it is loaded into, define a ClassMethods module under the plugin module:
module Sequel::Plugins::Foo
module ClassMethods
def a
1
end
end
end
This allows a plugin user to do:
Bar.plugin :foo
Bar.a # => 1
== Adding Instance Methods
If you want your plugin to add instance methods to the model class it is loaded into, define an InstanceMethods module under the plugin module:
module Sequel::Plugins::Foo
module InstanceMethods
def a
1
end
end
end
This allows a plugin user to do:
Bar.plugin :foo
Bar.new.a # => 1
== Adding Dataset Methods
If you want your plugin to add methods to the dataset of the model class it is loaded into, define a DatasetMethods module under the plugin module:
module Sequel::Plugins::Foo
module DatasetMethods
def a
1
end
end
end
This allows a plugin user to do:
Bar.plugin :foo
Bar.dataset.a # => 1
== Calling super to get Previous Behavior
No matter if you are dealing with class, instance, or dataset methods, you can call super inside the method to get the previous behavior. This makes it easy to hook into the method, add your own behavior, but still get the previous behavior:
module Sequel::Plugins::Foo
module InstanceMethods
def save
if allow_saving?
super
else
raise Sequel::Error, 'saving not allowed for this object'
end
end
private
def allow_saving?
moon =~ /Waxing/
end
end
end
== Running Code When the Plugin is Loaded
Some plugins require more than just adding methods. Any plugin that requires state is going to have to initialize that state and store it somewhere (generally in the model class itself). If you want to run code when a plugin is loaded (usually to initialize state, but possibly for other reasons), there are two methods you can define to do so. The first method is apply, and it is called only the first time the plugin is loaded into the class, before it is loaded into the class. This is generally only used if a plugin depends on another plugin or for initializing state. You define this method as a singleton method of the plugin module:
module Sequel::Plugins::Foo
def self.apply(model)
model.instance_eval do
plugin :plugin_that_foo_depends_on
@foo_states = {}
end
end
end
The other method is called configure, and it is called every time the plugin is loaded into the class, after it is loaded into the class:
module Sequel::Plugins::Foo
def self.configure(model)
model.instance_eval do
@foo_states[:initial] ||= :baz
end
end
end
Note that in the configure method, you know apply has already been called at least once (so @foo_states will definitely exist).
If you want your plugin to take arguments and/or support options, you handle that by making your apply and configure methods take arguments and/or an options hash. For example, if you want the user to be able to set the initial state via an option, you can do:
module Sequel::Plugins::Foo
def self.apply(model, opts={})
model.instance_eval do
plugin :plugin_foo_depends_on
@foo_states = {}
end
end
def self.configure(model, opts={})
model.instance_eval do
@foo_states[:initial] = opts[:initial_state] || @foo_states[:initial] || :baz
end
end
end
This allows a user of the plugin to do either of the following:
Bar.plugin :foo
Bar.plugin :foo, initial_state: :quux
If you want to require the initial state to be provided as an argument:
module Sequel::Plugins::Foo
def self.apply(model, initial_state)
model.instance_eval do
plugin :plugin_foo_depends_on
@foo_states = {}
end
end
def self.configure(model, initial_state)
model.instance_eval do
@foo_states[:initial] = initial_state
end
end
end
This requires that the user of the plugin specify the argument:
Bar.plugin :foo, :quux
In general you should only require plugin arguments if you absolutely must have a value and there is no good default.
== Handling Subclasses
Sequel::Model uses a copy-on-subclassing approach to model state. So instead of having a model subclass ask its superclass for a value if the subclass doesn't have the value defined, the value should be copied from the parent class to the subclass when the subclass is created. While this can be implemented by overriding the +inherited+ class method, there is an available shortcut that handles most cases:
module Sequel::Plugins::Foo
module ClassMethods
Sequel::Plugins.inherited_instance_variables(self, :@foo_states => :dup)
end
end
Inside the ClassMethods submodule, you call the Sequel::Plugins.inherited_instance_variables method with the first argument being self. The second argument should be a hash describing how to copy the value from the parent class into the subclass. The keys of this hash are instance variable names, including the @ symbol (e.g. :@foo_states). The values of this hash describe how to copy it:
nil :: Use the value directly.
:dup :: Call dup on the value.
:hash_dup :: Create a new hash with the same keys, but a dup of all the values.
Proc :: An arbitrary proc that is called with the parent class value and should return the value to set into the subclass (see the sketch below).
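For example, a minimal sketch using a proc as the copy strategy, duplicating each value in the @foo_states hash from the examples above (equivalent to :hash_dup):

  module Sequel::Plugins::Foo
    module ClassMethods
      # Copy @foo_states into subclasses, duplicating each value
      Sequel::Plugins.inherited_instance_variables(self,
        :@foo_states => proc{|v| h = {}; v.each{|k, val| h[k] = val.dup}; h})
    end
  end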
== Handling Changes to the Model's Dataset
In many plugins, if the model class changes the dataset, you need to change the state for the plugin. While you can do this by overriding the set_dataset class method, there is an available shortcut:
module Sequel::Plugins::Foo
module ClassMethods
Sequel::Plugins.after_set_dataset(self, :set_foo_table)
private
def set_foo_table
@foo_states[:table] = table_name
end
end
end
With this code, any time the model's dataset changes, the state of the plugin will be updated to set the correct table name. This is also called when creating a new model class with a dataset.
== Making Dataset Methods Callable as Class Methods
In some cases, when dataset methods are added, you want to also create a model class method that will call the dataset method, so you can write:
Model.method
instead of:
Model.dataset.method
There is an available shortcut that automatically creates the class methods:
module Sequel::Plugins::Foo
module ClassMethods
Sequel::Plugins.def_dataset_methods(self, :quux)
end
module DatasetMethods
def quux
2
end
end
end
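With the plugin above loaded, the generated class method delegates to the dataset method:

  Bar.plugin :foo
  Bar.quux          # => 2
  Bar.dataset.quux  # => 2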
sequel-5.63.0/doc/mssql_stored_procedures.rdoc
= Stored Procedures in MSSQL
This guide documents the workaround implemented to allow executing stored procedures
in MSSQL, as well as getting the value of output variables.
== Simple Execution
The following stored procedure is used as an example:
CREATE PROCEDURE dbo.SequelTest(
@Input varchar(25),
@Output int OUTPUT
)
AS
SET @Output = LEN(@Input)
RETURN 0
Execute it as follows:
DB.call_mssql_sproc(:SequelTest, {args: ['Input String', :output]})
Use the +:output+ symbol to denote an output variable. The result will contain a
hash of the output variables, as well as the result code and number of affected rows:
{:result => 0, :numrows => 1, :var1 => "1"}
Output variables will be strings by default. To specify their type, include the
SQL type:
DB.call_mssql_sproc(:SequelTest, {args: ['Input String', [:output, 'int']]})
Result:
{:result => 0, :numrows => 1, :var1 => 1}
Output variables will be named +var#{n}+ where n is their zero-indexed position
in the parameter list. To name an output variable, include its name:
DB.call_mssql_sproc(:SequelTest, {args: ['Input String', [:output, nil, 'Output']]})
Result:
{:result => 0, :numrows => 1, :output => "1"}
sequel-5.63.0/doc/object_model.rdoc
= The Sequel Object Model
Sequel's dataset layer is mostly structured as a DSL, so it often obscures
what actual objects are being used. For example, you don't usually create
Sequel objects by calling #new on the object's class (other than Sequel::Model
instances). However, just as almost everything in ruby is an object, all
the methods you call in Sequel deal with objects behind the scenes.
In addition to the standard ruby types, there are four main types of
Sequel-specific objects that you deal with when programming with Sequel:
* Sequel::Database
* Sequel::Dataset
* Sequel::Model
* Sequel::SQL::Expression (and subclasses)
== Sequel::Database
Sequel::Database is the main Sequel object that you deal with. It's usually
created by the Sequel.connect method:
DB = Sequel.connect('postgres://host/database')
A Sequel::Database object represents the database you are connecting to.
Sequel::Database handles things like Sequel::Dataset creation,
dataset = DB[:table]
schema modification,
DB.create_table(:table) do
primary_key :id
String :name
end
and transactions:
DB.transaction do
DB[:table].insert(column: value)
end
Sequel::Database#literal can be used to take any object that Sequel handles
and literalize the object to an SQL string fragment:
DB.literal(DB[:table]) # (SELECT * FROM "table")
== Sequel::Dataset
Sequel::Dataset objects represent SQL queries. They are created from
a Sequel::Database object:
dataset = DB[:table] # SELECT * FROM "table"
dataset = DB.from(table) # SELECT * FROM "table"
dataset = DB.select(:column) # SELECT "column"
Most Sequel::Dataset methods that do not execute queries return modified
copies of the receiver, and the general way to build queries in Sequel is
via a method chain:
dataset = DB[:test].
select(:column1, :column2).
where(column3: 4).
order(:column5)
Such a method chain is a more direct way of doing:
dataset = DB[:test]
dataset = dataset.select(:column1, :column2)
dataset = dataset.where(column3: 4)
dataset = dataset.order(:column5)
When you are ready to execute your query, you call one of the Sequel::Dataset
action methods. For returning rows, you can do:
dataset.first
dataset.all
dataset.each{|row| row}
For inserting, updating, or deleting rows, you can do:
dataset.insert(column: value)
dataset.update(column: value)
dataset.delete
All datasets are related to their database object, which you can access via
the Sequel::Dataset#db method:
dataset.db # => DB
== Sequel::Model
Sequel::Model classes are wrappers around a particular Sequel::Dataset object that
add custom behavior, both custom behavior for the entire set of rows in the dataset
(the model's class methods), custom behavior for a subset of rows in the dataset
(the model's dataset methods), and custom behavior for single rows in the dataset
(the model's instance methods).
Unlike most other Sequel objects, Sequel::Model classes and instances are
generally created by the user using standard ruby syntax:
class Album < Sequel::Model
end
album = Album.new
Model classes that use a non-default Database instance or table name generally
use the Sequel::Model method to create the superclass:
class Album < Sequel::Model(DB[:music_albums])
end
album = Album.new
All model classes are related to their Sequel::Dataset object, which you
can access via the Sequel::Model.dataset method:
Album.dataset # SELECT * FROM "albums"
Additionally, all model classes are related to their dataset's Sequel::Database
object, which you can access via the Sequel::Model.db method:
Album.db # => DB
== Standard Ruby Types
Where possible, Sequel uses ruby's standard types to represent SQL concepts.
In the examples here, the text to the right side of the # sign is the output
if you pass the left side to Sequel::Database#literal.
=== Symbol
Ruby symbols represent SQL identifiers (tables, columns, schemas):
:schema # "schema"
:table # "table"
:column # "column"
=== Integer, Float, BigDecimal, String, Date, Time, DateTime
Ruby's Integer, Float, BigDecimal, String, Date, Time, and DateTime classes
represent similar types in SQL:
1 # 1
1.0 # 1.0
BigDecimal('1.0') # 1.0
"string" # 'string'
Date.new(2012, 5, 6) # '2012-05-06'
Time.now # '2012-05-06 10:20:30'
DateTime.now # '2012-05-06 10:20:30'
=== Hash
Sequel generally uses hash objects to represent equality:
{column: 1} # ("column" = 1)
However, if you use an array as the hash value, it represents inclusion in the value list:
{column: [1, 2, 3]} # ("column" IN (1, 2, 3))
You can also use a Sequel::Dataset instance as the hash value, which will be used to
represent inclusion in the subselect:
{column: DB[:table].select(:column)} # ("column" IN (SELECT "column" FROM "table"))
If you pass true, false, or nil as the hash value, it represents identity:
{column: nil} # ("column" IS NULL)
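true and false values are handled the same way:

  {column: true}  # ("column" IS TRUE)
  {column: false} # ("column" IS FALSE)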
If you pass a Range object, it will be used as the bounds for a greater than and less than
operation:
{column: 1..2} # (("column" >= 1) AND ("column" <= 2))
{column: 1...3} # (("column" >= 1) AND ("column" < 3))
If you pass a Regexp object as the value, it will be used as a regular expression
operation if the database supports it:
{column: /a.*b/} # ("column" ~ 'a.*b')
=== Array
Sequel generally treats arrays as an SQL value list:
[1, 2, 3] # (1, 2, 3)
However, if all members of the array are arrays with two members, then the array is treated like
a hash:
[[:column, 1]] # ("column" = 1)
The advantage of using an array over a hash for such a case is that a hash cannot include
multiple objects with the same key, while the array can.
== Sequel::SQL::Expression (and subclasses)
If Sequel needs to represent an SQL concept that does not map directly to an existing
ruby class, it will generally use a Sequel::SQL::Expression subclass to represent that
concept.
Some of the examples below show examples that require the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc].
=== Sequel::LiteralString
Sequel::LiteralString is not actually a Sequel::SQL::Expression subclass. It is
a subclass of String, but it is treated specially by Sequel: it is used as literal
SQL code, instead of as an SQL string that needs to be escaped:
Sequel::LiteralString.new("co'de") # co'de
The following shortcuts exist for creating Sequel::LiteralString objects:
Sequel.lit("co'de")
"co'de".lit # core_extensions extension
=== Sequel::SQL::Blob
Sequel::SQL::Blob is also a String subclass, but it is treated as an SQL blob
instead of an SQL string, as SQL blobs often have different literalization rules
than SQL strings do:
Sequel::SQL::Blob.new("blob")
The following shortcuts exist for creating Sequel::SQL::Blob objects:
Sequel.blob("blob")
"blob".to_sequel_blob # core_extensions extension
=== Sequel::SQLTime
Sequel::SQLTime is a Time subclass. However, it is treated specially by Sequel
in that only the time component is literalized, not the date part. This type
is used to represent SQL time types, which do not contain date information.
Sequel::SQLTime.create(10, 20, 30) # '10:20:30'
=== Sequel::SQL::ValueList
Sequel::SQL::ValueList objects always represent SQL value lists. Most ruby arrays
represent value lists in SQL, except that arrays of two-element arrays are treated
similar to hashes. Such arrays can be wrapped in this class to ensure they are
treated as value lists. This is important when doing a composite key IN lookup,
which some databases support. Sequel::SQL::ValueList is an ::Array subclass with
no additional behavior, so it can be instantiated like a normal array:
Sequel::SQL::ValueList.new([[1, 2], [3, 4]]) # ((1, 2), (3, 4))
In general, you don't need to create Sequel::SQL::ValueList instances manually,
they will be created automatically where they are required in most cases.
The following shortcuts exist for creating Sequel::SQL::ValueList objects:
Sequel.value_list([[1, 2], [3, 4]])
[[1, 2], [3, 4]].sql_value_list # core_extensions extension
=== Sequel::SQL::Identifier
Sequel::SQL::Identifier objects represent single identifiers. The main reason for
their existence is they support many additional Sequel specific methods that are
not supported on plain symbols:
Sequel::SQL::Identifier.new(:column) # "column"
The following shortcuts exist for creating Sequel::SQL::Identifier objects:
Sequel[:column]
Sequel.identifier(:column)
:column.identifier # core_extensions extension
=== Sequel::SQL::QualifiedIdentifier
Sequel::SQL::QualifiedIdentifier objects represent qualified identifiers:
Sequel::SQL::QualifiedIdentifier.new(:table, :column) # "table"."column"
The following shortcuts exist for creating Sequel::SQL::QualifiedIdentifier objects:
Sequel[:table][:column]
Sequel.qualify(:table, :column)
:column.qualify(:table) # core_extensions extension
=== Sequel::SQL::AliasedExpression
Sequel::SQL::AliasedExpression objects represent aliased expressions in SQL. The alias
is treated as an identifier, but the expression can be an arbitrary Sequel expression:
Sequel::SQL::AliasedExpression.new(:column, :alias)
# "column" AS "alias"
Derived column lists are also supported:
Sequel::SQL::AliasedExpression.new(:table, :alias, [:column_alias1, :column_alias2])
# "table" AS "alias"("column_alias1", "column_alias2")
The following shortcuts exist for creating Sequel::SQL::AliasedExpression objects:
Sequel[:column].as(:alias)
Sequel.as(:column, :alias)
Sequel.as(:column, :alias, [:column_alias1, :column_alias2])
:column.as(:alias) # core_extensions or symbol_as extension
=== Sequel::SQL::ComplexExpression
Sequel::SQL::ComplexExpression objects mostly represent SQL operations with arguments.
There are separate subclasses for representing boolean operations such as AND and OR
(Sequel::SQL::BooleanExpression), mathematical operations such as + and -
(Sequel::SQL::NumericExpression), and string operations such as || and LIKE
(Sequel::SQL::StringExpression).
Sequel::SQL::BooleanExpression.new(:OR, :col1, :col2) # ("col1" OR "col2")
Sequel::SQL::NumericExpression.new(:+, :column, 2) # ("column" + 2)
Sequel::SQL::StringExpression.new(:"||", :column, "b") # ("column" || 'b')
There are many shortcuts for creating Sequel::SQL::ComplexExpression objects:
Sequel.or(:col1, :col2)
:col1 | :col2 # core_extensions extension
Sequel.+(:column, 2)
:column + 2 # core_extensions extension
Sequel.join([:column, 'b'])
:column + 'b' # core_extensions extension
=== Sequel::SQL::CaseExpression
Sequel::SQL::CaseExpression objects represent SQL CASE expressions, which represent
branches in the database, similar to ruby case expressions. Like ruby's case
expressions, these case expressions can have an implicit value you are comparing
against:
Sequel::SQL::CaseExpression.new({2=>1}, 0, :a) # CASE "a" WHEN 2 THEN 1 ELSE 0 END
Or they can treat each condition separately:
Sequel::SQL::CaseExpression.new({{a: 2}=>1}, 0) # CASE WHEN ("a" = 2) THEN 1 ELSE 0 END
In addition to providing a hash, you can also provide an array of two-element arrays:
Sequel::SQL::CaseExpression.new([[2, 1]], 0, :a) # CASE "a" WHEN 2 THEN 1 ELSE 0 END
The following shortcuts exist for creating Sequel::SQL::CaseExpression objects:
Sequel.case({2=>1}, 0, :a)
Sequel.case({{a: 2}=>1}, 0)
{2=>1}.case(0, :a) # core_extensions extension
{{a: 2}=>1}.case(0) # core_extensions extension
=== Sequel::SQL::Cast
Sequel::SQL::Cast objects represent CAST expressions in SQL, which perform explicit
typecasting in the database. With Sequel, you provide the expression to typecast
as well as the type to cast to. The type can either be a generic type, given as
a ruby class:
Sequel::SQL::Cast.new(:a, String) # CAST("a" AS text)
or a specific type, given as a symbol or string:
Sequel::SQL::Cast.new(:a, :int4) # CAST("a" AS int4)
The following shortcuts exist for creating Sequel::SQL::Cast objects:
Sequel.cast(:a, String)
Sequel.cast(:a, :int4)
:a.cast(String) # core_extensions extension
:a.cast(:int4) # core_extensions extension
=== Sequel::SQL::ColumnAll
Sequel::SQL::ColumnAll objects represent the selection of all columns from a table:
Sequel::SQL::ColumnAll.new(:table) # "table".*
The following shortcuts exist for creating Sequel::SQL::ColumnAll objects:
Sequel[:table].*
Sequel[:schema][:table].*
:table.* # core_extensions extension
=== Sequel::SQL::Constant
Sequel::SQL::Constant objects represent constants or pseudo-constants in SQL,
such as TRUE, NULL, and CURRENT_TIMESTAMP. These are not designed to be created
or used by the end user, but some existing values are predefined under the
Sequel namespace:
Sequel::CURRENT_TIMESTAMP # CURRENT_TIMESTAMP
These objects are usually used as values in queries:
DB[:table].insert(time: Sequel::CURRENT_TIMESTAMP)
=== Sequel::SQL::DelayedEvaluation
Sequel::SQL::DelayedEvaluation objects represent an evaluation that is delayed
until query literalization.
Sequel::SQL::DelayedEvaluation.new(proc{some_model.updated_at})
The following shortcut exists for creating Sequel::SQL::DelayedEvaluation
objects:
Sequel.delay{some_model.updated_at}
Note how Sequel.delay requires a block, while Sequel::SQL::DelayedEvaluation.new
accepts a generic callable object.
Let's say you wanted a dataset for the number of objects greater than some
attribute of another object:
ds = DB[:table].where{updated_at > some_model.updated_at}
The problem with the above query is that it evaluates "some_model.updated_at"
statically, so if you change some_model.updated_at later, it won't affect this
dataset. You can use Sequel.delay to fix this:
ds = DB[:table].where{updated_at > Sequel.delay{some_model.updated_at}}
This will evaluate "some_model.updated_at" every time you literalize the
dataset (usually every time it is executed).
=== Sequel::SQL::Function
Sequel::SQL::Function objects represents database function calls, which take a function
name and any arguments:
Sequel::SQL::Function.new(:func, :a, 2) # func("a", 2)
The following shortcuts exist for creating Sequel::SQL::Function objects:
Sequel.function(:func, :a, 2)
:func.sql_function(:a, 2) # core_extensions extension
=== Sequel::SQL::JoinClause
Sequel::SQL::JoinClause objects represent SQL JOIN clauses. They are usually
not created manually, as the Dataset join methods create them automatically.
=== Sequel::SQL::PlaceholderLiteralString
Sequel::SQL::PlaceholderLiteralString objects represent a literal SQL string
with placeholders for variables. There are three types of these objects.
The first type uses question marks with multiple placeholder value objects:
Sequel::SQL::PlaceholderLiteralString.new('? = ?', [:a, 1]) # "a" = 1
The second uses named placeholders with colons and a hash of placeholder
value objects:
Sequel::SQL::PlaceholderLiteralString.new(':b = :v', [{b: :a, v: 1}]) # "a" = 1
The third uses an array instead of a string, with multiple placeholder
objects, each one going in between the members of the array:
Sequel::SQL::PlaceholderLiteralString.new(['', ' = '], [:a, 1]) # "a" = 1
For any of these three forms, you can also include a third argument for whether
to include parentheses around the string:
Sequel::SQL::PlaceholderLiteralString.new('? = ?', [:a, 1], true) # ("a" = 1)
The following shortcuts exist for creating Sequel::SQL::PlaceholderLiteralString
objects:
Sequel.lit('? = ?', :a, 1)
Sequel.lit(':b = :v', b: :a, v: 1)
Sequel.lit(['', ' = '], :a, 1)
'? = ?'.lit(:a, 1) # core_extensions extension
':b = :v'.lit(b: :a, v: 1) # core_extensions extension
=== Sequel::SQL::OrderedExpression
Sequel::SQL::OrderedExpression objects represent ascending or descending sorts,
used by the Dataset order methods. They take an expression, and whether to sort
it ascending or descending:
Sequel::SQL::OrderedExpression.new(:a) # "a" DESC
Sequel::SQL::OrderedExpression.new(:a, false) # "a" ASC
Additionally, they take an options hash, which can be used to specify how nulls
can be sorted:
Sequel::SQL::OrderedExpression.new(:a, true, nulls: :first) # "a" DESC NULLS FIRST
Sequel::SQL::OrderedExpression.new(:a, false, nulls: :last) # "a" ASC NULLS LAST
The following shortcuts exist for creating Sequel::SQL::OrderedExpression objects:
Sequel.asc(:a)
Sequel.desc(:a)
Sequel.asc(:a, nulls: :first)
Sequel.desc(:a, nulls: :last)
:a.asc # core_extensions extension
:a.desc # core_extensions extension
:a.asc(nulls: :first) # core_extensions extension
:a.desc(nulls: :last) # core_extensions extension
=== Sequel::SQL::Subscript
Sequel::SQL::Subscript objects represent SQL database array access. They take an
expression and an array of indexes (or a range for an SQL array slice):
Sequel::SQL::Subscript.new(:a, [1]) # "a"[1]
Sequel::SQL::Subscript.new(:a, [1, 2]) # "a"[1, 2]
Sequel::SQL::Subscript.new(:a, [1..2]) # "a"[1:2]
The following shortcuts exist for creating Sequel::SQL::Subscript objects:
Sequel.subscript(:a, 1)
Sequel.subscript(:a, 1, 2)
Sequel.subscript(:a, 1..2)
:a.sql_subscript(1) # core_extensions extension
:a.sql_subscript(1, 2) # core_extensions extension
:a.sql_subscript(1..2) # core_extensions extension
=== Sequel::SQL::VirtualRow
Sequel::SQL::VirtualRow is a BasicObject subclass that is the backbone behind the
block expression support:
DB[:table].where{a < 1}
In the above code, the block is instance-evaled inside a VirtualRow instance.
These objects are usually not instantiated manually. See the
{Virtual Row Guide}[rdoc-ref:doc/virtual_rows.rdoc] for details.
=== Sequel::SQL::Window
Sequel::SQL::Window objects represent the windows used by Sequel::SQL::Function.
They use a hash-based API, supporting the :frame, :order, :partition, and :window
options:
Sequel::SQL::Window.new(order: :a) # (ORDER BY "a")
Sequel::SQL::Window.new(partition: :a) # (PARTITION BY "a")
Sequel::SQL::Window.new(partition: :a, frame: :all)
# (PARTITION BY "a" ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
=== Sequel::SQL::Wrapper
Sequel::SQL::Wrapper objects wrap arbitrary objects so that they can be used
in Sequel expressions:
o = Object.new
def o.sql_literal_append(ds, sql) sql << "foo" end
Sequel::SQL::Wrapper.new(o) # foo
The advantage of wrapping the object is that you can then call Sequel methods
on the wrapper that would not be defined on the object itself:
Sequel::SQL::Wrapper.new(o) + 1 # (foo + 1)
You can use the Sequel.[] method to wrap any object:
Sequel[o]
However, note that Sequel.[] does not necessarily return a Sequel::SQL::Wrapper
object; it may return a different class of object, such as a
Sequel::SQL::ComplexExpression subclass.
sequel-5.63.0/doc/opening_databases.rdoc
= Connecting to a database
All Sequel activity begins with connecting to a database, which creates a
Sequel::Database object. The Database object is used to create datasets and execute
queries. Sequel provides a powerful and flexible mechanism for connecting to
databases. There are two main ways to establish database connections:
1. Using the Sequel.connect method
2. Using the specialized adapter method (Sequel.sqlite, Sequel.postgres, etc.)
The connection options needed depend on the adapter being used, though most adapters
share the same basic connection options.
If you are only connecting to a single database, it is recommended that you store the
database object in a constant named DB. This is not required, but it is the
convention that most Sequel code uses.
== Using the Sequel.connect method
The connect method usually takes a well-formed URI, which is parsed into connection options needed to open
the database connection. The scheme/protocol part of the URI is used to determine the adapter to use:
DB = Sequel.connect('postgres://user:password@localhost/blog') # Uses the postgres adapter
You can use URI query parameters to specify options:
DB = Sequel.connect('postgres://localhost/blog?user=user&password=password')
You can also pass an additional option hash with the connection string:
DB = Sequel.connect('postgres://localhost/blog', user: 'user', password: 'password')
You can also just use an options hash without a connection string. If you do this, you must
provide the adapter to use:
DB = Sequel.connect(adapter: 'postgres', host: 'localhost', database: 'blog', user: 'user', password: 'password')
All of the above statements are equivalent.
== Using the specialized adapter method
The specialized adapter method is similar to Sequel.connect with an options hash, except that it
automatically populates the :adapter option and assumes the first argument is the :database option,
unless the first argument is a hash. So the following statements are equivalent to the previous statements.
DB = Sequel.postgres('blog', host: 'localhost', user: 'user', password: 'password')
DB = Sequel.postgres(host: 'localhost', user: 'user', password: 'password', database: 'blog')
Note that using an adapter method forces the use of the specified adapter, not a database type, even
though some adapters have the same name as the database type. So if you
want to connect to SQLite, for example, you can do so using the sqlite, amalgalite, and jdbc adapters.
If you want to connect to SQLite on JRuby using the jdbc adapter, for example, you should not use Sequel.sqlite,
as that uses the C-based sqlite3 gem. Instead, Sequel.jdbc would be appropriate (though,
as mentioned below, using Sequel.connect is recommended instead of Sequel.jdbc).
== Passing a block to either method
Both the Sequel.connect method and the specialized adapter methods take a block. If you
provide a block to the method, Sequel will create a Database object and pass it as an argument
to the block. When the block returns, Sequel will disconnect the database connection.
For example:
Sequel.connect('sqlite://blog.db'){|db| puts db[:users].count}
Note that if you do not pass a block to Sequel.connect, Sequel will automatically retain a
reference to the object in the Sequel::DATABASES array. So calling +Sequel.connect+
multiple times (say, once per request) can result in a memory leak. For any application where
database access is needed for a long period of time, it's best to store the result of
Sequel.connect in a constant, as recommended above.
== General connection options
These options are shared by all adapters unless otherwise noted.
:adapter :: The adapter to use
:database :: The name of the database to which to connect
:extensions :: Extensions to load into this Database instance. Can be a symbol, array of symbols,
or string with extensions separated by commas. These extensions are loaded after
connections are made by the :preconnect option.
:cache_schema :: Whether schema should be cached for this database (true by default)
:default_string_column_size :: The default size for string columns (255 by default)
:host :: The hostname of the database server to which to connect
:keep_reference :: Whether to keep a reference to the database in Sequel::DATABASES (true by default)
:logger :: A specific SQL logger to log to
:loggers :: An array of SQL loggers to log to
:log_connection_info :: Whether to include connection information in log messages (false by default)
:log_warn_duration :: The number of seconds after which queries are logged at the :warn level
:password :: The password for the user account
:preconnect :: Whether to automatically make the maximum number of connections when setting up the pool.
Can be set to "concurrently" to connect in parallel.
:preconnect_extensions :: Similar to the :extensions option, but loads the extensions before the
connections are made by the :preconnect option.
:quote_identifiers :: Whether to quote identifiers.
:servers :: A hash with symbol keys and hash or proc values, used with primary/replica and sharded database configurations
:sql_log_level :: The level at which to issue queries to the loggers (:info by default)
:test :: Whether to test that a valid database connection can be made (true by default)
:user :: The user account name to use when logging in
The following options can be specified and are passed to the database's internal connection pool.
:after_connect :: A callable object called after each new connection is made, with the
connection object (and server argument if the callable accepts 2 arguments),
useful for customizations that you want to apply to all connections (nil by default).
:connect_sqls :: An array of sql strings to execute on each new connection, after :after_connect runs.
:max_connections :: The maximum size of the connection pool (4 connections by default on most databases)
:pool_timeout :: The number of seconds to wait if a connection cannot be acquired before raising an error (5 seconds by default)
:single_threaded :: Whether to use a single-threaded (non-thread safe) connection pool
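For example, a minimal sketch combining several pool options (the connection string and values are placeholders):

  DB = Sequel.connect('postgres://user:password@localhost/blog',
    max_connections: 10,
    pool_timeout: 10,
    connect_sqls: ['SET statement_timeout = 10000'])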
== Adapter specific connection options
The following sections explain the options and behavior specific to each adapter.
If the library the adapter requires is different from the name of the adapter
scheme, it is listed specifically; otherwise, you can assume that it requires the
library with the same name.
=== ado
Requires: win32ole
The ADO adapter provides connectivity to ADO databases in Windows. It relies
on the WIN32OLE library, so it isn't usable on other operating systems (except
possibly through WINE, but that's unlikely).
The following options are supported:
:command_timeout :: Sets the time in seconds to wait while attempting
to execute a command before cancelling the attempt and generating
an error. Specifically, it sets the ADO CommandTimeout property.
:driver :: The driver to use in the ADO connection string. If not provided, a default
of "SQL Server" is used.
:conn_string :: The full ADO connection string. If this is provided,
the usual options are ignored.
:provider :: Sets the Provider of this ADO connection (for example, "SQLOLEDB").
If you don't specify a provider, the default one used by WIN32OLE
has major problems, such as creating a new native database connection
for every query, which breaks things such as transactions and temporary tables.
Pay special attention to the :provider option, as without specifying a provider,
many things will be broken. The SQLNCLI10 and SQLNCLI11 providers work well if you
are connecting to Microsoft SQL Server, but it is not the default as it depends on
those providers being installed.
Example connections:
# SQL Server
Sequel.connect('ado:///sequel_test?host=server%5cdb_instance')
Sequel.connect('ado://user:password@server/database?host=server%5cdb_instance&provider=SQLNCLI10')
# Access 2007
Sequel.ado(conn_string: 'Provider=Microsoft.ACE.OLEDB.12.0;Data Source=drive:\\path\\filename.accdb')
# Access 2000
Sequel.ado(conn_string: 'Provider=Microsoft.Jet.OLEDB.4.0;Data Source=drive:\\path\\filename.mdb')
# Excel 2000 (for table names, use a dollar after the sheet name, e.g. Sheet1$)
Sequel.ado(conn_string: 'Provider=Microsoft.Jet.OLEDB.4.0;Data Source=drive:\\path\\filename.xls;Extended Properties=Excel 8.0;')
=== amalgalite
Amalgalite is a ruby extension that provides self-contained access to SQLite,
so you don't need to install SQLite separately. As amalgalite is a file-backed
database, the :host, :user, and :password options are not used.
:database :: The name of the database file
:timeout :: The busy timeout period given in milliseconds
Without a database argument, assumes a memory database, so you can do:
Sequel.amalgalite
Handles paths in the connection string similar to the SQLite adapter, so see
the sqlite section below for details.
=== ibmdb
Requires: ibm_db
This connects to DB2 using IBM_DB. This is the recommended adapter if you are
using a C-based ruby to connect to DB2.
=== jdbc
Requires: java
Houses Sequel's JDBC support when running on JRuby.
Support for individual database types is done using subadapters.
There are currently subadapters for DB2, Derby, H2, HSQLDB, JTDS,
MySQL, Oracle, PostgreSQL, SQLAnywhere, SQLite, and SQL Server.
For Derby, H2, HSQLDB, JTDS, MySQL, PostgreSQL, and SQLite, the adapters can use
the `jdbc-*` gems; for the others, you need to have the `.jar` in your CLASSPATH
or load the Java class manually before calling Sequel.connect.
Note that when using a JDBC adapter, the best way to use Sequel
is via Sequel.connect using a connection string, NOT Sequel.jdbc. Use the JDBC connection
string when connecting, which will be in a different format than
the native connection string. The connection string should start
with 'jdbc:'. For PostgreSQL, use 'jdbc:postgresql:', and for
SQLite you do not need 2 preceding slashes for the database name
(use no preceding slashes for a relative path, and one preceding
slash for an absolute path).
Sequel does no preprocessing of JDBC connection strings, it passes them directly to JDBC.
So if you have problems getting a connection string to work, look up the
documentation for the JDBC driver.
The jdbc adapter does not handle common options such as +:host+,
+:user+, and +:port+. If you must use a hash of options when connecting,
provide the full JDBC connection string as the :uri option.
Example connection strings:
jdbc:sqlite::memory:
jdbc:postgresql://localhost/database?user=username
jdbc:mysql://localhost/test?user=root&password=root&serverTimezone=UTC
jdbc:h2:mem:
jdbc:hsqldb:mem:mymemdb
jdbc:derby:memory:myDb;create=true
jdbc:sqlserver://localhost;database=sequel_test;integratedSecurity=true
jdbc:jtds:sqlserver://localhost/sequel_test;user=sequel_test;password=sequel_test
jdbc:oracle:thin:user/password@localhost:1521:database
jdbc:db2://localhost:3700/database:user=user;password=password;
jdbc:sqlanywhere://localhost?DBN=Test;UID=user;PWD=password
You can also use JNDI connection strings:
jdbc:jndi:java:comp/env/jndi_resource_name
The following additional options are supported:
:convert_types :: If set to false, does not attempt to convert some Java types to ruby types.
Setting to false roughly doubles performance when selecting large numbers of rows.
Note that you can't provide this option inside the connection string (as that is passed
directly to JDBC), you have to pass it as a separate option.
:driver :: Specify the Java driver class to use to connect to the database. This only has
an effect if the database type is not recognized from the connection string,
and only helps cases where java.sql.DriverManager.getConnection does not
return a connection.
:login_timeout :: Set the login timeout on the JDBC connection (in seconds).
:jdbc_properties :: A hash of properties to set. This skips the normal connection process of using
java.sql.DriverManager.getConnection and tries the backup process of using
driver.new.connect for the appropriate driver.
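For example, because :convert_types cannot be given inside the connection string, it must be passed as a separate option (a sketch reusing one of the example connection strings above):

  DB = Sequel.connect('jdbc:postgresql://localhost/database?user=username',
    convert_types: false)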
There are a few issues with specific jdbc driver gems:
jdbc-h2 :: jdbc-h2 versions greater than 1.3.175 have issues with ORDER BY not working correctly in some cases.
jdbc-mysql :: Depending on the configuration of the MySQL server, jdbc-mysql versions greater than 8 may complain
about the server time zone being unrecognized. You can either use an older jdbc-mysql version,
or you can specify the +serverTimezone+ option in the connection string, as shown in the example
jdbc:mysql connection string above.
=== mysql
Requires: mysql
This should work with the mysql gem (C extension) and the ruby-mysql gem (pure ruby).
The following additional options are supported:
:auto_is_null :: If set to true, makes "WHERE primary_key IS NULL" select the last inserted id.
:charset :: Same as :encoding, :encoding takes precedence.
:compress :: Whether to compress data sent/received via the socket connection.
:config_default_group :: The default group to read from in the MySQL config file (defaults to "client")
:config_local_infile :: If provided, sets the Mysql::OPT_LOCAL_INFILE option on the connection with the given value.
:disable_split_materialized :: Set split_materialized=off in the optimizer settings. Necessary to pass the associations
integration tests in MariaDB 10.5+, due to an unfixed bug in the optimizer.
:encoding :: Specify the encoding/character set to use for the connection.
:fractional_seconds :: On MySQL 5.6.5+, this option is recognized and will include fractional seconds in
time/timestamp values, as well as have the schema method create columns that can contain
fractional seconds by default. This option is also supported on other adapters that connect
to MySQL.
:socket :: Can be used to specify a Unix socket file to connect to instead of a TCP host and port.
:sql_mode :: Set the sql_mode(s) for a given connection. Can be single symbol or string,
or an array of symbols or strings (e.g. sql_mode: [:no_zero_date, :pipes_as_concat]).
:timeout :: Sets the wait_timeout for the connection, defaults to 1 month.
:read_timeout :: Set the timeout in seconds for reading back results to a query.
:connect_timeout :: Set the timeout in seconds before a connection attempt is abandoned
(may not be supported when using MariaDB 10.2+ client libraries).
The :sslkey, :sslcert, :sslca, :sslcapath, and :sslcipher options (in that order) are passed to the Mysql#ssl_set method
if either the :sslca or :sslkey option is given.
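A minimal sketch combining a few of these options (host, credentials, and values are placeholders):

  DB = Sequel.connect('mysql://user:password@localhost/blog',
    encoding: 'utf8mb4',
    sql_mode: [:no_zero_date, :pipes_as_concat],
    read_timeout: 30)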
=== mysql2
This is a newer MySQL adapter that does typecasting in C, so it is often faster than the
mysql adapter. The options given are passed to Mysql2::Client.new, see the mysql2 documentation
for details on what options are supported. The :timeout, :auto_is_null, :sql_mode, and :disable_split_materialized
options supported by the mysql adapter are also supported for mysql2 adapter (and any other adapters connecting to
mysql, such as the jdbc/mysql adapter).
=== odbc
The ODBC adapter allows you to connect to any database with the appropriate ODBC drivers installed.
The :database option given to the ODBC adapter should be the DSN (Data Source Name) from the ODBC configuration.
Sequel.odbc('mydb', user: "user", password: "password")
The :host and :port options are not respected. The following additional options are supported:
:db_type :: Can be specified as 'mssql', 'progress', or 'db2' to use SQL syntax specific to those databases.
:drvconnect :: Can be given an ODBC connection string, and will use ODBC::Database#drvconnect to
do the connection. Typical usage would be: Sequel.odbc(drvconnect: 'driver={...};...')
=== oracle
Requires: oci8
The following additional options are supported:
:autosequence :: Set to true to use Sequel's conventions to guess the sequence to use for datasets. False
by default.
:prefetch_rows :: The number of rows to prefetch. Defaults to 100, a larger number can be specified
and may improve performance when retrieving a large number of rows.
:privilege :: The Oracle privilege level.
=== postgres
Requires: pg (or sequel/postgres-pr or postgres-pr/postgres-compat if pg is not available)
The Sequel postgres adapter works with the pg, sequel-postgres-pr, jeremyevans-postgres-pr, and postgres-pr ruby libraries.
The pg library is the best supported, as it supports real bound variables and prepared statements.
If the pg library is being used, Sequel will also attempt to load the sequel_pg library, which is
a C extension that optimizes performance when Sequel is used with pg. All users of Sequel who
use pg are encouraged to install sequel_pg. For users who want to use one of the postgres-pr
libraries to avoid issues with C extensions, it is recommended to use sequel-postgres-pr.
The following additional options are supported:
:charset :: Same as :encoding, :encoding takes precedence
:convert_infinite_timestamps :: Whether infinite timestamps/dates should be converted on retrieval. By default, no
conversion is done, so an error is raised if you attempt to retrieve an infinite
timestamp/date. You can set this to :nil to convert to nil, :string to leave
as a string, or :float to convert to an infinite float.
:conn_str :: Use connection string (in form of `host=x port=y ...`). Ignores all other options, only supported with pg
library. See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING and
https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS for format and list of supported
options.
:connect_timeout :: Set the number of seconds to wait for a connection (default 20, only respected
if using the pg library).
:driver_options :: A hash of options to pass to the underlying driver (only respected if using the pg library)
:encoding :: Set the client_encoding to the given string
:notice_receiver :: A proc that will be called with the PGresult objects that have notice or warning messages.
The default notice receiver just prints the messages to stderr, but this can be used
to handle notice/warning messages differently (only respected if using the pg library).
:sslmode :: Set to 'disable', 'allow', 'prefer', 'require', 'verify-ca', or 'verify-full' to choose how to treat SSL (only
respected if using the pg library)
:sslrootcert :: Specify the path to the root SSL certificate to use.
:search_path :: Set to the schema search_path. This can either be a single string containing the schemas
separated by commas (for use via a URL: postgres:///?search_path=schema1,schema2), or it
can be an array of strings (for use via an option:
Sequel.postgres(search_path: ['schema1', 'schema2'])).
:use_iso_date_format :: This can be set to false to not force the ISO date format. Sequel forces
it by default to allow for an optimization.
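For example, a minimal sketch using some of these options (all values are placeholders):

  DB = Sequel.connect('postgres://localhost/blog',
    user: 'user',
    password: 'password',
    connect_timeout: 10,
    sslmode: 'require',
    search_path: ['schema1', 'schema2'])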
=== sqlanywhere
The sqlanywhere driver works off connection strings, so a connection string
is built based on the url/options hash provided. The following additional
options are respected:
:commlinks :: specify the CommLinks connection string option
:conn_string :: specify the connection string to use, ignoring all other options
:connection_name :: specify the ConnectionName connection string option
:encoding :: specify the CharSet connection string option
=== sqlite
Requires: sqlite3
As SQLite is a file-based database, the :host and :port options are ignored, and
the :database option should be a path to the file.
Examples:
# In Memory databases:
Sequel.sqlite
Sequel.connect('sqlite:/')
Sequel.sqlite(':memory:')
# Relative Path
Sequel.sqlite('blog.db')
Sequel.sqlite('./blog.db')
Sequel.connect('sqlite://blog.db')
# Absolute Path
Sequel.sqlite('/var/sqlite/blog.db')
Sequel.connect('sqlite:///var/sqlite/blog.db')
The following additional options are supported:
:readonly :: open database in read-only mode
:timeout :: the busy timeout to use in milliseconds (default: 5000).
:setup_regexp_function :: Whether to setup a REGEXP function in the underlying SQLite3::Database object. Doing so
allows you to use regexp support in dataset expressions. Note that this creates a new
Regexp object per call to the function, so it is not an efficient implementation.
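For example, with the REGEXP function set up, Regexp objects can be used in dataset filters (a minimal sketch; the users table is a placeholder and the SQL shown is approximate):

  DB = Sequel.sqlite('blog.db', setup_regexp_function: true)
  DB[:users].where(name: /^A/).all
  # SELECT * FROM `users` WHERE (`name` REGEXP '^A')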
Note that SQLite memory databases are restricted to a single connection by
default. This is because SQLite does not allow multiple connections to
a single memory database. For this reason, Sequel sets the maximum number
of connections in the connection pool to 1 by default when an SQLite memory
database is used. Attempts to force the use of more than 1 connection
can result in weird behavior, since the connections will be to separate
memory databases.
=== tinytds
Requires: tiny_tds
The connection options are passed directly
to tiny_tds, except that the tiny_tds :username option is set to
the Sequel :user option. If you want to use an entry in the freetds.conf file, you
should specify the :dataserver option with that name as the value. Some other
options that you may want to set are :login_timeout, :timeout, :tds_version, :azure,
:appname, and :encoding, see the tiny_tds README for details.
Other Sequel specific options:
:ansi :: Set to true to enable the ANSI compatibility settings when connecting
(ANSI_NULLS, ANSI_PADDING, ANSI_WARNINGS, ANSI_NULL_DFLT_ON, QUOTED_IDENTIFIER,
CONCAT_NULL_YIELDS_NULL).
:server_version :: Override the server version to use (9000000 = SQL Server 2005).
This also works on any other adapter that connects to Microsoft
SQL Server.
:textsize :: Override the default TEXTSIZE setting for this connection. The FreeTDS
default is small (around 64000 bytes), but can be set up to around 2GB.
This should be specified as an integer. If you plan on setting large
text or blob values via tinytds, you should use this option or modify
your freetds.conf file.
sequel-5.63.0/doc/postgresql.rdoc
= PostgreSQL-specific Support in Sequel
Sequel's core database and dataset functions are designed to support the features
shared by most common SQL database implementations. However, Sequel's database
adapters extend the core support to include support for database-specific features.
By far the most extensive database-specific support in Sequel is for PostgreSQL. This
support is roughly broken into the following areas:
* Database Types
* DDL Support
* DML Support
* sequel_pg
Note that while this guide is extensive, it is not exhaustive. There are additional
rarely used PostgreSQL features that Sequel supports which are not mentioned here.
== Adapter/Driver Specific Support
Some of this support depends on the specific adapter or underlying driver in use.
postgres only will denote support specific to the postgres adapter (i.e.
not available when connecting to PostgreSQL via the jdbc adapter).
postgres/pg only will denote support specific to the postgres adapter when
pg is used as the underlying driver (i.e. not available when using the postgres-pr
driver).
== PostgreSQL-specific Database Type Support
Sequel's default support on PostgreSQL only includes common database types. However,
Sequel ships with support for many PostgreSQL-specific types via extensions. In general,
you load these extensions via Database#extension. For example, to load support
for arrays, you would do:
DB.extension :pg_array
The following PostgreSQL-specific type extensions are available:
pg_array :: arrays (single and multidimensional, for any scalar type), as a ruby Array-like object
pg_hstore :: hstore, as a ruby Hash-like object
pg_inet :: inet/cidr, as ruby IPAddr objects
pg_interval :: interval, as ActiveSupport::Duration objects
pg_json :: json, as either ruby Array-like or Hash-like objects
pg_range :: ranges (for any scalar type), as a ruby Range-like object
pg_row :: row-valued/composite types, as a ruby Hash-like or Sequel::Model object
In general, these extensions just add support for Database objects to return retrieved
column values as the appropriate type and support for literalizing
the objects correctly for use in an SQL string, or using them as bound variable values (postgres/pg and jdbc/postgres only).
There are also type-specific extensions that make it easy to use database functions
and operators related to the type. These extensions are:
pg_array_ops :: array-related functions and operators
pg_hstore_ops :: hstore-related functions and operators
pg_json_ops :: json-related functions and operators
pg_range_ops :: range-related functions and operators
pg_row_ops :: row-valued/composite type syntax support
These extensions aren't Database specific; they are global extensions, so you should
load them via Sequel.extension, after loading support for the specific types
into the Database instance:
DB.extension :pg_array
Sequel.extension :pg_array_ops
With regard to common database types, please note that the generic String type
is +text+ on PostgreSQL and not varchar(255) as it is on some other
databases. +text+ is PostgreSQL's recommended type for storage of text data,
and is more similar to Ruby's String type as it allows for unlimited length.
If you want to set a maximum size for a text column, you must specify a
:size option. This will use a varchar($size) type and
impose a maximum size for the column.
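For example:

  DB.create_table(:table) do
    String :a            # a text
    String :b, size: 50  # b varchar(50)
  end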
== PostgreSQL-specific DDL Support
=== Exclusion Constraints
In +create_table+ blocks, you can use the +exclude+ method to set up exclusion constraints:
DB.create_table(:table) do
daterange :during
exclude([[:during, '&&']], name: :table_during_excl)
end
# CREATE TABLE "table" ("during" daterange,
# CONSTRAINT "table_during_excl" EXCLUDE USING gist ("during" WITH &&))
You can also add exclusion constraints in +alter_table+ blocks using add_exclusion_constraint:
DB.alter_table(:table) do
add_exclusion_constraint([[:during, '&&']], name: :table_during_excl)
end
# ALTER TABLE "table" ADD CONSTRAINT "table_during_excl" EXCLUDE USING gist ("during" WITH &&)
=== Adding Foreign Key and Check Constraints Without Initial Validation
You can add a not_valid: true option when adding constraints to existing tables so
that it doesn't check if all current rows are valid:
DB.alter_table(:table) do
# Assumes t_id column already exists
add_foreign_key([:t_id], :table, not_valid: true, name: :table_fk)
constraint({name: :col_123, not_valid: true}, col: [1,2,3])
end
# ALTER TABLE "table" ADD CONSTRAINT "table_fk" FOREIGN KEY ("t_id") REFERENCES "table" NOT VALID
# ALTER TABLE "table" ADD CONSTRAINT "col_123" CHECK (col IN (1, 2, 3)) NOT VALID
Such constraints will be enforced for newly inserted and updated rows, but not for existing rows. After
all existing rows have been fixed, you can validate the constraint:
DB.alter_table(:table) do
validate_constraint(:table_fk)
validate_constraint(:col_123)
end
# ALTER TABLE "table" VALIDATE CONSTRAINT "table_fk"
# ALTER TABLE "table" VALIDATE CONSTRAINT "col_123"
=== Creating Indexes Concurrently
You can create indexes concurrently using the concurrently: true option:
DB.add_index(:table, :t_id, concurrently: true)
# CREATE INDEX CONCURRENTLY "table_t_id_index" ON "table" ("t_id")
Similarly, you can drop indexes concurrently as well:
DB.drop_index(:table, :t_id, concurrently: true)
# DROP INDEX CONCURRENTLY "table_t_id_index"
=== Specific Conversions When Altering Column Types
When altering a column type, PostgreSQL allows the user to specify how to do the
conversion via a USING clause, and Sequel supports this using the :using option:
DB.alter_table(:table) do
# Assume unix_time column is stored as an integer, and you want to change it to timestamp
set_column_type :unix_time, Time, using: (Sequel.cast('epoch', Time) + Sequel.cast('1 second', :interval) * :unix_time)
end
# ALTER TABLE "table" ALTER COLUMN "unix_time" TYPE timestamp
# USING (CAST('epoch' AS timestamp) + (CAST('1 second' AS interval) * "unix_time"))
=== Creating Partitioned Tables
PostgreSQL allows marking tables as partitioned tables, and adding partitions to such tables. Sequel
offers support for this. You can create a partitioned table using the +:partition_by+ and
+:partition_type+ options (the default partition type is range partitioning):
DB.create_table(:table1, partition_by: :column, partition_type: :range) do
Integer :id
Date :column
end
DB.create_table(:table2, partition_by: :column, partition_type: :list) do
Integer :id
String :column
end
DB.create_table(:table3, partition_by: :column, partition_type: :hash) do
Integer :id
Integer :column
end
To add partitions of other tables, you use the +:partition_of+ option. This option will use
a custom DSL specific to partitioning other tables. For range partitioning, you can use the
+from+ and +to+ methods to specify the inclusive beginning and exclusive ending of the
range of the partition. You can call the +minvalue+ and +maxvalue+ methods to get the minimum
and maximum values for the column(s) in the range, useful as arguments to +from+ and +to+:
DB.create_table(:table1a, partition_of: :table1) do
from minvalue
to 0
end
DB.create_table(:table1b, partition_of: :table1) do
from 0
to 100
end
DB.create_table(:table1c, partition_of: :table1) do
from 100
to maxvalue
end
For list partitioning, you use the +values_in+ method. You can also use the +default+ method
to mark a partition as the default partition:
DB.create_table(:table2a, partition_of: :table2) do
values_in 1, 2, 3
end
DB.create_table(:table2b, partition_of: :table2) do
values_in 4, 5, 6
end
DB.create_table(:table2c, partition_of: :table2) do
default
end
For hash partitioning, you use the +modulus+ and +remainder+ methods:
DB.create_table(:table3a, partition_of: :table3) do
modulus 3
remainder 0
end
DB.create_table(:table3b, partition_of: :table3) do
modulus 3
remainder 1
end
DB.create_table(:table3c, partition_of: :table3) do
modulus 3
remainder 2
end
There is currently no support for using custom column or table constraints in partitions of
other tables. Support may be added in the future.
=== Creating Unlogged Tables
PostgreSQL allows users to create unlogged tables, which are faster but not crash safe. Sequel
allows you to create an unlogged table by specifying the unlogged: true option to +create_table+:
DB.create_table(:table, unlogged: true){Integer :i}
# CREATE UNLOGGED TABLE "table" ("i" integer)
=== Creating Identity Columns
You can use the +:identity+ option when creating columns to mark them as identity columns.
Identity columns are tied to a sequence for the default value. You can still override the
default value for the column when inserting:
DB.create_table(:table){Integer :id, identity: true}
# CREATE TABLE "table" ("id" integer GENERATED BY DEFAULT AS IDENTITY)
If you want to disallow using a user provided value when inserting, you can mark the
identity column using identity: :always:
DB.create_table(:table){Integer :id, identity: :always}
# CREATE TABLE "table" ("id" integer GENERATED ALWAYS AS IDENTITY)
=== Creating/Dropping Schemas, Languages, Functions, and Triggers
Sequel has built in support for creating and dropping PostgreSQL schemas, procedural languages, functions, and triggers:
DB.create_schema(:s)
# CREATE SCHEMA "s"
DB.drop_schema(:s)
# DROP SCHEMA "s"
DB.create_language(:plperl)
# CREATE LANGUAGE plperl
DB.drop_language(:plperl)
# DROP LANGUAGE plperl
DB.create_function(:set_updated_at, <<-SQL, language: :plpgsql, returns: :trigger)
BEGIN
NEW.updated_at := CURRENT_TIMESTAMP;
RETURN NEW;
END;
SQL
# CREATE FUNCTION set_updated_at() RETURNS trigger LANGUAGE plpgsql AS '
# BEGIN
# NEW.updated_at := CURRENT_TIMESTAMP;
# RETURN NEW;
# END;'
DB.drop_function(:set_updated_at)
# DROP FUNCTION set_updated_at()
DB.create_trigger(:table, :trg_updated_at, :set_updated_at, events: :update, each_row: true, when: {Sequel[:new][:updated_at] => Sequel[:old][:updated_at]})
# CREATE TRIGGER trg_updated_at BEFORE UPDATE ON "table" FOR EACH ROW WHEN ("new"."updated_at" = "old"."updated_at") EXECUTE PROCEDURE set_updated_at()
DB.drop_trigger(:table, :trg_updated_at)
# DROP TRIGGER trg_updated_at ON "table"
However, you may want to consider just using Database#run with the necessary SQL code, at least for functions and triggers.
=== Parsing Check Constraints
Sequel has support for parsing CHECK constraints on PostgreSQL using Sequel::Database#check_constraints:
DB.create_table(:foo) do
Integer :i
Integer :j
constraint(:ic, Sequel[:i] > 2)
constraint(:jc, Sequel[:j] > 2)
constraint(:ijc, Sequel[:i] - Sequel[:j] > 2)
end
DB.check_constraints(:foo)
# => {
# :ic=>{:definition=>"CHECK ((i > 2))", :columns=>[:i]},
# :jc=>{:definition=>"CHECK ((j > 2))", :columns=>[:j]},
# :ijc=>{:definition=>"CHECK (((i - j) > 2))", :columns=>[:i, :j]}
# }
=== Parsing Foreign Key Constraints Referencing A Given Table
Sequel has support for parsing FOREIGN KEY constraints that reference a given table, using the +:reverse+
option to +foreign_key_list+:
DB.create_table!(:a) do
primary_key :id
Integer :i
Integer :j
foreign_key :a_id, :a, foreign_key_constraint_name: :a_a
unique [:i, :j]
end
DB.create_table!(:b) do
foreign_key :a_id, :a, foreign_key_constraint_name: :a_a
Integer :c
Integer :d
foreign_key [:c, :d], :a, key: [:j, :i], name: :a_c_d
end
DB.foreign_key_list(:a, reverse: true)
# => [
# {:name=>:a_a, :columns=>[:a_id], :key=>[:id], :on_update=>:no_action, :on_delete=>:no_action, :deferrable=>false, :table=>:a, :schema=>:public},
# {:name=>:a_a, :columns=>[:a_id], :key=>[:id], :on_update=>:no_action, :on_delete=>:no_action, :deferrable=>false, :table=>:b, :schema=>:public},
# {:name=>:a_c_d, :columns=>[:c, :d], :key=>[:j, :i], :on_update=>:no_action, :on_delete=>:no_action, :deferrable=>false, :table=>:b, :schema=>:public}
# ]
== PostgreSQL-specific DML Support
=== Returning Rows From Insert, Update, and Delete Statements
Sequel supports the ability to return rows from insert, update, and delete statements, via
Dataset#returning:
DB[:table].returning.insert
# INSERT INTO "table" DEFAULT VALUES RETURNING *
DB[:table].returning(:id).delete
# DELETE FROM "table" RETURNING "id"
DB[:table].returning(:id, Sequel.*(:id, :id).as(:idsq)).update(id: 2)
# UPDATE "table" SET "id" = 2 RETURNING "id", ("id" * "id") AS "idsq"
When returning is used, instead of returning the number of rows affected (for update/delete)
or the serial primary key value (for insert), these methods will return an array of hashes
with the returned results.
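For example (a sketch; the returned values depend on the table's contents):

  DB[:table].returning(:id).insert(id: 1)  # => [{:id=>1}]
  DB[:table].returning(:id).delete         # => [{:id=>1}]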
=== VALUES Support
Sequel offers support for the +VALUES+ statement using Database#values:
DB.values([[1,2],[2,3],[3,4]])
# VALUES (1, 2), (2, 3), (3, 4)
DB.values([[1,2],[2,3],[3,4]]).order(2, 1)
# VALUES (1, 2), (2, 3), (3, 4) ORDER BY 2, 1
DB.values([[1,2],[2,3],[3,4]]).order(2, 1).limit(1,2)
# VALUES (1, 2), (2, 3), (3, 4) ORDER BY 2, 1 LIMIT 1 OFFSET 2
=== INSERT ON CONFLICT Support
Starting with PostgreSQL 9.5, you can do an upsert or ignore unique or exclusion constraint
violations when inserting using Dataset#insert_conflict:
DB[:table].insert_conflict.insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT DO NOTHING
For compatibility with Sequel's MySQL support, you can also use +insert_ignore+:
DB[:table].insert_ignore.insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT DO NOTHING
You can pass a specific constraint name using +:constraint+, to only ignore a specific
constraint violation:
DB[:table].insert_conflict(constraint: :table_a_uidx).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT ON CONSTRAINT table_a_uidx DO NOTHING
If the unique or exclusion constraint covers the whole table (e.g. it isn't a partial unique
index), then you can just specify the column using the +:target+ option:
DB[:table].insert_conflict(target: :a).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) DO NOTHING
If you want to update the existing row instead of ignoring the constraint violation, you
can pass an +:update+ option with a hash of values to update. You must pass either the
+:target+ or +:constraint+ options when passing the +:update+ option:
DB[:table].insert_conflict(target: :a, update: {b: Sequel[:excluded][:b]}).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) DO UPDATE SET b = excluded.b
If you want to update existing rows using the current value of a column, you can build
the desired calculation using Sequel[]:
DB[:table]
.insert_conflict(
target: :a,
update: {b: Sequel[:excluded][:b] + Sequel[:table][:a]}
)
.import([:a, :b], [ [1, 2] ])
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) DO UPDATE SET b = (excluded.b + table.a)
Additionally, if you only want to do the update in certain cases, you can specify an
+:update_where+ option, which will be used as a filter. If the row doesn't match the
conditions, the constraint violation will be ignored, but the row will not be updated:
DB[:table].insert_conflict(constraint: :table_a_uidx,
update: {b: Sequel[:excluded][:b]},
update_where: {Sequel[:table][:status_id]=>1}).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT ON CONSTRAINT table_a_uidx
# DO UPDATE SET b = excluded.b WHERE (table.status_id = 1)
=== INSERT OVERRIDING SYSTEM|USER VALUE Support
PostgreSQL 10+ supports identity columns, which are designed to replace the serial
columns previously used for autoincrementing primary keys. You can use
Dataset#overriding_system_value and Dataset#overriding_user_value to use this new
syntax:
DB.create_table(:table){primary_key :id}
# Ignore the given value for id, using the identity's sequence value.
DB[:table].overriding_user_value.insert(id: 1)
DB.create_table(:table){primary_key :id, identity: :always}
# Force the use of the given value for id, because otherwise the insert will
# raise an error, since GENERATED ALWAYS was used when creating the column.
DB[:table].overriding_system_value.insert(id: 1)
=== Distinct On Specific Columns
Sequel allows passing columns to Dataset#distinct, which will make the dataset return
rows that are distinct on just those columns:
DB[:table].distinct(:id).all
# SELECT DISTINCT ON ("id") * FROM "table"
=== JOIN USING table alias
Sequel allows passing an SQL::AliasedExpression to join table methods to use a USING
join with a table alias for the USING columns:
DB[:t1].join(:t2, Sequel.as([:c1, :c2], :alias))
# SELECT * FROM "t1" INNER JOIN "t2" USING ("c1", "c2") AS "alias"
=== Calling PostgreSQL 11+ Procedures postgres only
PostgreSQL 11+ added support for procedures, which are different from the user defined
functions that PostgreSQL has historically supported. These procedures are
called via a special +CALL+ syntax, and Sequel supports them via
Database#call_procedure:
DB.call_procedure(:foo, 1, "bar")
# CALL foo(1, 'bar')
Database#call_procedure will return a hash of return values if
the procedure returns a result, or +nil+ if the procedure does not return
a result.
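For example, the return value can be captured (a sketch, assuming the +foo+ procedure returns a result row):
ret = DB.call_procedure(:foo, 1, "bar")
# ret is a hash of the procedure's result row, or nil if it returns no result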
=== Using a Cursor to Process Large Datasets postgres only
The postgres adapter offers a Dataset#use_cursor method to process large result sets
without keeping all rows in memory:
DB[:table].use_cursor.each{|row| }
# BEGIN;
# DECLARE sequel_cursor NO SCROLL CURSOR WITHOUT HOLD FOR SELECT * FROM "table";
# FETCH FORWARD 1000 FROM sequel_cursor
# FETCH FORWARD 1000 FROM sequel_cursor
# ...
# FETCH FORWARD 1000 FROM sequel_cursor
# CLOSE sequel_cursor
# COMMIT
This support is used by default when using Dataset#paged_each.
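For example, a sketch of processing a large table with Dataset#paged_each, which fetches rows in batches using a cursor on this adapter:
DB[:table].paged_each do |row|
# each row is yielded without loading the entire result set into memory
end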
Using cursors, it is possible to update individual rows of a large dataset
easily using the rows_per_fetch: 1 option in conjunction with
Dataset#where_current_of. This is useful if the logic needed to
update the rows exists in the application and not in the database:
ds.use_cursor(rows_per_fetch: 1).each do |row|
ds.where_current_of.update(col: new_col_value(row))
end
=== Truncate Modifiers
Sequel supports PostgreSQL-specific truncate options:
DB[:table].truncate(cascade: true, only: true, restart: true)
# TRUNCATE TABLE ONLY "table" RESTART IDENTITY CASCADE
=== COPY Support postgres/pg and jdbc/postgres only
PostgreSQL's COPY feature is pretty much the fastest way to get data in or out of the database.
Sequel supports getting data out of the database via Database#copy_table, either for
a specific table or for an arbitrary dataset:
DB.copy_table(:table, format: :csv)
# COPY "table" TO STDOUT (FORMAT csv)
DB.copy_table(DB[:table], format: :csv)
# COPY (SELECT * FROM "table") TO STDOUT (FORMAT csv)
It supports putting data into the database via Database#copy_into:
DB.copy_into(:table, format: :csv, columns: [:column1, :column2], data: "1,2\n2,3\n")
# COPY "table"("column1", "column2") FROM STDIN (FORMAT csv)
=== Anonymous Function Execution
You can execute anonymous functions using a database procedural language via Database#do (the
plpgsql language is the default):
DB.do <<-SQL
DECLARE r record;
BEGIN
FOR r IN SELECT table_schema, table_name FROM information_schema.tables
WHERE table_type = 'VIEW' AND table_schema = 'public'
LOOP
EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser';
END LOOP;
END;
SQL
=== Listening On and Notifying Channels
You can use Database#notify to send notifications to channels:
DB.notify(:channel)
# NOTIFY "channel"
postgres/pg only You can listen on channels via Database#listen. Note that
this blocks until the listening thread is notified:
DB.listen(:channel)
# LISTEN "channel"
# after notification received:
# UNLISTEN *
Note that +listen+ by default only listens for a single notification. If you want to loop and process
notifications:
DB.listen(:channel, loop: true){|channel| p channel}
The +pg_static_cache_updater+ extension uses this support to automatically update
the caches for models using the +static_cache+ plugin. Look at the documentation of that
plugin for details.
=== Locking Tables
Sequel makes it easy to lock tables, though it is generally better to let the database
handle locking:
DB[:table].lock('EXCLUSIVE') do
DB[:table].insert(id: DB[:table].max(:id)+1)
end
# BEGIN;
# LOCK TABLE "table" IN EXCLUSIVE MODE;
# SELECT max("id") FROM "table" LIMIT 1;
# INSERT INTO "table" ("id") VALUES (2) RETURNING NULL;
# COMMIT;
== Extended Error Info (postgres/pg only)
If you run a query that raises a Sequel::DatabaseError, you can pass the exception object to
Database#error_info, and that will return a hash with metadata regarding the error,
such as the related table and column or constraint.
DB.create_table(:test1){primary_key :id}
DB.create_table(:test2){primary_key :id; foreign_key :test1_id, :test1}
DB[:test2].insert(test1_id: 1) rescue DB.error_info($!)
# => {
# :schema=>"public",
# :table=>"test2",
# :column=>nil,
# :constraint=>"test2_test1_id_fkey",
# :type=>nil,
# :severity=>"ERROR",
# :sql_state=>"23503",
# :message_primary=>"insert or update on table \"test2\" violates foreign key constraint \"test2_test1_id_fkey\"",
# :message_detail=>"Key (test1_id)=(1) is not present in table \"test1\".",
# :message_hint=>nil,
# :statement_position=>nil,
# :internal_position=>nil,
# :internal_query=>nil,
# :source_file=>"ri_triggers.c",
# :source_line=>"3321",
# :source_function=>"ri_ReportViolation"
# }
== sequel_pg (postgres/pg only)
When the postgres adapter is used with the pg driver, Sequel automatically checks for sequel_pg, and
loads it if it is available. sequel_pg is a C extension that optimizes the fetching of rows, generally
resulting in a ~2x speedup. It is highly recommended to install sequel_pg if you are using the
postgres adapter with pg.
sequel_pg has additional optimizations when using the Dataset +map+, +as_hash+,
+to_hash_groups+, +select_hash+, +select_hash_groups+, +select_map+, and +select_order_map+ methods,
which avoid creating intermediate hashes and can add further speedups.
In addition to optimization, sequel_pg also adds streaming support if used on PostgreSQL 9.2+. Streaming
support is similar to using a cursor, but it is faster and more transparent.
You can enable the streaming support:
DB.extension(:pg_streaming)
Then you can stream individual datasets:
DB[:table].stream.each{|row| }
Or stream all datasets by default:
DB.stream_all_queries = true
When streaming is enabled, Dataset#paged_each will use streaming to implement
paging.
= Prepared Statements and Bound Variables
Sequel has support for prepared statements and bound variables. No matter which
database you are using, the Sequel prepared statement/bound variable API remains
the same. There is native support for prepared statements/bound variables on
the following adapters:
* ibmdb (prepared statements only)
* jdbc
* mysql (server prepared statements using literalized connection variables)
* mysql2 (full support on 0.4+, otherwise server prepared statements using literalized connection variables)
* oracle (requires type specifiers for nil/NULL values)
* postgres (when using the pg driver)
* sqlite
* tinytds
Support on other adapters is emulated.
You can use the prepared_statements model plugin to automatically use prepared
statements for some common model actions such as saving or deleting a model
instance, or looking up a model based on a primary key.
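A minimal sketch of enabling the plugin for a single model class:
class Item < Sequel::Model
plugin :prepared_statements
end
Item[1] # primary key lookup uses a prepared statement
Item.create(name: 'Jim') # insert uses a prepared statement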
== Placeholders
Generally, when using prepared statements (and certainly when using bound
variables), you need to put placeholders in your SQL to indicate where you
want your bound arguments to appear. Database support and syntax vary
significantly for placeholders (e.g. :name, $1, ?). Sequel abstracts all of
that and allows you to specify placeholders by using the :$name format for
placeholders, e.g.:
ds = DB[:items].where(name: :$n)
You can use these placeholders in most places where you can use the value
directly. For example, if you want to use placeholders while also using
raw SQL, you can do:
ds = DB["SELECT * FROM items WHERE name = ?", :$n]
== Bound Variables
Using bound variables for this query is simple:
ds.call(:select, n: 'Jim')
This will do the equivalent of selecting records that have the name 'Jim'. It
returns all records, and can take a block that is passed to Dataset#all.
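For example, a block iterates over each returned row (a small sketch):
ds.call(:select, n: 'Jim'){|row| p row}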
Deleting or returning the first record works similarly:
ds.call(:first, n: 'Jim') # First record with name 'Jim'
ds.call(:delete, n: 'Jim') # Delete records with name 'Jim'
For inserting/updating records, you should also specify a value hash, which
may itself contain placeholders:
# Insert record with 'Jim', note that the previous filter is ignored
ds.call(:insert, {n: 'Jim'}, name: :$n)
# Change name to 'Bob' for all records with name of 'Jim'
ds.call(:update, {n: 'Jim', new_n: 'Bob'}, name: :$new_n)
== Prepared Statements
Prepared statement support is similar to bound variable support, but you
use Dataset#prepare with a name, and Dataset#call or Database#call later with the values:
ds = DB[:items].where(name: :$n)
ps = ds.prepare(:select, :select_by_name)
ps.call(n: 'Jim')
DB.call(:select_by_name, n: 'Jim') # same
The Dataset#prepare method returns a prepared statement, and also stores a
copy of the prepared statement in the database for later use. For insert
and update queries, the hash to insert/update is passed to +prepare+:
ps1 = DB[:items].prepare(:insert, :insert_with_name, name: :$n)
ps1.call(n: 'Jim')
DB.call(:insert_with_name, n: 'Jim') # same
ds = DB[:items].where(name: :$n)
ps2 = ds.prepare(:update, :update_name, name: :$new_n)
ps2.call(n: 'Jim', new_n: 'Bob')
DB.call(:update_name, n: 'Jim', new_n: 'Bob') # same
== Implementation Issues
Currently, creating a prepared statement uses Object#extend, which can hurt
performance. For high performance applications, it's recommended to create
all of your prepared statements upon application initialization, and not
to create prepared statements dynamically at runtime.
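A sketch of this approach, reusing the dataset from above (the statement name is illustrative):
# At application startup:
DB[:items].where(name: :$n).prepare(:select, :select_by_name)
# Later, at runtime:
DB.call(:select_by_name, n: 'Jim')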
== Database support
=== PostgreSQL
If you are using the postgres-pr driver, PostgreSQL uses the
default emulated support. If you are using ruby-pg, there is native support
for both prepared statements and bound variables. Prepared statements are
always server side.
=== SQLite
SQLite supports both prepared statements and bound variables.
=== MySQL/Mysql2
The MySQL and Mysql2 <0.4 ruby drivers do not support bound variables, so the bound
variable methods are emulated. Prepared statements use server side prepared statements.
Mysql2 0.4+ supports both prepared statements and bound variables.
=== JDBC
JDBC supports both prepared statements and bound variables. Whether these
are server side or client side depends on the JDBC driver. For PostgreSQL
over JDBC, you can add the prepareThreshold=N parameter to the connection
string, which will use a server side prepared statement after N calls to
the prepared statement.
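For example (a sketch; the host and database names are placeholders):
DB = Sequel.connect('jdbc:postgresql://localhost/mydb?prepareThreshold=3')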
=== TinyTDS
Uses the sp_executesql stored procedure with bound variables, since
Microsoft SQL Server doesn't support true prepared statements.
=== IBM_DB
DB2 supports both prepared statements and bound variables.
=== Oracle
Oracle supports both prepared statements and bound variables. Prepared
statements (OCI8::Cursor objects) are cached per connection. If you
ever plan to use a nil/NULL value as a bound variable/prepared statement
value, you must specify the type in the placeholder using a __* suffix.
You can use any of the schema types that Sequel supports, such as
:$name__string or :$num__integer. Using blobs as bound variables is
not currently supported.
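For example, a sketch using a typed placeholder so a nil value can be bound:
ds = DB[:items].where(name: :$n__string)
# The __string suffix tells the adapter to treat the bound value as a string,
# even when nil/NULL is passed.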
=== All Others
Support is emulated.
= Querying in Sequel
This guide is based on http://guides.rubyonrails.org/active_record_querying.html
== Purpose of this Guide
Sequel is a flexible and powerful database library
that supports a wide variety of different querying methods. This guide
aims to be an introduction to Sequel's querying support.
While you can use raw SQL with Sequel, a large part of the
advantage you get from using Sequel is Sequel's ability to abstract
SQL from you and give you a pure-ruby interface. Sequel also ships with
a {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
which adds methods to core ruby types to work with Sequel.
== Retrieving Objects
Sequel provides a few separate methods for retrieving objects from the
database. The underlying method is Sequel::Dataset#each, which yields each
row as the Sequel::Database provides it. However, while Dataset#each can and
often is used directly, in many cases there is a more convenient retrieval
method you can use.
=== Sequel::Dataset
If you are new to Sequel and aren't familiar with its datasets, you should probably
read the {"Dataset Basics" guide}[rdoc-ref:doc/dataset_basics.rdoc],
then come back here.
=== Retrieving a Single Object
Sequel offers quite a few ways to retrieve a single object.
==== Using a Primary Key [Sequel::Model]
Sequel::Model.[] is the easiest method to use to find a model instance
by its primary key value:
# Find artist with primary key (id) 1
artist = Artist[1]
# SELECT * FROM artists WHERE (id = 1)
# => #"YJM", :id=>1}>
If there is no record with the given primary key, nil will be returned. If you want
to raise an exception if no record is found, you can use Sequel::Model.with_pk!:
artist = Artist.with_pk!(1)
==== Using +first+
If you want the first record in the dataset,
Sequel::Dataset#first is probably the most obvious method to use:
artist = Artist.first
# SELECT * FROM artists LIMIT 1
# => #"YJM", :id=>1}>
Any options you pass to +first+ will be used as a filter:
artist = Artist.first(name: 'YJM')
# SELECT * FROM artists WHERE (name = 'YJM') LIMIT 1
# => #"YJM", :id=>1}>
artist = Artist.first(Sequel.like(:name, 'Y%'))
# SELECT * FROM artists WHERE (name LIKE 'Y%' ESCAPE '\') LIMIT 1
# => #"YJM", :id=>1}>
If there is no matching row, +first+ will return nil. If you want to
raise an exception instead, use first!.
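For example:
artist = Artist.first!(name: 'YJM')
# SELECT * FROM artists WHERE (name = 'YJM') LIMIT 1
# Raises Sequel::NoMatchingRow if no matching row exists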
Sequel::Dataset#[] is basically an alias for +first+, except it
requires an argument:
DB[:artists][{name: 'YJM'}]
# SELECT * FROM artists WHERE (name = 'YJM') LIMIT 1
# => {:name=>"YJM", :id=>1}
Note that while Model.[] allows you to pass a primary key directly,
Dataset#[] does not (unless it is a model dataset).
==== Using +last+
If you want the last record in the dataset,
Sequel::Dataset#last is an obvious method to use. +last+ requires the
dataset be ordered, unless the dataset is a model dataset in which case +last+
will do a reverse order by the primary key field:
artist = Artist.last
# SELECT * FROM artists ORDER BY id DESC LIMIT 1
# => #"YJM", :id=>1}>
Note:
1. +last+ is equivalent to running a +reverse.first+ query; in other words, it reverses the order of the dataset and then calls +first+ (see the example after this list). This is why +last+ raises a Sequel::Error when there is no order on a plain dataset - because it would provide the same record as +first+, and most users would find that confusing.
2. +last+ is not necessarily going to give you the last record in the dataset unless you give the dataset an unambiguous order.
3. +last+ will ignore +limit+ if chained together in a query because it sets a limit of 1 if no arguments are given.
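To make note 1 concrete, these two queries are equivalent:
Artist.order(:name).last
Artist.order(:name).reverse.first
# SELECT * FROM artists ORDER BY name DESC LIMIT 1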
==== Retrieving a Single Column Value
Sometimes, instead of wanting an entire row, you only want the value of
a specific column. For this Sequel::Dataset#get is the method
you want:
artist_name = Artist.get(:name)
# SELECT name FROM artists LIMIT 1
# => "YJM"
==== Retrieving Multiple Column Values
If you want the value for multiple columns, you can pass an array to
Sequel::Dataset#get:
artist_id, artist_name = Artist.get([:id, :name])
# SELECT id, name FROM artists LIMIT 1
# => [1, "YJM"]
=== Retrieving Multiple Objects
==== As an Array of Hashes or Model Objects
In many cases, you want an array of all of the rows associated with the
dataset, in which case Sequel::Dataset#all is the method you
want to use:
artists = Artist.all
# SELECT * FROM artists
# => [#"YJM", :id=>1}>,
# #"AS", :id=>2}>]
==== Using an Enumerable Interface
Sequel::Dataset uses an Enumerable Interface, so it provides a
method named each that yields hashes or model objects as they are retrieved
from the database:
Artist.each{|x| p x.name}
# SELECT * FROM artists
"YJM"
"AS"
This means that all of the methods in the Enumerable module are available,
such as +map+:
artist_names = Artist.map{|x| x.name}
# SELECT * FROM artists
# => ["YJM", "AS"]
==== As an Array of Column Values
Sequel also has an extended +map+ method that takes an argument. If you
provide an argument to +map+, it will return an array of values for the
given column. So the previous example can be handled more easily with:
artist_names = Artist.map(:name)
# SELECT * FROM artists
# => ["YJM", "AS"]
One difference between these two ways of returning an array of values is
that providing +map+ with an argument is really doing:
artist_names = Artist.map{|x| x[:name]} # not x.name
Note that regardless of whether you provide +map+ with an argument, it
does not modify the columns selected. If you only want to select a
single column and return an array of the columns values, you can use
+select_map+:
artist_names = Artist.select_map(:name)
# SELECT name FROM artists
# => ["YJM", "AS"]
It's also common to want to order such a map, so Sequel provides a
+select_order_map+ method as well:
artist_names = Artist.select_order_map(:name)
# SELECT name FROM artists ORDER BY name
# => ["AS", "YJM"]
In all of these cases, you can provide an array of column symbols and
an array of arrays of values will be returned:
artist_names = Artist.select_map([:id, :name])
# SELECT id, name FROM artists
# => [[1, "YJM"], [2, "AS"]]
==== As a Hash
Sequel makes it easy to take an SQL query and return it as a ruby hash,
using the +as_hash+ method:
artist_names = Artist.as_hash(:id, :name)
# SELECT * FROM artists
# => {1=>"YJM", 2=>"AS"}
As you can see, the +as_hash+ method uses the first symbol as the key
and the second symbol as the value. So if you swap the two arguments the hash
will have its keys and values transposed:
artist_names = Artist.as_hash(:name, :id)
# SELECT * FROM artists
# => {"YJM"=>1, "AS"=>2}
Now what if you have multiple values for the same key? By default, +as_hash+
will just have the last matching value. If you care about all matching values,
use +to_hash_groups+, which makes each value of the hash an array of matching
values, in the order they were received:
artist_names = Artist.to_hash_groups(:name, :id)
# SELECT * FROM artists
# => {"YJM"=>[1, 10, ...], "AS"=>[2, 20, ...]}
If you only provide one argument to +as_hash+, it uses the entire hash
or model object as the value:
artist_names = DB[:artists].as_hash(:name)
# SELECT * FROM artists
# => {"YJM"=>{:id=>1, :name=>"YJM"}, "AS"=>{:id=>2, :name=>"AS"}}
and +to_hash_groups+ works similarly:
artist_names = DB[:artists].to_hash_groups(:name)
# SELECT * FROM artists
# => {"YJM"=>[{:id=>1, :name=>"YJM"}, {:id=>10, :name=>"YJM"}], ...}
Model datasets have an +as_hash+ method that can be called without any
arguments, in which case it will use the primary key as the key and
the model object as the value. This can be used to easily create an
identity map:
artist_names = Artist.as_hash
# SELECT * FROM artists
# => {1=>#<Artist @values={:id=>1, :name=>"YJM"}>,
#     2=>#<Artist @values={:id=>2, :name=>"AS"}>}
There is no equivalent handling to +to_hash_groups+, since there would
only be one matching record, as the primary key must be unique.
Note that +as_hash+ never modifies the columns selected. However, just
like Sequel has a +select_map+ method to modify the columns selected and
return an array, Sequel also has a +select_hash+ method to modify the
columns selected and return a hash:
artist_names = Artist.select_hash(:name, :id)
# SELECT name, id FROM artists
# => {"YJM"=>1, "AS"=>2}
Likewise, +select_hash_groups+ also exists:
artist_names = Artist.select_hash_groups(:name, :id)
# SELECT name, id FROM artists
# => {"YJM"=>[1, 10, ...], "AS"=>[2, 20, ...]}
== Modifying datasets
Note that the retrieval methods discussed above just return
the row(s) included in the existing dataset. In most cases,
you aren't interested in every row in a table, but in a subset
of the rows, based on some criteria. In Sequel, filtering
the dataset is generally done separately from retrieving
the records.
There are really two types of dataset methods that you will
be using:
1. Methods that return row(s), discussed above
2. Methods that return modified datasets, discussed below
Sequel datasets are frozen and use a method chaining, functional style API
that returns modified datasets. Let's start with a simple example.
This is a basic dataset that includes all records in the
table +artists+:
ds1 = DB[:artists]
# SELECT * FROM artists
Let's say we are only interested in the artists whose names
start with "A":
ds2 = ds1.where(Sequel.like(:name, 'A%'))
# SELECT * FROM artists WHERE (name LIKE 'A%' ESCAPE '\')
Here we see that +where+ returns a dataset that adds a +WHERE+
clause to the query. It's important to note that +where+ does
not modify the receiver:
ds1
# SELECT * FROM artists
ds2
# SELECT * FROM artists WHERE (name LIKE 'A%' ESCAPE '\')
In Sequel, dataset methods do not modify the dataset itself, so you can freely use the dataset in multiple
places without worrying that its usage in one place will affect its usage
in another place. This is what is meant by a functional style API.
Let's say we only want to select the id and name columns, and that
we want to order by name:
ds3 = ds2.order(:name).select(:id, :name)
# SELECT id, name FROM artists WHERE (name LIKE 'A%' ESCAPE '\') ORDER BY name
Note how you don't need to assign the returned value of order to a variable,
and then call select on that. Because order just returns a dataset, you can
call select directly on the returned dataset. This is what is meant by a
method chaining API.
Also note how you can call methods that modify different clauses in any order.
In this case, the WHERE clause was added first, then the ORDER clause, then the
SELECT clause was modified. This makes for a flexible API, where you can modify
any part of the query at any time.
== Filters
Filtering is probably the most common dataset modifying action done in Sequel.
Both the +where+ and +filter+ methods filter the dataset by modifying the
dataset's WHERE clause. Both accept a wide variety of input formats, discussed
below.
=== Hashes
The most common format for providing filters is via a hash. In general, Sequel
treats conditions specified with a hash as equality, inclusion, or identity. What type
of condition is used depends on the values in the hash.
Unless Sequel has special support for the value's class, it uses a simple
equality statement:
Artist.where(id: 1)
# SELECT * FROM artists WHERE (id = 1)
Artist.where(name: 'YJM')
# SELECT * FROM artists WHERE (name = 'YJM')
For arrays, Sequel uses the IN operator with a value list:
Artist.where(id: [1, 2])
# SELECT * FROM artists WHERE (id IN (1, 2))
For datasets, Sequel uses the IN operator with a subselect:
Artist.where(id: Album.select(:artist_id))
# SELECT * FROM artists WHERE (id IN (
# SELECT artist_id FROM albums))
For boolean values such as nil, true, and false, Sequel uses the IS operator:
Artist.where(id: nil)
# SELECT * FROM artists WHERE (id IS NULL)
For ranges, Sequel uses a pair of inequality statements:
Artist.where(id: 1..5)
# SELECT * FROM artists WHERE ((id >= 1) AND (id <= 5))
Artist.where(id: 1...5)
# SELECT * FROM artists WHERE ((id >= 1) AND (id < 5))
Finally, for regexps, Sequel uses an SQL regular expression. Note that this
is only supported by default on PostgreSQL and MySQL. It can also be supported
on SQLite when using the sqlite adapter with the :setup_regexp_function
Database option.
Artist.where(name: /JM$/)
# SELECT * FROM artists WHERE (name ~ 'JM$')
If there are multiple arguments in the hash, the filters are ANDed together:
Artist.where(id: 1, name: /JM$/)
# SELECT * FROM artists WHERE ((id = 1) AND (name ~ 'JM$'))
This works the same as if you used two separate +where+ calls:
Artist.where(id: 1).where(name: /JM$/)
# SELECT * FROM artists WHERE ((id = 1) AND (name ~ 'JM$'))
=== Array of Two Element Arrays
If you use an array of two element arrays, it is treated as a hash. The only
advantage to using an array of two element arrays is that it allows you to
duplicate keys, so you can do:
Artist.where([[:name, /JM$/], [:name, /^YJ/]])
# SELECT * FROM artists WHERE ((name ~ 'JM$') AND (name ~ '^YJ'))
=== Virtual Row Blocks
If a block is passed to a filter, it is treated as a virtual row block:
Artist.where{id > 5}
# SELECT * FROM artists WHERE (id > 5)
You can learn more about virtual row blocks in the {"Virtual Rows" guide}[rdoc-ref:doc/virtual_rows.rdoc].
You can provide both regular arguments and a block, in which case the results
will be ANDed together:
Artist.where(name: 'A'...'M'){id > 5}
# SELECT * FROM artists WHERE ((name >= 'A') AND (name < 'M') AND (id > 5))
Using virtual row blocks, what you can do with a single entry hash or an array with
a single two element array can also be done using the =~ method:
Artist.where{id =~ 5}
# SELECT * FROM artists WHERE (id = 5)
=== Symbols
If you have a boolean column in the database, and you want only true
values, you can just provide the column symbol to filter:
Artist.where(:retired)
# SELECT * FROM artists WHERE retired
=== SQL::Expression
Sequel has a DSL that allows easily creating SQL expressions. These SQL
expressions are instances of subclasses of Sequel::SQL::Expression. You've
already seen an example earlier:
Artist.where(Sequel.like(:name, 'Y%'))
# SELECT * FROM artists WHERE name LIKE 'Y%' ESCAPE '\'
In this case Sequel.like returns a Sequel::SQL::BooleanExpression object,
which is used directly in the filter.
You can use the DSL to create arbitrarily complex expressions. SQL::Expression
objects can be created via singleton methods on the Sequel module. The most common
method is Sequel.[], which takes any object and wraps it in a SQL::Expression
object. In most cases, the SQL::Expression returned supports the & operator for
+AND+, the | operator for +OR+, and the ~ operator for inversion:
Artist.where(Sequel.like(:name, 'Y%') & (Sequel[{b: 1}] | Sequel.~(c: 3)))
# SELECT * FROM artists WHERE ((name LIKE 'Y%' ESCAPE '\') AND ((b = 1) OR (c != 3)))
You can combine these expression operators with the virtual row support:
Artist.where{(a > 1) & ~((b(c) < 1) | d)}
# SELECT * FROM artists WHERE ((a > 1) AND (b(c) >= 1) AND NOT d)
Note the use of parentheses when using the & and | operators, as they have lower
precedence than other operators. The following will not work:
Artist.where{a > 1 & ~(b(c) < 1 | d)}
# Raises a TypeError
=== Strings with Placeholders
Assuming you want to get your hands dirty and use SQL fragments in filters, Sequel allows you
to do so if you explicitly mark the strings as literal strings using +Sequel.lit+. You can
use placeholders in the string and pass arguments for the placeholders:
Artist.where(Sequel.lit("name LIKE ?", 'Y%'))
# SELECT * FROM artists WHERE (name LIKE 'Y%')
This is the most common type of placeholder, where each question mark is substituted
with the next argument:
Artist.where(Sequel.lit("name LIKE ? AND id = ?", 'Y%', 5))
# SELECT * FROM artists WHERE (name LIKE 'Y%' AND id = 5)
You can also use named placeholders with a hash, where the named placeholders use
colons before the placeholder names:
Artist.where(Sequel.lit("name LIKE :name AND id = :id", name: 'Y%', id: 5))
# SELECT * FROM artists WHERE (name LIKE 'Y%' AND id = 5)
You don't have to provide any placeholders if you don't want to:
Artist.where(Sequel.lit("id = 2"))
# SELECT * FROM artists WHERE id = 2
However, if you are using any untrusted input, you should definitely be using placeholders.
In general, unless you are hardcoding values in the strings, you should use placeholders.
You should never pass a string that has been built using interpolation, unless you are
sure of what you are doing.
Artist.where(Sequel.lit("id = #{params[:id]}")) # Don't do this!
Artist.where(Sequel.lit("id = ?", params[:id])) # Do this instead
Artist.where(id: params[:id].to_i) # Even better
=== Inverting
You may be wondering how to specify a not equals condition in Sequel, or the NOT IN
operator. Sequel has generic support for inverting conditions, so to write a not
equals condition, you write an equals condition, and invert it:
Artist.where(id: 5).invert
# SELECT * FROM artists WHERE (id != 5)
Note that +invert+ inverts the entire filter:
Artist.where(id: 5).where{name > 'A'}.invert
# SELECT * FROM artists WHERE ((id != 5) OR (name <= 'A'))
In general, +invert+ is used rarely, since +exclude+ allows you to invert only specific
filters:
Artist.exclude(id: 5)
# SELECT * FROM artists WHERE (id != 5)
Artist.where(id: 5).exclude{name > 'A'}
# SELECT * FROM artists WHERE ((id = 5) AND (name <= 'A'))
So to do a NOT IN with an array:
Artist.exclude(id: [1, 2])
# SELECT * FROM artists WHERE (id NOT IN (1, 2))
Or to use the NOT LIKE operator:
Artist.exclude(Sequel.like(:name, '%J%'))
# SELECT * FROM artists WHERE (name NOT LIKE '%J%' ESCAPE '\')
You can use Sequel.~ to negate expressions:
Artist.where(Sequel.~(id: 5))
# SELECT * FROM artists WHERE (id != 5)
On Sequel expression objects, you can use ~ to negate them:
Artist.where(~Sequel.like(:name, '%J%'))
# SELECT * FROM artists WHERE (name NOT LIKE '%J%' ESCAPE '\')
You can use !~ in virtual row blocks to create negated expressions:
Artist.where{id !~ 5}
# SELECT * FROM artists WHERE (id != 5)
=== Removing
To remove all existing filters, use +unfiltered+:
Artist.where(id: 1).unfiltered
# SELECT * FROM artists
== Ordering
Sequel offers quite a few methods to manipulate the SQL ORDER BY clause. The
most basic of these is +order+:
Artist.order(:id)
# SELECT * FROM artists ORDER BY id
You can specify multiple arguments to order by more than one column:
Album.order(:artist_id, :id)
# SELECT * FROM album ORDER BY artist_id, id
Note that unlike +where+, +order+ replaces an existing order; it does not
append to an existing order:
Artist.order(:id).order(:name)
# SELECT * FROM artists ORDER BY name
If you want to add a column to the end of the existing order:
Artist.order(:id).order_append(:name)
# SELECT * FROM artists ORDER BY id, name
If you want to add a column to the beginning of the existing order:
Artist.order(:id).order_prepend(:name)
# SELECT * FROM artists ORDER BY name, id
=== Reversing
Just like you can invert an existing filter, you can reverse an existing
order, using +reverse+ without an order:
Artist.order(:id).reverse
# SELECT * FROM artists ORDER BY id DESC
Alternatively, you can provide reverse with the order:
Artist.reverse(:id)
# SELECT * FROM artists ORDER BY id DESC
To specify that a single entry be reversed, Sequel.desc can be used:
Artist.order(Sequel.desc(:id))
# SELECT * FROM artists ORDER BY id DESC
This allows you to easily use both ascending and descending orders:
Artist.order(:name, Sequel.desc(:id))
# SELECT * FROM artists ORDER BY name, id DESC
=== Removing
Just like you can remove filters with +unfiltered+, you can remove
orders with +unordered+:
Artist.order(:name).unordered
# SELECT * FROM artists
== Selected Columns
Sequel offers a few methods to manipulate the columns selected. As
you may be able to guess, the main method used is +select+:
Artist.select(:id, :name)
# SELECT id, name FROM artists
You just specify all of the columns that you are selecting as
arguments to the method.
If you are dealing with model objects, you'll want to include the
primary key if you want to update or destroy the object. You'll
also want to include any keys (primary or foreign) related to
associations you plan to use.
If a column is not selected, and you attempt to access it, you will
get nil:
artist = Artist.select(:name).first
# SELECT name FROM artists LIMIT 1
artist[:id]
# => nil
Like +order+, +select+ replaces the existing selected columns:
Artist.select(:id).select(:name)
# SELECT name FROM artists
To add to the existing selected columns, use +select_append+:
Artist.select(:id).select_append(:name)
# SELECT id, name FROM artists
To remove specifically selected columns, and default back to all
columns, use +select_all+:
Artist.select(:id).select_all
# SELECT * FROM artists
To select all columns from a given table, provide an argument to
+select_all+:
Artist.select_all(:artists)
# SELECT artists.* FROM artists
=== Distinct
To treat duplicate rows as a single row when retrieving the records,
use +distinct+:
Artist.distinct.select(:name)
# SELECT DISTINCT name FROM artists
Note that DISTINCT is a separate SQL clause; it's not a function
that you pass to select.
== Limit and Offset
You can limit the dataset to a given number of rows using +limit+:
Artist.limit(5)
# SELECT * FROM artists LIMIT 5
You can provide a second argument to +limit+ to specify an offset:
Artist.limit(5, 10)
# SELECT * FROM artists LIMIT 5 OFFSET 10
You can also call the +offset+ method separately:
Artist.limit(5).offset(10)
# SELECT * FROM artists LIMIT 5 OFFSET 10
Either of these would return the 11th through 15th records in the original
dataset.
To remove a limit and offset from a dataset, use +unlimited+:
Artist.limit(5, 10).unlimited
# SELECT * FROM artists
== Grouping
The SQL GROUP BY clause is used to combine multiple rows based on
the values of a given group of columns.
To modify the GROUP BY clause of the SQL statement, you use +group+:
Album.group(:artist_id)
# SELECT * FROM albums GROUP BY artist_id
You can remove an existing grouping using +ungrouped+:
Album.group(:artist_id).ungrouped
# SELECT * FROM albums
If you want to add a column to the end of the existing grouping columns:
Album.group(:artist_id).group_append(:name)
# SELECT * FROM albums GROUP BY artist_id, name
A common use of grouping is to count based on the number of grouped rows,
and Sequel provides a +group_and_count+ method to make this easier:
Album.group_and_count(:artist_id)
# SELECT artist_id, count(*) AS count FROM albums GROUP BY artist_id
This will return the number of albums for each artist_id.
If you want to select and group on the same columns, you can use +select_group+:
Album.select_group(:artist_id)
# SELECT artist_id FROM albums GROUP BY artist_id
Usually you would add a +select_append+ call after that, to add some sort of
aggregation:
Album.select_group(:artist_id).select_append{sum(num_tracks).as(tracks)}
# SELECT artist_id, sum(num_tracks) AS tracks FROM albums GROUP BY artist_id
== Having
The SQL HAVING clause is similar to the WHERE clause, except that
it filters the results after the grouping has been applied, instead of
before. One possible use is if you only wanted to return artists
who had at least 10 albums:
Album.group_and_count(:artist_id).having{count.function.* >= 10}
# SELECT artist_id, count(*) AS count FROM albums
# GROUP BY artist_id HAVING (count(*) >= 10)
Both the WHERE clause and the HAVING clause are removed by +unfiltered+:
Album.group_and_count(:artist_id).having{count.function.* >= 10}.
where(Sequel.like(:name, 'A%')).unfiltered
# SELECT artist_id, count(*) AS count FROM albums GROUP BY artist_id
== Joins
Sequel has support for many different SQL join types.
The underlying method used is +join_table+:
Album.join_table(:inner, :artists, id: :artist_id)
# SELECT * FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
In most cases, you won't call +join_table+ directly, as Sequel provides
shortcuts for all common (and most uncommon) join types. For example
+join+ does an inner join:
Album.join(:artists, id: :artist_id)
# SELECT * FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
And +left_join+ does a LEFT JOIN:
Album.left_join(:artists, id: :artist_id)
# SELECT * FROM albums
# LEFT JOIN artists ON (artists.id = albums.artist_id)
=== Table/Dataset to Join
For all of these specialized join methods, the first argument is
generally the name of the table to which you are joining. However, you
can also provide a dataset, in which case a subselect is used:
Album.join(Artist.where{name < 'A'}, id: :artist_id)
# SELECT * FROM albums
# INNER JOIN (SELECT * FROM artists WHERE (name < 'A')) AS t1
# ON (t1.id = albums.artist_id)
=== Join Conditions
The second argument to the specialized join methods is the conditions
to use when joining, which is similar to a filter expression, with
a few minor exceptions.
==== Implicit Qualification
A hash used as the join conditions operates similarly to a filter,
except that unqualified symbol keys are automatically qualified
with the table from the first argument, and unqualified symbol values
are automatically qualified with the last table joined (or the first
table in the dataset if there hasn't been a previous join):
Album.join(:artists, id: :artist_id)
# SELECT * FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
Note how the +id+ symbol is automatically qualified with +artists+,
while the +artist_id+ symbol is automatically qualified with +albums+.
Because Sequel uses the last joined table for implicit qualifications
of values, you can do things like:
Album.join(:artists, id: :artist_id).
join(:members, artist_id: :id)
# SELECT * FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
# INNER JOIN members ON (members.artist_id = artists.id)
Note that when joining to the +members+ table, +artist_id+ is qualified
with +members+ and +id+ is qualified with +artists+.
While a good default, implicit qualification is not always correct:
Album.join(:artists, id: :artist_id).
join(:tracks, album_id: :id)
# SELECT * FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
# INNER JOIN tracks ON (tracks.album_id = artists.id)
Note here how +id+ is qualified with +artists+ instead of +albums+. This
is wrong as the foreign key tracks.album_id refers to albums.id, not
artists.id. To fix this, you need to explicitly qualify when joining:
Album.join(:artists, id: :artist_id).
join(:tracks, album_id: Sequel[:albums][:id])
# SELECT * FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
# INNER JOIN tracks ON (tracks.album_id = albums.id)
Just like in filters, an array of two element arrays is treated the same
as a hash, but allows for duplicate keys:
Album.join(:artists, [[:id, :artist_id], [:id, 1..5]])
# SELECT * FROM albums INNER JOIN artists
# ON ((artists.id = albums.artist_id)
# AND (artists.id >= 1) AND (artists.id <= 5))
And just like in the hash case, unqualified symbol elements in the
array are implicitly qualified.
By default, Sequel only qualifies unqualified symbols in the conditions. However,
you can provide an options hash with a qualify: :deep option to do a deep
qualification, which can qualify subexpressions. For example, let's say you are doing
a JOIN using case insensitive string comparison:
Album.join(:artists, {Sequel.function(:lower, :name) =>
Sequel.function(:lower, :artist_name)},
qualify: :deep)
# SELECT * FROM albums INNER JOIN artists
# ON (lower(artists.name) = lower(albums.artist_name))
Note how the arguments to lower were qualified correctly in both cases.
==== USING Joins
The most common type of join conditions is a JOIN ON, as displayed
above. However, the SQL standard allows for join conditions to be
specified with JOIN USING, assuming the column name is the same in
both tables.
For example, if instead of having a primary key
column named +id+ in all of your tables, you use +artist_id+ in your
+artists+ table and +album_id+ in your +albums+ table, you could do:
Album.join(:artists, [:artist_id])
# SELECT * FROM albums INNER JOIN artists USING (artist_id)
See here how you specify the USING columns as an array of symbols.
==== NATURAL Joins
NATURAL joins take it one step further than USING joins, by assuming
that all columns with the same names in both tables should be
used for joining:
Album.natural_join(:artists)
# SELECT * FROM albums NATURAL JOIN artists
In this case, you don't even need to specify any conditions.
==== Join Blocks
You can provide a block to any of the join methods that accept
conditions. This block should accept 3 arguments: the table alias
for the table currently being joined, the table alias for the last
table joined (or first table), and an array of previous
Sequel::SQL::JoinClauses.
This allows you to qualify columns similar to how the implicit
qualification works, without worrying about the specific aliases
being used. For example, let's say you wanted to join the albums
and artists tables, but only want albums where the artist's name
comes before the album's name.
Album.join(:artists, id: :artist_id) do |j, lj, js|
Sequel[j][:name] < Sequel[lj][:name]
end
# SELECT * FROM albums INNER JOIN artists
# ON ((artists.id = albums.artist_id)
# AND (artists.name < albums.name))
Because an inequality like this can't be expressed with a hash in Sequel, you
need to use a block and qualify the tables manually.
== From
In general, the FROM table is the first clause populated when creating
a dataset. For a standard Sequel::Model, the dataset already has the
FROM clause populated, and the most common way to create datasets is
with the Database#[] method, which populates the FROM clause.
However, you can modify the tables you are selecting FROM using +from+:
Album.from(:albums, :old_albums)
# SELECT * FROM albums, old_albums
Be careful with this, as multiple tables in the FROM clause use a cross
join by default, so the number of rows will be number of albums times the
number of old albums.
Using multiple FROM tables and setting conditions in the WHERE clause is
an old-school way of joining tables:
DB.from(:albums, :artists).where{{artists[:id]=>albums[:artist_id]}}
# SELECT * FROM albums, artists WHERE (artists.id = albums.artist_id)
=== Using the current dataset in a subselect
In some cases, you may want to wrap the current dataset in a subselect.
Here's an example using +from_self+:
Album.order(:artist_id).limit(100).from_self.group(:artist_id)
# SELECT * FROM (SELECT * FROM albums ORDER BY artist_id LIMIT 100)
# AS t1 GROUP BY artist_id
This is different than without +from_self+:
Album.order(:artist_id).limit(100).group(:artist_id)
# SELECT * FROM albums GROUP BY artist_id ORDER BY artist_id LIMIT 100
Without +from_self+, you are doing the grouping, and limiting the number
of grouped records returned to 100. So assuming you have albums by more
than 100 artists, you'll end up with 100 results.
With +from_self+, you are limiting the number of records before grouping.
So if the artist with the lowest id had 100 albums, you'd get 1 result,
not 100.
== Locking for Update
Sequel allows you to easily add a FOR UPDATE clause to your queries so
that the records returned can't be modified by another query until the
current transaction commits. You just use the +for_update+ dataset
method when returning the rows:
DB.transaction do
album = Album.for_update.first(id: 1)
# SELECT * FROM albums WHERE (id = 1) FOR UPDATE
album.num_tracks += 1
album.save
end
This will ensure that no other connection modifies the row between when you select
it and when the transaction ends.
=== Optimistic Locking
One of the model plugins that ships with Sequel is an optimistic locking plugin, which provides
a database independent way to detect and raise an error if two different connections
modify the same row. It's useful for things like web forms where you cannot keep a
transaction open while the user is looking at the form, because of the web's
stateless nature.
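A minimal sketch of enabling it (the plugin defaults to a +lock_version+ column, which must exist in the table):
class Album < Sequel::Model
plugin :optimistic_locking
end
a1 = Album[1]
a2 = Album[1]
a1.update(name: 'A') # bumps the lock_version column
a2.update(name: 'B') # raises Sequel::Plugins::OptimisticLocking::Error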
== Custom SQL
Sequel makes it easy to use custom SQL for the query by providing it to the Database#[]
method as a string:
DB["SELECT * FROM artists"]
# SELECT * FROM artists
You can also use the +with_sql+ dataset method to return a dataset that uses that
exact SQL:
DB[:albums].with_sql("SELECT * FROM artists")
# SELECT * FROM artists
With either of these methods, you can use placeholders:
DB["SELECT * FROM artists WHERE id = ?", 5]
# SELECT * FROM artists WHERE id = 5
DB[:albums].with_sql("SELECT * FROM artists WHERE id = :id", id: 5)
# SELECT * FROM artists WHERE id = 5
Note that if you specify the dataset using custom SQL, you can still call the dataset
modification methods, but in many cases they will appear to have no effect:
DB["SELECT * FROM artists"].select(:name).order(:id)
# SELECT * FROM artists
You can use the implicit_subquery extension to automatically wrap queries that use
custom SQL in subqueries if a method is called that would modify the SQL:
DB.extension :implicit_subquery
DB["SELECT * FROM artists"].select(:name).order(:id)
# SELECT name FROM (SELECT * FROM artists) AS t1 ORDER BY id
If you must drop down to using custom SQL, it's recommended that you only do so for
specific parts of a query. For example, if the reason you are using custom SQL is
to use a custom operator in the database in the SELECT clause:
DB["SELECT name, (foo !@# ?) AS baz FROM artists", 'bar']
it's better to use Sequel's DSL, and use a literal string for the custom operator:
DB[:artists].select(:name, Sequel.lit("(foo !@# ?)", 'bar').as(:baz))
That way Sequel's method chaining still works, and it increases Sequel's ability to
introspect the code.
== Checking for Records
If you just want to know whether the current dataset would return any rows, use empty?:
Album.empty?
# SELECT 1 FROM albums LIMIT 1
# => false
Album.where(id: 0).empty?
# SELECT 1 FROM albums WHERE (id = 0) LIMIT 1
# => true
Album.where(Sequel.like(:name, 'R%')).empty?
# SELECT 1 FROM albums WHERE (name LIKE 'R%' ESCAPE '\') LIMIT 1
# => false
== Aggregate Calculations
The SQL standard defines a few helpful methods to get aggregate information about
datasets, such as +count+, +sum+, +avg+, +min+, and +max+. There are dataset methods
for each of these aggregate functions.
+count+ just returns the number of records in the dataset.
Album.count
# SELECT count(*) AS count FROM albums LIMIT 1
# => 2
If you pass an expression to count, it will return the number of records where
that expression is not NULL:
Album.count(:artist_id)
# SELECT count(artist_id) AS count FROM albums LIMIT 1
# => 1
The other methods take a column argument and call the aggregate function with
the argument:
Album.sum(:id)
# SELECT sum(id) AS sum FROM albums LIMIT 1
# => 3
Album.avg(:id)
# SELECT avg(id) AS avg FROM albums LIMIT 1
# => 1.5
Album.min(:id)
# SELECT min(id) AS min FROM albums LIMIT 1
# => 1
Album.max(:id)
# SELECT max(id) AS max FROM albums LIMIT 1
# => 2
= Reflection
Sequel supports reflection information in multiple ways.
== Adapter in Use
You can get the adapter in use using Database#adapter_scheme:
DB.adapter_scheme # :postgres, :jdbc, :odbc
== Database Connected To
In some cases, the adapter scheme will be the same as the database to which you are connecting. However, many adapters support multiple databases. You can use the Database#database_type method to get the type of database to which you are connecting:
DB.database_type # :postgres, :h2, :mssql
== Tables in the Database
Database#tables gives an array of table name symbols:
DB.tables # [:table1, :table2, :table3, ...]
== Views in the Database
Database#views gives an array of view name symbols:
DB.views # [:view1, :view2, :view3, ...]
== Indexes on a table
Database#indexes takes a table name and gives a hash of index information. Keys are index names, values are subhashes with the keys :columns and :unique :
DB.indexes(:table1) # {:index1=>{:columns=>[:column1], :unique=>false}, :index2=>{:columns=>[:column2, :column3], :unique=>true}}
Index information generally does not include partial indexes, functional indexes, or indexes on the primary key of the table.
== Foreign Key Information for a Table
Database#foreign_key_list takes a table name and gives an array of hashes of foreign key information:
DB.foreign_key_list(:table1) # [{:columns=>[:column1], :table=>:referenced_table, :key=>[:referenced_column1]}]
At least the following entries will be present in the hash:
:columns :: An array of columns in the given table
:table :: The table referenced by the columns
:key :: An array of columns referenced (in the table specified by :table), but can be nil on certain adapters
if the primary key is referenced.
The hash may also contain entries for:
:deferrable :: Whether the constraint is deferrable
:name :: The name of the constraint
:on_delete :: The action to take ON DELETE
:on_update :: The action to take ON UPDATE
== Column Information for a Table
Database#schema takes a table symbol and returns column information in an array with each element being an array with two elements. The first element of each subarray is a column symbol, and the second element is a hash of information about that column. The hash should include the following keys:
:allow_null :: Whether NULL/nil is an allowed value for this column. Used by the Sequel::Model typecasting code.
:db_type :: The type of column the database provided, as a string. Used by the schema_dumper plugin for a more specific type translation.
:default :: The default value of the column, as either a string or nil. Uses a database specific format. Used by the schema_dumper plugin for converting to a ruby value.
:primary_key :: Whether this column is one of the primary key columns for the table. Used by the Sequel::Model code to determine primary key columns.
:ruby_default :: The default value of the column as a ruby object, or nil if there is no default or the default could not be successfully parsed into a ruby object.
:type :: The type of column, as a symbol (e.g. :string). Used by the Sequel::Model typecasting code.
Example:
DB.schema(:table) # [[:column1, {:allow_null=>true, :db_type=>'varchar(255)', :default=>'blah', :primary_key=>false, :type=>:string}], ...]
== Column Information for a Model
Model.db_schema returns pretty much the same information, except it returns it as a hash with column keys instead of an array of two element arrays.
Model.db_schema # {:column1=>{:allow_null=>true, :db_type=>'varchar(255)', :default=>'blah', :primary_key=>false, :type=>:string}, ...}
== Columns used by a dataset/model
Dataset#columns returns the columns of the current dataset as an array of symbols:
DB[:table].columns # [:column1, :column2, :column3, ...]
Dataset#columns! does the same thing, except it ignores any cached value. In general, the cached value should never be incorrect, unless the database schema is changed after the dataset is created.
DB[:table].columns! # [:column1, :column2, :column3, ...]
Model.columns does the same thing as Dataset#columns, using the model's dataset:
Model.columns # [:column1, :column2, :column3, ...]
== Associations Defined
Sequel::Model offers complete introspection capability for all associations.
You can get an array of association symbols with Model.associations:
Model.associations # [:association1, :association2, ...]
You can get the association reflection for a single association via Model.association_reflection. Association reflections are subclasses of hash:
Model.association_reflection(:association1) # => #<Sequel::Model::Associations::AssociationReflection ...>
You can get an array of all association reflections via Model.all_association_reflections:
Model.all_association_reflections # => [#<Sequel::Model::Associations::AssociationReflection ...>, ...]
Finally, you can get a hash of association reflections via Model.association_reflections:
Model.association_reflections # => {:association1=>#<Sequel::Model::Associations::AssociationReflection ...>, ...}
== Validations Defined
When using the validation_class_methods plugin, you can use the validation_reflections class method to get a hash with validation reflection information. This returns a hash keyed on the column name symbol:
Model.validation_reflections[:column] # => [[:presence, {}], [:length, {:maximum=>255, :message=>'is just too long'}]]
Similarly, when using the constraint_validations plugin, you can use the constraint_validation_reflections class method:
Model.constraint_validation_reflections[:column] # => [[:presence, {}], [:max_length, {:argument=>255, :message=>'is just too long'}]]
=== New code organization
Sequel is now divided into two parts: sequel_core and sequel_model.
These two parts are distributed as two separate gems. The sequel gem
bundles sequel_core and sequel_model together. If you don't use
Sequel::Model in your code, you can just install and use sequel_core.
=== New model hooks implementation
The hooks implementation has been rewritten from scratch; it is much
more robust and offers a few new features:
* More ways to define hooks: hooks can now be defined by supplying a
block or a method name, or by overriding the hook instance method.
* Inheritable hooks: Hooks can now be inherited, which means that you
can define general hooks in a model superclass, and use them in
subclasses. You can also define global hooks on Sequel::Model that
will be invoked for all model classes.
* Hook chains can be broken by returning false from within the hook.
* New after_initialize hook, invoked after instance initialization.
* The hook invocation order can no longer be changed. Hooks are
invoked in order of definition, from the top of the class hierarchy
(that is, from Sequel::Model) down to the specific class.
=== Miscellanea
* Removed deprecated adapter stubs, and all other deprecations in both
sequel_core and sequel_model.
* Fixed String#to_time to raise error correctly for invalid time
stamps.
* Fixed error behavior when parse_tree or ruby2ruby are not available.
sequel-5.63.0/doc/release_notes/1.1.txt 0000664 0000000 0000000 00000010564 14342141206 0017543 0 ustar 00root root 0000000 0000000 === DRY Sequel models
With the new Sequel release you no longer need to explicitly specify the table
name for each model class, assuming your model name is the singular of the
table name (just like in ActiveRecord or DataMapper):
class UglyBug < Sequel::Model
end
UglyBug.table_name #=> :ugly_bugs
=== New model validations and support for virtual attributes
Sequel models now include validation functionality which largely follows the
validations offered in ActiveRecord. Validations can be checked anytime by
calling Model#valid?, with validation errors accessible through Model#errors:
class Item < Sequel::Model
validates_presence_of :name
end
my_item = Item.new
my_item.valid? #=> false
my_item.errors.full_messages #=> ["name is not present"]
The Model#save method has been changed to check for validity before saving. If
the model instance is not valid, the #save method returns false without saving
the instance. You can also bypass the validity test by calling Model#save!
instead.
Model classes also now support virtual attributes, letting you assign values to
any attribute (virtual or persistent) at initialization time:
class User < Sequel::Model
attr_accessor :password
end
u = User.new(:password => 'blah', ...)
u.password #=> 'blah'
Also, virtual attributes can be validated just like persistent
attributes.
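For example, a sketch building on the User class above:
class User < Sequel::Model
  attr_accessor :password
  validates_presence_of :password # validates the virtual attribute
end
User.new.valid? #=> false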
=== Other changes (long list!)
* Added Model#reload as alias to Model#refresh.
* Changed Model.create to accept a block (#126).
* Fixed Model#initialize to accept nil values (#115).
* Added Model#update_with_params method with support for virtual
attributes and auto-filtering of unrelated parameters, and changed
Model.create_with_params to support virtual attributes (#128).
* Fixed Model.dataset to correctly set the dataset if using implicit
naming or inheriting the superclass dataset (thanks celldee).
* Finalized support for virtual attributes.
* Fixed Model#set to work with string keys (#143).
* Fixed Model.create to correctly initialize instances marked as new
(#135).
* Fixed Model#initialize to convert string keys into symbol keys. This
also fixes problem with validating objects initialized with string
keys (#136).
* Added Dataset#table_exists? convenience method.
* Changed Dataset#group_and_count to accept multiple columns (#134).
* Added Dataset#select_all method.
* Added Dataset#select_more, Dataset#order_more methods (#129).
* Fixed Dataset#count to work correctly for grouped datasets (#144).
* Fixed joining datasets using aliased tables (#140).
* Added support for UNSIGNED constraint, used in MySQL (#127).
* Implemented constraint definitions inside Database#create_table.
* Enhanced Database.connect to accept options with string keys, so it
can now accept options loaded from YAML files. Database.connect also
automatically converts :username option into :user for compatibility
with existing YAML configuration files for AR and DataMapper.
* Changed ODBC::Database to support connection using driver and
database name, also added support for untitled columns in
ODBC::Dataset (thanks Leonid Borisenko).
* Changed MySQL adapter to support specifying socket option.
* Fixed MySQL adapter to correctly format foreign key definitions
(#123).
* Changed MySQL::Dataset to allow HAVING clause on ungrouped datasets,
and put HAVING clause before ORDER BY clause (#133).
* Changed mysql adapter to default to localhost if :host option is not
specified (#114).
* Added String#to_date. Updated mysql adapter to use String#to_date
for mysql date types (thanks drfreeze).
* Fixed postgres adapter to define PGconn#async_exec as alias to #exec
if not defined (for pure-ruby postgres driver).
* Changed postgres adapter to quote column references using double
quotes.
* Applied patch for oracle adapter: fix behavior of limit and offset,
transactions, #table_exists?, #tables and additional specs (thanks
Liming Lian #122).
* Added support additional field types in postgresql adapter (#146).
* Added support for date field types in postgresql adapter (#145).
* Added support for limiting and paginating datasets with fixed SQL,
e.g. using Database#fetch.
* Added new Dataset#from_self method that returns a dataset selecting
from the original dataset (see the sketch after this list).
* Allow for additional filters on a grouped dataset (#119 and #120)
* Refactored Sequelizer to use Proc#to_sexp (method provided by r2r).
* Fixed bin/sequel to require sequel_model if available.
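A sketch of the new Dataset#from_self behavior (the SQL shown is
approximate):
ds = DB[:items].group(:category)
ds.from_self.sql
#=> 'SELECT * FROM (SELECT * FROM items GROUP BY category)'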
sequel-5.63.0/doc/release_notes/1.3.txt 0000664 0000000 0000000 00000006067 14342141206 0017550 0 ustar 00root root 0000000 0000000 === Better model associations
The latest release of sequel_model includes a new associations
functionality written by Jeremy Evans which replaces the old relations
code in previous versions. Please note that this version is not
completely backward-compatible and you should therefore upgrade with
caution.
The new implementation supports three kinds of relations: one_to_many,
many_to_one and many_to_many, which correspond to has_many, belongs_to
and has_and_belongs_to_many relations in ActiveRecord. In fact, the
new implementation includes aliases for ActiveRecord assocation macros
and is basically compatible with ActiveRecord conventions. It also
supports DRY implicit class name references. Here's a simple example:
class Author < Sequel::Model
has_many :books # equivalent to one_to_many
end
class Book < Sequel::Model
belongs_to :author # equivalent to many_to_one
has_and_belongs_to_many :categories # equivalent to many_to_many
end
class Category < Sequel::Model
has_and_belongs_to_many :books
end
These macros will create the following methods:
* Author#books, Author#add_book, Author#remove_book
* Book#author, Book#categories, Book#add_category,
Book#remove_category
* Category#books, Category#add_book, Category#remove_book
Unlike ActiveRecord, one_to_many and many_to_many association methods
return a dataset:
a = Author[1234]
a.books.sql #=> 'SELECT * FROM books WHERE (author_id = 1234)'
You can also tell Sequel to cache the association result set and
return it as an array:
class Author < Sequel::Model
has_many :books, :cache => true
end
Author[1234].books.class #=> Array
You can of course bypass the defaults and specify class names and key
names:
class Node < Sequel::Model
belongs_to :parent, :class => Node
belongs_to :session, :key => :producer_id
end
Another useful option is :order, which sets the order for the
association dataset:
class Author < Sequel::Model
has_many :books, :order => :title
end
Author[1234].books.sql #=> 'SELECT * FROM books WHERE (author_id =
1234) ORDER BY title'
More information about associations can be found in the Sequel
documentation.
=== Other changes
* Added configuration file for running specs (#186).
* Changed Database#drop_index to accept fixed arity (#173).
* Changed column definition sql to put UNSIGNED constraint before
unique in order to satisfy MySQL (#171).
* Enhanced MySQL adapter to support LOAD DATA LOCAL INFILE, added
compress option for mysql connection by default (#172).
* Fixed bug when inserting hashes in array tuples mode.
* Changed SQLite adapter to catch RuntimeError raised when executing a
statement and raise Error::InvalidStatement with the offending SQL and
error message (#188).
* Fixed Dataset#reverse to not raise for unordered dataset (#189).
* Added Dataset#unordered method and changed #order to remove order if
nil is specified (#190). See the sketch after this list.
* Fixed reversing order of ASC expression (#164).
* Added support for :null => true option when defining table columns
(#192).
* Fixed Symbol#method_missing to accept variable arity (#185).
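A sketch of the Dataset#unordered and #order(nil) behavior:
ds = DB[:items].order(:price)
ds.unordered.sql #=> 'SELECT * FROM items'
ds.order(nil).sql #=> 'SELECT * FROM items'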
sequel-5.63.0/doc/release_notes/1.4.0.txt 0000664 0000000 0000000 00000004616 14342141206 0017705 0 ustar 00root root 0000000 0000000 Eager loading for all types of associations:
Artist.eager(:albums).all
Album.eager(:artist, :genre, :tracks).all
Album.eager(:artist).eager(:genre).eager(:tracks).all
Album.filter(:year=>2008).eager(:artist).all
Eager loading supports cascading to an unlimited depth, and doesn't have
any aliasing issues:
Artist.eager(:albums=>:tracks).all
Artist.eager(:albums=>{:tracks=>:genre}).all
Unfortunately, eager loading comes at the expense of a small amount of
backward compatibility. If you were using uncached associations (the
default in sequel_model 0.5), they no longer work the same way. Now,
all associations act as if :cache=>true (which is now set for all
associations, so if you wrote a tool that worked with both cached and
uncached associations, it should still work).
One to many associations now populate the corresponding many to one
instance variable (even when eagerly loaded):
# Assuming: Album.one_to_many :tracks
album = Album.first
# The following code is only one query,
# not a query for the album and one for each track
album.tracks.each{|t| puts t.album.name}
ActiveRecord style has_many :through associations are now supported via
many_to_many. many_to_many will no longer select the entire result set,
just the columns of the associated table (and not the join table), so it
works for both has_and_belongs_to_many (simple join table) and has_many
:through (join table model) scenarios. If you want to include all or
part of the join table attributes, see the :select option for
many_to_many associations.
We reduced the number of gems from three (sequel, sequel_core,
sequel_model) to two (sequel, sequel_core). Basically, sequel_model is
now just sequel, and the old sequel gem metapackage no longer exists.
There isn't a reason to have a gem metapackage for two gems when one
(sequel_model) depends on the other (sequel_core). This required a
version bump for the model part of sequel from 0.5.0.2 to 1.4.0 (since
the previous sequel gem version was 1.3).
Sequel 1.4.0 has fixes for 11 tracker issues, including fixes to the
MySQL and PostgreSQL adapters.
We have switched the source control repository for Sequel from Google
Code (which uses subversion) to github (which uses git). If you would
like to contribute to Sequel, please fork the github repository, make
your changes, and send a pull request. As before, posting patches on
the Google Code issue tracker is fine as well.
sequel-5.63.0/doc/release_notes/1.5.0.txt 0000664 0000000 0000000 00000011710 14342141206 0017677 0 ustar 00root root 0000000 0000000 You can now graph a dataset and have the result split into component
tables:
DB[:artists].graph(:albums, :artist_id=>:id).first
# => {:artists=>{:id=>artists.id, :name=>artists.name}, \
# :albums=>{:id=>albums.id, :name=>albums.name,
:artist_id=>albums.artist_id}}
This aliases columns if necessary so they don't stomp on each other,
which is what usually happens if you just join the tables:
DB[:artists].left_outer_join(:albums, :artist_id=>:id).first
# => {:id=>(albums.id||artists.id),
:name=>(albums.name||artists.name), \
:artist_id=>albums.artist_id}
Models can use graph as well, in which case the values will be model
objects:
Artist.graph(Album, :artist_id=>:id)
# => {:artists=>#, :albums=>#}
Models can now eager load via .eager_graph, which will load all the
results and all associations in a single query. This is necessary if
you want to filter on columns in associated tables. It works exactly
the same way as .eager, and supports cascading of associations as
well:
# Artist.one_to_many :albums
# Album.one_to_many :tracks
# Track.many_to_one :genre
Artist.eager_graph(:albums=>{:tracks=>:genre}).filter( \
:tracks_name=>"Firewire").all
This will give you all artists that have an album with a track named
"Firewire", and calling .albums on one of those artists will only return
albums that have a track named "Firewire", and calling .tracks on one of
those albums will return only the track(s) named "Firewire".
You can use set_graph_aliases to select specific columns:
DB[:artists].graph(:albums, :artist_id=>:id).set_graph_aliases( \
:artist_name=>[:artists, :name], :album_name=>[:albums,
:name]).first
# => {:artists=>{:name=>artists.name}, :albums=>{:name=>albums.name}}
You can use eager_graph with set_graph_aliases to have eager loading
with control over the SELECT clause.
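For example, a sketch combining the two (using the associations from
the examples above):
Artist.eager_graph(:albums).set_graph_aliases( \
:artist_name=>[:artists, :name], :album_name=>[:albums, :name]).all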
All associations now update their reciprocal associations whenever the
association methods are used, so you don't need to refresh the
association or model to have the reciprocal association updated:
Album.many_to_one :band
Band.one_to_many :albums
# Note that all of these associations are cached,
# so after the first access there are no additional
# database queries to fetch associated records.
# many_to_one setter adds to reciprocal association
band1.albums # => []
album1.band = band1
band1.albums # => [album1]
band2.albums # => []
album1.band = band2
band1.albums # => []
band2.albums # => [album1]
album1.band = band2
band2.albums # => [album1]
album1.band = nil
band2.albums # => []
# one_to_many add_* method sets reciprocal association
# one_to_many remove_* method removes reciprocal association
album1.band # => nil
band1.add_album(album1)
album1.band # => band1
band2.add_album(album1)
album1.band # => band2
band2.remove_album(album1)
album1.band # => nil
Post.many_to_many :tags
Tag.many_to_many :posts
# many_to_many add_* method adds to reciprocal association
# many_to_many remove_* method removes from reciprocal association
post1.tags # => []
tag1.posts # => []
tag1.add_post(post1)
post1.tags # => [tag1]
tag1.posts # => [post1]
tag1.remove_post(post1)
post1.tags # => []
tag1.posts # => []
post1.add_tag(tag1)
post1.tags # => [tag1]
tag1.posts # => [post1]
post1.remove_tag(tag1)
post1.tags # => []
tag1.posts # => []
The MySQL and PostgreSQL adapters now support index types:
index :some_column, :type => :hash # or :spatial, :full_text, :rtree,
etc.
Starting in Sequel 1.5.0, some methods are deprecated. These methods
will be removed in Sequel 2.0.0. The deprecation framework is fairly
flexible. You can choose where the messages get sent:
Sequel::Deprecation.deprecation_message_stream = STDERR # the default
Sequel::Deprecation.deprecation_message_stream = \
File.new('deprecation.txt', 'wb') # A file
Sequel::Deprecation.deprecation_message_stream = nil # ignore the
messages
You can even have all deprecation messages accompanied by a
traceback, so you can see exactly where in your code you are using a
deprecated method:
Sequel::Deprecation.print_tracebacks = true
All deprecation methods come with a message telling you what
alternative code will work.
In addition to deprecating some methods, we removed the ability to
have arrays returned instead of hashes. The array code still had
debugging messages left in it, and we are not aware of anyone using
it. Hashes have been returned by default since Sequel 0.3.
We have also removed the Numeric date/time extensions (e.g.
3.days.ago). The existing extensions were incomplete, better ones are
provided elsewhere, and the extensions were not really related to
Sequel's purpose.
Sequel no longer depends on ParseTree, RubyInline, or ruby2ruby.
They are still required to use the block filters. Sequel's only gem
dependency is on the tiny metaid.
Sequel 1.5.0 has fixes for 12 tracker issues, including fixes to the
Informix, MySQL, ODBC, ADO, JDBC, Postgres, and SQLite adapters.
sequel-5.63.0/doc/release_notes/2.0.0.txt 0000664 0000000 0000000 00000026654 14342141206 0017710 0 ustar 00root root 0000000 0000000 Blockless Filter Expressions
----------------------------
Before 2.0.0, in order to specify complex SQL expressions, you
either had to resort to writing the SQL yourself in a string or
using an expression inside a block that was parsed by ParseTree.
Because ParseTree was required, only ruby 1.8.* was supported, and
supporting other ruby versions (ruby 1.9, JRuby, Rubinius) would
never be possible.
With 2.0.0, you no longer need to use a block to write complex SQL
expressions. The basics of the blockless filters are the usual
arithmetic, inequality, and binary operators:
+ = addition
- = subtraction
* = multiplication
/ = division
> = greater than
< = less than
>= = greater than or equal to
<= = less than or equal to
~ = negation
& = AND
| = OR
You can use these operators on Symbols, LiteralStrings, and other
Sequel::SQL::Expressions. Note that there is no equal or not-equal
operator; to specify those, you use a Hash.
Here are some examples:
# Ruby code => SQL WHERE clause
:active => active
~:active => NOT active
~~:active => active
~~~:active => NOT active
:is_true[] => is_true()
~:is_true[] => NOT is_true()
:x > 100 => (x > 100)
:x < 100.01 => (x < 100.01)
:x <= 0 => (x <= 0)
:x >= 1 => (x >= 1)
~(:x > 100) => (x <= 100)
{:x => 100} => (x = 100)
{:x => 'a'} => (x = 'a')
{:x => nil} => (x IS NULL)
~{:x => 100} => (x != 100)
~{:x => 'a'} => (x != 'a')
~{:x => nil} => (x IS NOT NULL)
{:x => /a/} => (x ~ 'a') # Default, MySQL different
~{:x => /a/} => (x !~ 'a') # Default, MySQL different
:x.like('a') => (x LIKE 'a')
~:x.like('a') => (x NOT LIKE 'a')
:x.like(/a/) => (x ~ 'a') # Default, MySQL different
~:x.like('a', /b/) => ((x NOT LIKE 'a') AND (x !~ 'b')) # Default
~{:x => 1..5} => ((x < 1) OR (x > 5))
~{:x => DB[:items].select(:i)} => (x NOT IN (SELECT i FROM items))
~{:x => [1,2,3]} => (x NOT IN (1, 2, 3))
:x + 1 > 100 => ((x + 1) > 100)
(:x * :y) < 100.01 => ((x * y) < 100.01)
(:x - :y/2) >= 100 => ((x - (y / 2)) >= 100)
(((:x - :y)/(:x + :y))*:z) <= 100 => ((((x - y) / (x + y)) * z) <=
100)
~((((:x - :y)/(:x + :y))*:z) <= 100) => ((((x - y) / (x + y)) * z) >
100)
:x & :y => (x AND y)
:x & :y & :z => ((x AND y) AND z)
:x & {:y => :z} => (x AND (y = z))
{:y => :z} & :x => ((y = z) AND x)
{:x => :a} & {:y => :z} => ((x = a) AND (y = z))
(:x > 200) & (:y < 200) => ((x > 200) AND (y < 200))
:x | :y => (x OR y)
:x | :y | :z => ((x OR y) OR z)
:x | {:y => :z} => (x OR (y = z))
{:y => :z} | :x => ((y = z) OR x)
{:x => :a} | {:y => :z} => ((x = a) OR (y = z))
(:x > 200) | (:y < 200) => ((x > 200) OR (y < 200))
(:x | :y) & :z => ((x OR y) AND z)
:x | (:y & :z) => (x OR (y AND z))
(:x & :w) | (:y & :z) => ((x AND w) OR (y AND z))
~((:x | :y) & :z) => ((NOT x AND NOT y) OR NOT z)
~((:x & :w) | (:y & :z)) => ((NOT x OR NOT w) AND (NOT y OR NOT z))
~((:x > 200) | (:y & :z)) => ((x <= 200) AND (NOT y OR NOT z))
~('x'.lit + 1 > 100) => ((x + 1) <= 100)
'x'.lit.like(/a/) => (x ~ 'a')
None of these require blocks; you can use any of them directly in a
call to filter:
DB[:items].filter((:price * :tax) - :discount > 100)
# => SELECT * FROM items WHERE (((price * tax) - discount) > 100)
DB[:items].filter(:active & ~:archived)
# => SELECT * FROM items WHERE (active AND NOT archived)
SQL String Concatenation
------------------------
Sequel now has support for expressing SQL string concatenation in an
easy way:
[:name, :title].sql_string_join(" - ")
# SQL: name || ' - ' || title
You can use this in selecting columns, creating filters, ordering
datasets, and possibly elsewhere.
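For instance (a sketch; the column names are illustrative):
full_name = [:first_name, :last_name].sql_string_join(' ')
DB[:users].select(full_name)
# SELECT first_name || ' ' || last_name FROM users
DB[:users].order(full_name)
# SELECT * FROM users ORDER BY first_name || ' ' || last_name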
Schema Reflection Support/Typecasting on Assignment
---------------------------------------------------
When used with PostgreSQL, MySQL, or SQLite, Sequel now has the
ability to get information from the database's schema in regards
to column types:
DB.schema(:artist)
=> [[:id, {:type=>:integer, :db_type=>"integer", :max_chars=>0,
:numeric_precision=>32, :allow_null=>false,
:default=>"nextval('artist_id_seq'::regclass)"}], [:name,
{:type=>:string, :default=>nil, :db_type=>"text",
:numeric_precision=>0, :allow_null=>true, :max_chars=>0}]]
Models now use this information to typecast values on attribute
assignment. For example, if you have an integer column named number
and a text (e.g. varchar) column named title:
1.5.1:
model.number = '1'
model.number # => '1'
model.title = 1
model.title # => 1
2.0.0:
model.number = '1'
model.number # => 1
model.title = 1
model.title # => '1'
Typecasting can be turned off on a global, per class, and per object
basis:
Sequel::Model.typecast_on_assignment = false # Global
Album.typecast_on_assignment = false # Per Class
Album.new.typecast_on_assignment = false # Per Object
Typecasting is somewhat strict, it does not allow obviously bogus
data to be used:
model.number = 'a' # Raises error
This is in contrast to how some other ORMs handle the situation:
model.number = 'a'
model.number # => 0
If Sequel is being used with a web framework and you want to display
friendly error messages to the user, you should probably turn
typecasting off and set up the necessary validations in your models.
Model Association Improvements
------------------------------
Associations can now be eagerly loaded even if they have a block,
though the block should not rely on being evaluated in the context
of an instance. This allows you filter on associations when eagerly
loading:
Artist.one_to_many :albums_with_10_tracks, :class=>:Album do |ds|
ds.filter(:num_tracks => 10)
end
Artist.filter(:name.like('A%')).eager(:albums_with_10_tracks).all
# SELECT * FROM artists WHERE (name LIKE 'A%')
# SELECT albums.* FROM albums WHERE ((artist_id IN (...)) AND
# (num_tracks = 10))
Associations now have a remove_all_ method for removing all
associated objects in a single query:
Artist.many_to_many :albums
Artist[1].remove_all_albums
# DELETE FROM albums_artists WHERE artist_id = 1
Artist.one_to_many :albums
Artist[1].remove_all_albums
# UPDATE albums SET artist_id = NULL WHERE artist_id = 1
All associations can specify a :select option to change which columns
are selected. Previously only many to many associations supported
this.
The SQL used when eagerly loading through eager_graph can be
modified via the :graph_join_type, :graph_conditions, and
:graph_join_table_conditions options.
:graph_join_type changes the join type from the default of
:left_outer. This can be useful if you do not want any
albums that don't have an artist in the result set:
Album.many_to_one :artist, :graph_join_type=>:inner
Album.eager_graph(:artist).sql
# SELECT ... FROM albums INNER JOIN artists ...
:graph_conditions adds conditions on the join to the table you are
joining, the eager_graph equivalent of an association block argument
in eager. It takes either a hash or an array where all elements
are arrays of length two, similar to join_table, where key symbols
specify columns in the joined table and value symbols specify
columns in the last joined or primary table:
Album.many_to_one :artist, :graph_conditions=>{:active=>true}
Album.eager_graph(:artist).sql
# SELECT ... FROM albums LEFT OUTER JOIN artists ON ((artists.id =
# albums.artist_id) AND (artists.active = 't'))
:graph_join_table_conditions exists for many to many associations only,
and operates the same as :graph_conditions, except it specifies a
condition on the many to many join table instead of the associated
model's table. This is necessary if the join table is also a model
table with other columns on which you may want to filter:
Album.many_to_many :genres, :join_table=>:ag, \
:graph_join_table_conditions=>{:active=>true}
Album.eager_graph(:genres).sql
# SELECT ... FROM albums LEFT OUTER JOIN ag ON ((ag.album_id =
albums.id) AND (ag.active = 't')) LEFT OUTER JOIN genres ON
(genres.id = ag.genre_id)
Other Small Improvements
------------------------
* Dataset#invert returns a dataset that matches all records not
matching the current filter (see the sketch after this list).
* Dataset#unfiltered returns a dataset that has any filters removed.
* Dataset#last_page? and Dataset#first_page? for paginated datasets.
* The sequel command line tool now supports an -E or --echo argument
that logs all SQL to the standard output. It also can take a path
to a yaml file with database connection options, in addition to a
database URL.
* Databases can now have multiple SQL loggers, so you can log to the
standard output as well as a file.
* SQL identifiers (columns and tables) are now quoted by default (you
can turn this off via Sequel.quote_identifiers = false if need be).
* Sequel.connect now takes an optional block that will disconnect the
database when the block finishes.
* AlterTableGenerator now has add_primary_key and add_foreign_key
methods.
* Running the specs without ParseTree installed skips the specs that
require ParseTree.
* You can use an array of arrays instead of a hash when specifying
conditions, which may be necessary in certain situations where
you would be using the same hash key more than once.
* Almost all documentation for Sequel was updated for 2.0.0, so if you
found Sequel documentation lacking before, check out the new RDoc
pages.
* There have been many minor refactoring improvements, the code
should now be easier to read and follow.
* Sequel now has no external dependencies.
* Sequel::Models now have before_validation and after_validation
hooks.
* Sequel::Model hooks that return false cause the methods that call
them (such as save) to return false.
* Sequel::Models can now load their schema on first instantiation,
instead of when they are created, via
Sequel::Model.lazy_load_schema=. This is helpful for certain
web frameworks that reload all models on every request.
* Hook methods that use blocks can now include an optional tag,
which allows them to work well with web frameworks that load source
files every time they are modified.
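A sketch of Dataset#invert and Dataset#unfiltered from the list
above:
ds = DB[:items].filter(:price > 100)
ds.invert.sql # SELECT * FROM items WHERE (price <= 100)
ds.unfiltered.sql # SELECT * FROM items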
The PostgreSQL adapter has been rewritten and now supports ruby-pg.
There have also been improvements in the following adapters: DBI,
MySQL, SQLite, Oracle, and MSSQL.
All of the methods that have been deprecated in 1.5.0 have now been
removed. If you want to upgrade to Sequel 2.0.0 from version 1.4.0
or previous, upgrade to 1.5.1 first, fix all of the deprecation
warnings that show up, and then upgrade to 2.0.0.
There were some backwards incompatible changes made in 2.0.0 beyond the
removal of deprecated methods. These are:
* Inflector is no longer used, the inflection methods were moved
directly into String (where they belong because inflections only
make sense for strings). So to override singularization
or pluralization rules, use String.inflections instead of
Inflector.inflections.
* MySQL tinyints are now returned as boolean values instead of
integers. MySQL doesn't have a boolean type, and usually it
is recommended to use tinyint for a boolean column.
* You can no longer pass an array to Dataset#order or Dataset#select,
you need to pass each argument separately (the * operator is your
friend).
* You must use '?' instead of '(?)' when interpolating an array
argument into a string (e.g. filter('x IN ?', [1,2,3]))
* You must pass an explicit table alias argument to join_table and
related methods, you can no longer include the table alias
inside the table argument.
* sqlite:// URLs now operate the same as file:// URLs (2 slashes
for a relative path, 3 for an absolute path).
sequel-5.63.0/doc/release_notes/2.1.0.txt 0000664 0000000 0000000 00000023075 14342141206 0017703 0 ustar 00root root 0000000 0000000 Model Improvements
------------------
* one_to_many/many_to_many associations now support a :limit option,
adding a limit/offset to the records returned. This was possible
before using a block, so it is just added for convenience.
* Associations now support a :read_only option, which doesn't create
methods that modify the database.
* Associations now support a :graph_select option, which allows
specifying the columns of associated models to include when using
eager_graph.
* one_to_many associations now have a :one_to_one option. When used
it creates a getter and setter method similar to many_to_one. This
fills the same role as ActiveRecord's has_one, but it is
implemented as a couple of convenience methods over one_to_many, so
it still requires that you specify the association name as a
plural.
* Model datasets now have to_hash augmented so that it can be called
without any arguments, in which case it yields an identity map (a
hash with keys being primary key values and values being model
instances).
* The Model.set_sti_key method was added, for easily setting up
single table inheritance. It should be called only in the parent
class.
* Calls to def_dataset_method with a block are now cached and
reapplied to the new dataset if set_dataset is called afterward,
or in a subclass.
* All validation methods can now be made conditional via an :if
option, which takes either a symbol (which specifies an instance
method) or a proc (which is instance_evaled).
* Model#set and Model#update have been added back, they are now
aliases of #set_with_params and #update_with_params.
* Models now have set_only/set_except/update_only/update_except
instance methods that take a hash (like you would provide to
set or update) and additional arguments specifying which columns
to allow or disallow.
* Models now have a set_allowed_columns and set_restricted_columns
methods, which operate similarly to ActiveRecord's attr_accessible
and attr_protected. It is recommended that you use set_only or
update_only instead of these methods, though. You can ignore the
allowed or restricted columns by using #set_all or #update_all.
* The primary key column(s) is restricted by default. To allow it to
be set via new/set/update, use:
Sequel::Model.unrestrict_primary_key # Global
Artist.unrestrict_primary_key # Per Class
* It is now easy to override the one_to_many/many_to_many association
methods that modify the database (add_/remove_/remove_all_), as
they have been broken into two methods, one that handles the
caching features and a private one (prepended with an _) that
handles the database changes (and which you can easily override
without worrying about the caching).
Table Joining
-------------
Dataset#join_table got a nice overhaul. You can now use any join
type your database allows:
DB[:artist].join_table(:natural, :albums)
DB[:numbers].join_table(:cross, :numbers)
You can now specify the conditions as
* String: "a.b = c.d" # ON a.b = c.d
* Expression: :x < :y # ON x < y
* Array of Symbols: [:x, :y, :z] # USING (x, y, z)
* nil # no conditions, used for NATURAL or CROSS joins
Dataset#join_table also takes a block that yields three arguments:
* join_table_alias - The alias/name of the table currently being
joined
* last_join_table_alias - The alias name of the last table joined
(if there was one) or the first FROM table (if not).
* joins - An array of JoinClause objects for all previous joins in
the query.
Using the block you can specify conditions for complex joins without
needing to know in advance what table aliases will be used.
Expanded SQL Syntax Support
---------------------------
SQL Case statements are now supported directly using hashes or
arrays:
{:x > 1 => 1}.case(0)
# CASE WHEN x > 1 THEN 1 ELSE 0 END
[[{:x=>1}, 0], [:x < 1, 1], [:x > 1, 2]].case(-1)
# CASE WHEN x = 1 THEN 0 WHEN x < 1 THEN 1 WHEN x > 1 THEN 2
ELSE -1 END
You should use an array instead of a hash for multiple conditions
unless all conditions are orthogonal.
The SQL extract function has special syntax:
EXTRACT(day FROM date)
This syntax is now supported via the following ruby code:
:date.extract(:day)
Other Notable Changes
---------------------
* The sequel command line tool can now run migrations. The -m
option specifies the directory holding the migration files,
and the -M options specifies the version to which to migrate.
* The PostgreSQL adapter supports nested transactions/savepoints.
* The schema parser now understands decimal fields, and will
typecast to BigDecimal.
* PostgreSQL's numeric type is now recognized and returned as
BigDecimal.
* HAVING now comes before ORDER BY, which most databases seem to
prefer. If your database wants HAVING after ORDER BY, please
let us know.
* Symbol#qualify now exists, to specify the table name for a given
symbol, similar to the use of #as to specify an alias. This is
mainly helpful in conjunction with the #join_table block, as that
provides the table aliases to use to qualify the columns inside
the block (see the sketch after this list).
* BitwiseMethods (&, |, ^, ~, <<, >>) have been added to the
NumericExpression class, so you can do the following:
(x + 1) ^ 10 # SQL: (x + 1) ^ 10
~(x + 1) # SQL: ~(x + 1)
Usually, &, |, and ~ operate in a logical manner, but for
NumericExpressions, they take on their usual bitwise meaning,
since logical operations only make sense for booleans.
* #cast_numeric and #cast_string exist for Symbols, Strings, and
other Sequel Expressions, which return the results casted and
wrapped in either NumericExpression or StringExpression, so you
can use the BitwiseMethods (&, |, ^, ~, <<, >>) or
StringConcatenationMethods (+) directly.
* Dataset#to_hash can take only one argument, in which case it uses
that argument to specify the key, and uses the entire hash for the
value.
* Dataset#graph can now take an array of columns to select from the
joined table via the :select option.
* Dataset#filter and similar methods now combine the block and
regular argument conditions if both are given, instead of ignoring
the regular argument conditions.
* Dataset#filter(false) can now be used to make sure that no records
are returned. Dataset#filter(true) also works, but it's a no-op.
Before, these raised errors.
* Dataset#count does a subquery for a dataset using DISTINCT, since
otherwise it would yield a count for the query without DISTINCT.
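A sketch of Symbol#qualify used inside a #join_table block (table
and column names are illustrative):
DB[:artists].join_table(:inner, :albums) do |j, lj, js|
  {:artist_id.qualify(j) => :id.qualify(lj)}
end
# ON albums.artist_id = artists.id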
ParseTree Support Officially Deprecated
---------------------------------------
The support for ParseTree-based block filters has officially been
deprecated and will be removed in Sequel 2.2. To use the
expression filters (which don't require ParseTree) inside blocks,
use:
SEQUEL_NO_PARSE_TREE = true
require 'sequel'
# OR
require 'sequel'
Sequel.use_parse_tree = false
This is the default if ParseTree cannot be loaded. If ParseTree
can be loaded, it remains the default, in order not to immediately
break existing code.
With this set, you can use the expression filters inside of blocks:
dataset.filter{((:x + 1) & 10 < :y) & :z}
That doesn't gain you all that much, but there are some methods
that feed block arguments into filter, such as the following:
dataset.first(5){((:x + 1) & 10 < :y) & :z}
Which will get you the first 5 records matching the condition.
Backwards Incompatible Changes
------------------------------
* To change the datetime class used from Time to DateTime, you
now use:
Sequel.datetime_class = DateTime # instead of Sequel.time_class
* Models now raise errors if you try to access a missing or
restricted method via new/set/update, instead of just silently
skipping that parameter. To get the old behavior:
Sequel::Model.strict_param_setting = false
* The association_dataset method now takes into account the :eager
option and the block argument, where it didn't before. It also
takes into account the new :limit option.
* Association methods now raise errors in most cases if the model
doesn't have a valid primary key.
* Dataset#join_table used to allow a symbol as a conditions argument
as a shortcut for a hash:
DB[:artist].join(:albums, :artist_id)
# ON albums.artist_id = artists.id
With the changes to #join_table, this no longer works. It would
now be interpreted as a boolean column:
DB[:artist].join(:albums, :artist_id)
# ON artists.id
Use the following slightly longer version for the old behavior:
DB[:artist].join(:albums, :artist_id=>:id)
# ON albums.artist_id = artists.id
* MySQL users need to be careful when upgrading, the following code
will once again cause an error:
DB[:artists].each{|artist| DB[:albums].each{|album| ...}}
To fix it, change the code to:
DB[:artists].all{|artist| DB[:albums].each{|album| ...}}
The issue is the MySQL adapter doesn't release the database
connection while running each, and the second call to each gets the
same database connection (while the other query is still running),
because it is in the same thread. Using #all for the outside query
ensures that the database connection is released before the block is
called.
The reason for this change was that the workaround provided for
MySQL could potentially cause issues with transactions for all
adapters.
* String#asc and String#desc are no longer defined, as ordering on a
plain string column should be a no-op. They are still defined
on LiteralStrings.
* You can no longer abuse the SQL::Function syntax to use a table
alias with specified columns (e.g. :table[:col1, :col2, :col3])
or to cast to types (e.g. :x.cast_as(:varchar[20])). Use a
LiteralString in both cases.
sequel-5.63.0/doc/release_notes/2.10.0.txt 0000664 0000000 0000000 00000033450 14342141206 0017761 0 ustar 00root root 0000000 0000000 New Supported Adapters and Databases
------------------------------------
* A DataObjects adapter was added that supports PostgreSQL,
MySQL, and SQLite. DataObjects is the underlying database
library used by DataMapper, and has potential performance
advantages by doing all typecasting in C.
* A Firebird Adapter was added, it requires the modified Ruby
Fb adapter found at http://github.com/wishdev/fb.
* An H2 JDBC subadapter was added, based on the code used in JotBot.
H2 is an embeddable Java database, and may be preferable to using
SQLite on JDBC because SQLite requires native code.
New Core Features
-----------------
* Sequel now has database independent migrations. Before, column
types in migrations were not translated per database, so it was
difficult to set up a migration that worked on multiple databases.
Sequel now accepts ruby classes as database types, in addition to
symbols and strings. If a ruby class is used, it is translated
to the most appropriate database type. Here is an example using
all supported classes (with Sequel's default database type):
DB.create_table(:cats) do
primary_key :id, :type=>Integer # integer
String :a # varchar(255)
column :b, File # blob
Fixnum :c # integer
foreign_key :d, :other_table, :type=>Bignum # bigint
Float :e # double precision
BigDecimal :f # numeric
Date :g # date
DateTime :h # timestamp
Time :i # timestamp
Numeric :j # numeric
TrueClass :k # boolean
FalseClass :l # boolean
end
Type translations were tested on the PostgreSQL, MySQL, SQLite,
and H2 databases. The default translations should work OK for
most databases, but there will probably be a type or two that
doesn't work. Please send in a patch if Sequel uses a column type
that doesn't work on your database.
Note that existing migrations still work fine, in most cases. If
you were using strings or symbols for types before, they should
still work. See the Backwards Compatibility section below for
details.
Also note that this doesn't relate solely to migrations, as any
database schema modification method that accepts types will
accept one of the above classes.
* A ton of internal work was done to better support databases that
fold unquoted identifiers to uppercase (which is the SQL standard).
Sequel now allows you to set a method to call on identifiers going
both into and out of the database. The default is to downcase
identifiers coming out, and upcase identifiers going in, though
this is overridden by the PostgreSQL, MySQL, and SQLite adapters
to not do anything (since they fold to lowercase by default).
The settings are called identifier_input_method and
identifier_output_method, and like most Sequel settings, they can
be set globally, per database, or per dataset:
# Global (use uppercase in ruby and lowercase in the database)
Sequel.identifier_input_method = :downcase
Sequel.identifier_output_method = :upcase
# Per Database (use camelized names in the database, and
# underscored names in ruby)
DB.identifier_input_method = :camelize
DB.identifier_output_method = :underscore
# Per Dataset (obfuscate your database columns!)
class String; def rot_13; tr('A-Za-z', 'N-ZA-Mn-za-m') end end
ds = DB[:table]
ds.identifier_input_method = :rot_13
ds.identifier_output_method = :rot_13
* Schema parsing support was added to the JDBC adapter, using the
JDBC metadata methods. This means that models that use the
JDBC adapter will typecast data in their column setters and
automatically select the correct primary key column(s). This is
currently the only adapter that supports schema parsing when using
an MSSQL or Oracle database.
* Database#create_table now takes options, which you can use to
specify a MySQL engine, charset, and/or collation. You can also
set a default engine, charset, and collation for MySQL to use:
Sequel::MySQL.default_engine = 'InnoDB'
Sequel::MySQL.default_charset = 'utf8'
Sequel::MySQL.default_collate = 'utf8'
The defaults will be used if the options are not provided. If a
default engine is set, you can specify :engine=>nil to not use it
(same goes for charset and collate).
* The Sequel::DatabaseConnectionError exception class was added. It
is raised by the connection pool if there is an error attempting
to instantiate a database connection. Also, if the adapter returns
nil instead of raising an error for faulty connection parameters,
DatabaseConnectionError will be raised immediately, instead of the
connection pool busy waiting until it gives up with a
PoolTimeoutError.
* Database#tables is now supported on the JDBC adapter, returning
an Array of table name symbols.
* Sequel now converts the following Java types returned by the JDBC
adapter into ruby types: Java::JavaSQL::Timestamp,
Java::JavaSQL::Time, Java::JavaSQL::Date,
Java::JavaMath::BigDecimal, and Java::JavaIo::BufferedReader.
* When using the PostgreSQL adapter with the postgres-pr driver,
Sequel will use a custom string escaping routine unless
force_standard_strings = false. This means that using Sequel's
defaults, postgres-pr will correctly escape strings now.
* The SQLite adapter now returns float, real, and double precision
columns as Floats.
* The SQLite adapter logs beginning, committing, and rolling back
transactions.
* Sequel now has an internal version (before, the only way to tell
the version was to look at the gem being used). It is accessible
at Sequel.version.
New Model Features
------------------
* A new validates_not_string validation was added for Sequel Models.
It is intended to be used with the raise_on_typecast_failure =
false setting. In this case, for a non-string database column,
if there is a string value when the record is going to be
saved, it is due to the fact that Sequel was not able to typecast
the given data correctly (so it is almost certainly not valid).
This should make Sequel easier to use with web applications.
* An :allow_missing validation option was added to all standard
validations. This option skips the validation if the attribute
is not in the object's values. It is different from :allow_nil,
which will skip the value if it is present but nil in the values.
The intended use case for this option is when the database provides
a good default. If the attribute is not present in values, the
database will use its default. If the attribute is present in
the values but equals nil, Sequel will attempt to insert it into
the database as a NULL value, instead of using the database's
default. If you don't want Sequel to insert a NULL value in the
database, but you want the database to provide the default, this
is the option to use (see the sketch after this list).
* validates_each now accepts :allow_nil and :allow_blank options,
so it is easier to create custom validations with the same options
as the standard validations.
* Before_* hooks now run in the reverse order that they were added.
The purpose of hooks is to wrap existing functionality, and making
later before_* hooks run before previous before_* hooks is the
correct behavior.
* You can now add your own hook types, via Model.add_hook_type. This
is intended for plugin use. All of the standard hooks are now
implemented using this method.
* The value of new? in a after_save hook now reflects the
previous state of the model (so true for a create and false for an
update), instead of always being false. This makes it easier
to have a complex after_save hook that still needs to
differentiate between a newly created record and an updated record,
without having to add separate after_create and after_update
hooks.
* The value of changed_columns in an after_update hook now reflects
the value before the update occurred, instead of usually being
empty. Previously, to have this functionality, you generally had
to save the value to an instance variable in a before_update hook
so you could reference it in the after_update hook.
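A sketch of the :allow_missing option described in the list above
(the model and column names are illustrative):
class Item < Sequel::Model
  # Skip this check when :quantity is not in the values hash,
  # so the database default is used
  validates_numericality_of :quantity, :allow_missing=>true
end
Item.create(:name=>'widget') # :quantity omitted, database default used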
Other Improvements
------------------
* Sequel no longer overwrites the following Symbol instance methods
when running on ruby 1.9: [], <, <=, >, and >=. One of Sequel's
principles is that it does not override methods defined by ruby,
and now that ruby 1.9 defines the above methods on Symbol, Sequel
shouldn't be overwriting them.
Sequel already provides a way to work around this issue when
another library adds the same methods to Symbol that Sequel does.
For example, you need to change the following:
dataset.filter(:number > 1)
dataset.filter(:number >= 2)
dataset.filter(:name < 'M')
dataset.filter(:name <= 'I')
dataset.filter(:is_bool[:x])
To:
dataset.filter{|o| o.number > 1}
dataset.filter{|o| o.number >= 2}
dataset.filter{|o| o.name < 'M'}
dataset.filter{|o| o.name <= 'I'}
dataset.filter{|o| o.is_bool(:x)}
The argument provided to the block is a Sequel::SQL::VirtualRow.
This class uses method_missing so that any methods called on it
return Sequel::SQL::Identifiers (if no arguments are provided)
or Sequel::SQL::Function (if arguments are provided).
If you were using one of the above symbol methods outside of a
filter, you can call sql_string, sql_number, or sql_function
on the symbol. So the following would also work:
dataset.filter(:number.sql_number > 1)
dataset.filter(:number.sql_number >= 2)
dataset.filter(:name.sql_string < 'M')
dataset.filter(:name.sql_string <= 'I')
dataset.filter(:is_bool.sql_function(:x))
Using the block argument makes for a nicer API, though, so I
recommend using it when possible.
Note that if you are running ruby 1.8 or jruby without the --1.9
flag, you don't need to worry. If you are running ruby 1.9 or
jruby --1.9, or you plan to at some point in the future, you
should inspect your code for existing uses of these methods.
Here are a couple command lines that should find most uses:
# Find :symbol[]
egrep -nr ':['\''"]?[a-zA-Z_0-9]*['\''"]?\[' *
# Find :symbol (<|>|<=|>=)
egrep -nr '[^:]:['\''"]?[a-zA-Z_0-9]*['\''"]? *[<>]=?' *
* Database#quote_identifiers now affects future schema modifications
when using the database. Previously, it only affected future
schema modifications if a schema modification method had not yet
been called.
* Literalization of Times and DateTimes is now correct when using the
MySQL JDBC subadapter.
* Literalization of Blobs is now correct when using the PostgreSQL
JDBC subadapter.
* Index and table names are quoted when creating indices in the
PostgreSQL adapter.
* Dataset#delete was changed in the SQLite adapter to add a
where clause that is always true, instead of doing an explicit
count first and then deleting. This is simpler, though it
could potentially have performance implications.
* The sequel command line tool now supports symbol keys and unnested
hashes in YAML files, so it should work with Merb's database.yml.
It also includes the error class in the case of an error.
* The integration type tests were greatly expanded. Generally,
running the integration tests is a good way to determine how well
your database is supported.
* Dataset#quote_identifier now returns LiteralStrings as-is, instead
of treating them as regular strings.
* Sequel no longer modifies the MySQL::Result class when using the
MySQL adapter.
Backwards Compatibility
-----------------------
* If you were previously using a database that returned uppercase
identifiers, it will probably return lowercase identifiers by
default now. To get back the old behavior:
DB.identifier_output_method = nil
* The module hierarchy under Sequel::SQL has changed. Now,
modules do not include other modules, and the following modules
were removed since they would have been empty after removing
the modules they included: Sequel::SQL::SpecificExpressionMethods
and Sequel::SQL::GenericExpressionMethods.
* Sequel no longer assumes the public schema by default when
connecting to PostgreSQL. You can still set the default
schema to use (even to public).
* The ability to load schema information for all tables at once
was removed from the PostgreSQL adapter. While it worked, it had
some issues, and it was difficult to keep it working when some
new features were used. This ability wasn't exposed to the user,
and was purely an optimization. If you have any code like:
DB.schema
by itself after the Database object was instantiated, you should
remove it.
* The Database#primary_key API changed in the PostgreSQL shared
adapter, it now accepts an options hash with :server and :conn keys
instead of a server symbol. Also, quite a few private Database
instance methods changed, as well as some constants in the
AdapterMethods.
* It is possible that some migrations will break, though it is
unlikely. If you were using any of the classes mentioned above
as a method inside a migration, it might be broken. However,
since String, Float, and Integer wouldn't have worked as methods
before, it is unlikely that anyone used this.
* The meaning of #String, #Integer, and #Float inside
Sequel::SQL::Generator (i.e. inside a Database#create_table
block) has changed. Before, these used to call private Kernel
methods, now, they set up columns with the appropriate database
type.
* The Database#lowercase method in the DBI adapter was removed,
as its use case is now met by the identifier_output_method support.
* Database#uri is now aliased explicitly via a real method, to
allow for easier subclassing.
* You can no longer pass nil as the second argument to
Database#create_table.
sequel-5.63.0/doc/release_notes/2.11.0.txt 0000664 0000000 0000000 00000020022 14342141206 0017751 0 ustar 00root root 0000000 0000000 Optimizations
-------------
* Model.[] was optimized to use static SQL in cases where doing so
should result in the same output. This should result in a 30-40%
performance increase. Since this can be the most significant or
only method call in a web application action, this has potential
to significantly enhance the performance of web application code.
In order for this optimization to have an effect, you need to
make sure that you are calling set_dataset with a Symbol and
not a Dataset object:
# Optimized:
class Foo < Sequel::Model; end
class Foo < Sequel::Model(:foos); end
class Foo < Sequel::Model
set_dataset :foos
end
# Not Optimized, but otherwise equivalent:
class Foo < Sequel::Model(Model.db[:foos]); end
class Foo < Sequel::Model
set_dataset db[:foos]
end
* Dataset#literal was refactored for performance reasons to make
overriding it in subclasses unnecessary. The changes made result
in a 20-25% performance increase. Sequel can spend about 10% of
its time in Dataset#literal, so this may be only a 2% overall
performance improvement.
New Features
------------
* Association datasets now know about the model objects that created
them, as well as the related association reflection. This makes
association extensions much more powerful. For example, you can
now create generic association extensions such as:
module FindOrCreate
def find_or_create(vals)
first(vals) || association_reflection.associated_class. \
create(vals.merge(association_reflection[:key]=> \
model_object.id))
end
end
The above will work for any standard one_to_many association:
Artist.one_to_many :albums, :extend=>FindOrCreate
# Create an album named Foo related to this artist,
# unless such an album already exists
Artist.first.albums_dataset.find_or_create(:name=>'Foo')
Before, the only way to do the above was to use a closure inside
the :dataset option proc, which couldn't be done generically
for multiple associations.
* A :conditions association option was added, which allows simple
filters to be set up without defining :graph_conditions and
an association block:
# 2.10.0
one_to_many(:japanese_verses, :class=>:Verse, \
:graph_conditions=>{:languageid=>3})do |ds|
ds.filter(:languageid=>3)
end
# 2.11.0
one_to_many(:japanese_verses, :class=>:Verse, \
:conditions=>{:languageid=>3})
* A :clone association option was added, which allows you to clone
an existing association. This is most useful when you are dealing
with a legacy schema and had to define the same options redundantly
for each type of association. You can now do:
many_to_many :deputies, :class=>:Employee, \
:join_table=>:employeecurrentaudits, :left_key=>:currentauditid, \
:right_key=>:employeeid, :order=>[:firstname, :lastname] do |ds|
ds.filter(:active).filter(:capacity=>1)
end
many_to_many :project_managers, :clone=>:deputies do |ds|
ds.filter(:active).filter(:capacity=>2)
end
many_to_many :team_leaders, :clone=>:deputies do |ds|
ds.filter(:active).filter(:capacity=>3)
end
All of the above would use the same :class, :join_table, :left_key,
:right_key, and :order options. If you don't provide an
association block, but you are cloning an association that has one,
the cloned association's block is used. You can use the
:block=>nil option to not use a block even if the cloned
association has a block.
* Dataset#select, #select_more, #order, #order_more, and #get all
take a block that yields a Sequel::SQL::VirtualRow instance,
similar to the behavior of filter. This allows for the easier
use of SQL functions on Ruby 1.9:
# 2.10.0
dataset.select(:prettify.sql_function(:name))
# 2.11.0
dataset.select{|o| o.prettify(:name)}
* String#lit can now accept arguments and return an SQL literal
string. This allows you to do things that were previously hard
or at least unnecessarily verbose. For example, you can now
easily use the SQL standard SUBSTRING function:
column = :user
pattern = params[:pattern]
dataset.select{|o| o.substring('? from ?'.lit(column, pattern))}
* A validates_inclusion_of validation method was added to Model. You
can provide a Range or an Array in the :in option to specify the
allowed values:
validates_inclusion_of :value, :in=>1..5
validates_inclusion_of :weekday, :in=>%w'Monday Tuesday ...'
* Dataset#with_sql was added, which returns a copy of the dataset
with static SQL. This is useful if you want to keep the same
row_proc/graph/transform/etc., but want to use your own custom
SQL (see the sketch after this list).
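A sketch of Dataset#with_sql from the list above:
ds = DB[:items].order(:name)
ds = ds.with_sql('SELECT * FROM items WHERE price > 100')
ds.sql #=> 'SELECT * FROM items WHERE price > 100'
ds.all # runs the custom SQL, keeping the dataset's row_proc etc.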
Other Improvements
------------------
* You can now use Sequel's database independent types when casting:
dataset.select(:number.cast(String))
Among other things, the default cast types for cast_string and
cast_numeric now work in the MySQL adapter.
* Model#set_associated_object was added. The many_to_one association
setter method calls it. This allows you to easily override the
association setters for all many_to_one associations of a class
by modifying a single method.
* Typecasting invalid date strings now raises a
Sequel::Error::InvalidValue instead of an argument error, which
means that you can use raise_on_typecast_failure = false and not
have an error raised when an invalid date format is used.
* String#to_sequel_blob was added and should now be used instead
of String#to_blob. sqlite3-ruby defines String#to_blob
differently, which could cause problems.
* Blob columns are now fully supported in the SQLite adapter, with
the hex escape syntax being used for input, and returning columns
of type Sequel::SQL::Blob on output.
* The SQLite adapter drop_column support is now significantly more
robust.
* The SQLite adapter now supports rename_column.
* The MySQL adapter now supports stored procedures with multiple
arguments.
* The MySQL adapter can now be told not to use a compressed
connection to the server via the :compress=>false option.
* The MySQL adapter now sets a default timeout of 30 days to the
database connection, you can change it via the :timeout option,
which accepts a number of seconds.
* The MySQL adapter now sets SQL_AUTO_IS_NULL to false by default,
you can use the :auto_is_null=>true option to not do this.
* The MySQL adapter now sets the encoding option on the database
connection itself, so it works across reconnects.
* Sequel itself no longer uses String#lit or Symbol#* internally, so
it shouldn't break if another library defines them.
* The default index name is now generated correctly if a non-String
or Symbol column is used.
* Some ruby -w warnings have been fixed.
* INSERTs are now sent to the master database instead of the slave
database(s) if using a master/slave database configuration and
PostgreSQL 8.2+ or Firebird.
* DateTime literalization has been fixed in the Firebird adapter.
* Date literalization has been fixed in the H2 JDBC subadapter.
* Release notes for versions from 1.0 to the present are now included
in the Sequel repository and the RDoc documentation, see
http://sequel.rubyforge.org/rdoc/files/doc/release_notes/
Backwards Compatibilty
----------------------
* The optimization of Model.[] may break if you modify the model's
dataset behind its back. Always use Model.set_dataset if you
want to change a Model's dataset.
* Sequel::Dataset::UnsupportedExceptIntersect and
Sequel::Dataset::UnsupportedExceptIntersectAll will now only be
defined if you are using an adapter that requires them.
* The private Model#cache_delete_unless_new method has been removed.
* Sequel::SQL::IrregularFunction was removed, as it was a bad hack
that is not used by Sequel anymore. Unless you were instantiating
it directly or using a plugin/extension that did, this shouldn't
affect you. Using a Sequel::SQL::Function with a
Sequel::SQL::PlaceholderLiteralString is recommended instead, see
the substring example above.
sequel-5.63.0/doc/release_notes/2.12.0.txt 0000664 0000000 0000000 00000046263 14342141206 0017771 0 ustar 00root root 0000000 0000000 Overview
--------
Sequel 2.12 is really just a stepping stone to Sequel 3.0, which will
be released next month. All major changes currently planned for 3.0
have been made in 2.12, but 2.12 contains many features that have
been deprecated and will be removed or moved into extensions or
plugins in 3.0.
Deprecation Logging
-------------------
If you use a deprecated method or feature, Sequel will by default
print a deprecation message and 10 lines of backtrace to standard
error to easily allow you to figure out which code needs to be
updated. You can change where the deprecation messages go and how
many lines of backtrace are given using the following:
# Log deprecation information to a file
Sequel::Deprecation.output = File.open('deprecated.txt', 'wb')
# Use 5 lines of backtrace when logging deprecation messages
Sequel::Deprecation.backtraces = 5
# Use all backtrace lines when logging deprecation messages
Sequel::Deprecation.backtraces = true
# Don't include backtraces in the deprecation logging
  Sequel::Deprecation.backtraces = false
# Turn off all deprecation logging
Sequel::Deprecation.output = nil
Deprecated Features Moving to Extensions
----------------------------------------
* Migrations are being moved into sequel/extensions/migration. There
isn't any reason that they should be loaded in normal use since
they are used so rarely. The sequel command line tool uses this
extension to run the migrations.
* Adding the blank? method to all objects has been moved into
sequel/extensions/blank.
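  For example, after requiring the extension (a sketch; results assume
  the usual blank? semantics):
    require 'sequel/extensions/blank'
    ''.blank?   # => true
    nil.blank?  # => true
    'a'.blank?  # => false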
* Dataset#print and Sequel::PrettyTable have been moved into
sequel/extensions/pretty_table.
* Dataset#query and related methods have been moved into
sequel/extensions/query.
* Dataset#paginate and related methods have been moved into
sequel/extensions/pagination.
* String inflection methods (e.g. "people".singularize) have been
moved into sequel/extensions/inflector.
* String date/time conversion methods (e.g. '2000-01-01'.to_date)
have been moved into sequel/extensions/string_date_time.
Deprecated Model Features Moving to Plugins
-------------------------------------------
* Model validation class methods have been moved to a plugin. Sequel
users are encouraged to write their own validate instance method
instead. A new validation_helpers plugin has been added to make
  this easier; it's explained in the New Features section. If you
want to continue using the validation class methods:
Sequel::Model.plugin :validation_class_methods
* Model hook class methods have been moved to a plugin. Sequel users
are encouraged to write their own hook instance methods, and call
super to get hooks specified in superclasses or plugins. If you
want to continue using the hook class methods:
Sequel::Model.plugin :hook_class_methods
* Model schema methods (e.g. Model.set_schema, Model.create_table,
Model.drop_table) have been moved to a plugin. The use of these
methods has been discouraged for a long time. If you want to use
them:
Sequel::Model.plugin :schema
* Model.set_sti_key has been moved to a plugin. So you should
change:
MyModel.set_sti_key :key_column
to:
MyModel.plugin :single_table_inheritance, :key_column
* Model.set_cache has been moved to a plugin. So you should change:
MyModel.set_cache cache_store, opts
to:
MyModel.plugin :caching, cache_store, opts
* Model.serialize has been moved to a plugin. So you should change:
MyModel.serialize :column, :format=>:yaml
to:
MyModel.plugin :serialization, :yaml, :column
Because the previous serialization support depended on dataset
transforms, the new serialization support is implemented
differently, and behavior may not be identical in all cases.
  However, this should be a drop-in replacement for most users.
Deprecated Features To Be Removed in Sequel 3.0
-----------------------------------------------
* Dataset#transform is deprecated without any replacement planned.
It was announced on the Sequel mailing list that transforms would
be removed unless someone said they needed them, and nobody said
that they did.
* Dataset#multi_insert and Dataset#import are no longer aliases
of each other. Dataset#multi_insert now takes an array of hashes,
and Dataset#import now takes an array of columns and an array
of arrays of values. Using multi_insert with import's API or
vice-versa is deprecated.
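  For example (hypothetical items table):
    # multi_insert takes an array of hashes
    DB[:items].multi_insert([{:name=>'a'}, {:name=>'b'}])
    # import takes an array of columns and an array of arrays of values
    DB[:items].import([:name], [['a'], ['b']])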
* Calling Dataset#[] with no arguments or an integer argument is
deprecated.
* Calling Dataset#map with both an argument and a block is
deprecated.
* Database#multi_threaded? and Database#logger are both deprecated.
* Calling Database#transaction with a symbol to specify which server
to use is deprecated. You should now call it with an option hash
with a :server key.
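  For example (hypothetical :read_only server and items table):
    # Deprecated
    DB.transaction(:read_only){DB[:items].all}
    # Use instead
    DB.transaction(:server=>:read_only){DB[:items].all}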
* Array#extract_options! and Object#is_one_of? are both deprecated.
* The metaprogramming methods taken from metaid are deprecated and
have been moved into Sequel::Metaprogramming. If you want them
available to specific objects/classes, just include or extend with
Sequel::Metaprogramming. If you want all objects to have access to
the metaprogramming methods, install metaid. Note that the
class_def method from metaid doesn't exist in
Sequel::Metaprogramming, since it really isn't different from
define_method (except it is public instead of private).
* Module#class_attr_overridable, #class_attr_reader, and
#metaalias are deprecated.
* Using Model#set or #update when the columns for the model are not
set and you provide a hash with symbol keys is deprecated.
Basically, you must have setter methods now for any columns used in
#set or #update.
* Model#set_with_params and #update_with_params are deprecated, use
#set and #update instead.
* Model#save! is deprecated, use #save(:validate=>false).
* Model.is and Model.is_a are deprecated, use Model.plugin.
* Model.str_columns, Model#str_columns, #set_values, and
#update_values are deprecated. You should use #set and
#update instead of #set_values and #update_values, though they
operate differently.
* Model.delete_all, Model.destroy_all, Model.size, and Model.uniq
are deprecated, use .delete, .destroy, .count, and .distinct.
* Model.belongs_to, Model.has_many, and Model.has_and_belongs_to_many
are deprecated, use .many_to_one, .one_to_many, and .many_to_many.
* Model#dataset is deprecated, use Model.dataset.
* SQL::CastMethods#cast_as is deprecated, use #cast.
* Calling Database#schema without a table argument is deprecated.
* Dataset#uniq is deprecated, use Dataset#distinct.
* Dataset#symbol_to_column_ref is deprecated, use #literal.
* Dataset#quote_column_ref is deprecated, use #quote_identifier.
* Dataset#size is deprecated, use #count.
* Passing options to Dataset#each, #all, #single_record,
#single_value, #sql, #select_sql, #update, #update_sql, #delete,
#delete_sql, and #exists is deprecated. Modify the options first
using clone or a related method, then call one of the above
methods.
* Dataset#create_view and #create_or_replace_view are deprecated,
use the database methods instead.
* Dataset.dataset_classes, #model_classes, #polymorphic_key, and
#set_model are deprecated.
* Database#>> is deprecated.
* String#to_blob and SQL::Blob#to_blob are deprecated, use
#to_sequel_blob.
* The use of Symbol#| to create array subscripts is deprecated,
use Symbol#sql_subscript.
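  For example:
    # Deprecated
    :array_column|1                 # array_column[1]
    # Use instead
    :array_column.sql_subscript(1)  # array_column[1]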
* Symbol#to_column_ref is deprecated, use Dataset#literal.
* String#expr is deprecated, use String#lit.
* Array#to_sql, String#to_sql, and String#split_sql are deprecated.
* Passing an array to Database#<< is deprecated.
* Range#interval is deprecated.
* Enumerable#send_each is deprecated.
* When using ruby 1.8, Hash#key is deprecated.
* Sequel.open is deprecated, use Sequel.connect.
* Sequel.use_parse_tree and Sequel.use_parse_tree= are deprecated.
* All upcase_identifier methods and the :upcase_identifiers database
option are deprecated, use identifier_input_method = :upcase
instead.
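  For example:
    # Deprecated
    DB.upcase_identifiers = true
    # Use instead
    DB.identifier_input_method = :upcase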
* Using a virtual row block without an argument is deprecated, see
Sequel.virtual_row_instance_eval= under New Features.
* When using the JDBC adapter, Java::JavaSQL::Timestamp#usec is
deprecated. Sequel has returned Java::JavaSQL::Timestamp as
DateTime or Time for a few versions, so this shouldn't affect most
people.
* Sequel will no longer require bigdecimal/util, enumerator, or yaml
in 3.0. If you need them in your code, make sure you require
them yourself. Using features added by requiring these standard
  libraries will not bring up a deprecation warning, for obvious
reasons.
* Sequel::Error::InvalidTransform, Sequel::Error::NoExistingFilter,
and Sequel::Error::InvalidStatement exceptions will be removed in
Sequel 3.0. You will not get a deprecation message if you reference
them in 2.12.
* Sequel::Model::Validation::Errors is deprecated, use
Sequel::Model::Errors instead. Referencing the old name will not
bring up a deprecation message.
New Features
------------
* Sequel.virtual_row_instance_eval= was added, which lets you give
Sequel 2.12 the behavior that will be the standard in 3.0.
It changes blocks passed to Dataset#filter, #select, or #order that
don't accept arguments (or accept any number of arguments) to
instance eval the block in the context of a new VirtualRow
instance instead of passing a new VirtualRow instance to the block.
It allows you to change code that looks like this:
dataset.filter{|o| (o.number > 10) & (o.name > 'M')}
to:
dataset.filter{(number > 10) & (name > 'M')}
When instance_eval is used, only local variables are available
to the block. Any calls to instance methods will be interpreted
as calling VirtualRow#method_missing, which generates identifiers
or functions. When virtual_row_instance_eval is enabled, the
following type of code will break:
    # amount is an instance method
dataset.filter{:number + amount > 0}
Just like this example, the only type of code that should break is
when a virtual row block was used when it wasn't necessary (since
it doesn't use the VirtualRow argument).
When Sequel.virtual_row_instance_eval = false, using a virtual row
block that doesn't accept an argument will cause a deprecation
message.
Here's a regular expression that should catch most places where you
are using a virtual row block without an argument.
egrep -nr '[^A-Za-z0-9_](filter|select|select_more|order|order_more|get|where|having|from|first|and|or|exclude|find|subset|constraint|check)( *(\([^)]*\) *)?){*[^|]' *
An RDoc page explaining virtual row blocks was added as well.
* A validation_helpers model plugin was added that allows you to do
validations similar to the old class level validations inside
the Model#validate instance method. The API has changed, but it's
capable of most of the same validations. It doesn't handle
acceptance_of or confirmation_of validations, as those shouldn't be
handled in the model.
# Old class level validations
validates_format_of :col, :with=>/.../
validates_length_of :col, :maximum=>5
validates_length_of :col, :minimum=>3
validates_length_of :col, :is=>4
validates_length_of :col, :within=>3..5
validates_not_string :col
validates_numericality_of :col
validates_numericality_of :col, :only_integer=>true
validates_presence_of :col
validates_inclusion_of :col, :in=>[3, 4, 5]
validates_uniqueness_of :col, :col2
validates_uniqueness_of([:col, :col2])
# New instance level validations
def validate
validates_format /.../, :col
validates_max_length 5, :col
validates_min_length 3, :col
validates_exact_length 4, :col
validates_length_range 3..5, :col
validates_not_string :col
validates_numeric :col
validates_integer :col
validates_presence :col
validates_includes([3,4,5], :col)
validates_unique :col, :col2
validates_unique([:col, :col2])
end
  Another change: to specify the same type of validation on multiple
  attributes, you must now use an array:
# Old
validates_length_of :name, :password, :within=>3..5
# New
def validate
validates_length_range 3..5, [:name, :password]
end
The :message, :allow_blank, :allow_missing, and :allow_nil options
are still respected. The :tag option is not needed as instance level
validations work with code reloading without workarounds. The :if
option is also not needed for instance level validations:
# Old
validates_presence_of :name, :if=>:new?
    validates_presence_of :pass, :if=>proc{flag > 3}
# New
def validate
validates_presence(:name) if new?
validates_presence(:pass) if flag > 3
end
  validates_each also doesn't have an equivalent instance method,
since it is much easier to just write your own validation:
# Old
validates_each(:date) do |o,a,v|
o.errors.add(a, '...') unless v > Date.today
end
# New
def validate
errors.add(:date, '...') unless date > Date.today
end
* MySQL adapter datasets now have on_duplicate_key_update and
insert_ignore methods which modify the SQL used to support
  ON DUPLICATE KEY UPDATE and INSERT IGNORE syntax in multi_insert
and import.
* If you use the MySQL native adapter, you can set:
Sequel::MySQL.convert_invalid_date_time = nil
to return dates like "0000-00-00" and times like "25:00:00" as
nil values instead of raising an error. You can also set it
to :string to return the values as strings.
* You can now use Sequel without modifying any core classes, by
defining a SEQUEL_NO_CORE_EXTENSIONS constant or environment
variable. In 2.12, this may still add some deprecated methods to
the core classes, but in 3.0 no methods will be added to the core
classes if you use this.
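  For example, one way to do this (assuming the constant is defined
  before Sequel is required):
    SEQUEL_NO_CORE_EXTENSIONS = true
    require 'sequel'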
* You can now use Sequel::Model without the associations
implementation by defining a SEQUEL_NO_ASSOCIATIONS constant or
environment variable.
Other Improvements
------------------
* Model column accessors have been made faster and the overhead of
creating them has been reduced significantly.
* ~{:bool_col=>true} now generates a bool_col IS NOT TRUE filter
instead of bool_col != TRUE. This makes it return records with
NULL values. If you only want to have false records, you should
  use {:bool_col=>false}. This works better with SQL's 3-valued
boolean logic.
It is slightly inconsistent with ~{:col=>1}, since that won't
return values where col is NULL, but it gives the user the ability
to create an IS [NOT] (TRUE|FALSE) filter, which Sequel previously
did not support.
If you really want the old behavior, you can change it to
~{true=>:bool_col}.
* Model.use_transactions was added for setting whether model objects
should use transactions when destroying or saving records. Like
most Sequel options, it's settable on a global, per model, and
per object basis:
Sequel::Model.use_transactions = false
MyModel.use_transactions = true
my_model.use_transactions = false
You can also turn it on or off for specific save calls:
my_model.save(:transaction=>true)
* The Oracle adapter now supports schema parsing.
* When using Model.db=, all current dataset options are copied to
a new dataset created with the new db.
* Model::Errors#count was refactored to improve performance.
* Most exception classes that were located under Sequel::Error are
now located directly under Sequel. The old names are not
deprecated (unless mentioned above), but their use is now
discouraged. The exceptions have the same name except that
Sequel::Error::PoolTimeoutError changed to Sequel::PoolTimeout.
* Dataset#where now always affects the WHERE clause. Before, it
was just an alias of filter, so it modified the HAVING clause
if the dataset already had a HAVING clause.
* The optimization of Model.[] introduced in 2.11.0 broke on
databases that didn't support LIMIT. The optimization now works
on those databases.
* All of the RDoc documentation was reviewed and many updates
were made, resulting in significantly improved documentation
quality.
* Model.def_dataset_method now works when the model doesn't have an
associated dataset, as it will add the method to a dataset
given to set_dataset in the future.
* Database#get and #select now take a block that is passed to
the dataset they create.
* You can disable the use of INSERT RETURNING in the shared
PostgreSQL adapter using disable_insert_returning. This is mostly
useful if you are inserting a large number of records.
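  As a sketch (hypothetical table; assumes disable_insert_returning
  returns a modified copy of the dataset):
    ds = DB[:big_table].disable_insert_returning
    ds.insert(:name=>'a') # plain INSERT, without a RETURNING clause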
* A bug relating to aliasing columns in the JDBC adapter has been
fixed.
* Sequel can now create and drop schema-qualified views.
* Performance of Dataset#destroy for model datasets was improved.
* The specs now run on Rspec 1.2.
* Internal dependence on the methods that Sequel adds to core
classes has been eliminated, any internal use of methods that
Sequel adds to the core classes is now considered a bug.
* A possible bug where Database#rename_table would not remove a
cached schema entry has been fixed.
* The Oracle and MySQL adapters now raise an error as soon as you
call distinct on a dataset, instead of waiting until the SQL is
generated.
Backwards Compatibility
-----------------------
* Saving a newly inserted record in an after_create or after_save
hook is no longer broken. It broke in 2.10 as a side effect of
allowing the hook to check whether or not the record was a new
record. The code has been changed so that a @was_new instance
variable will be defined to true if the record was just created.
Similarly, instead of not modifying changed_columns until after
the after hooks run, a @columns_updated instance variable will
be available in the after hooks that is a hash of exactly what
attribute keys and values were used in the update.
These changes break compatibility with 2.11.0 and 2.10.0, but
restore compatibility with 2.9.0 and previous versions.
* PostgreSQL no longer uses savepoints for nested transactions by
default. If you want to use a savepoint, you have to pass the
:savepoint option to the transaction method. Using savepoints
by default broke expectations when a method raised Rollback to
rollback the transaction, and it only rolled back to the last
savepoint.
* The anonymous model classes created by Sequel::Model() are now
stored in Model::ANONYMOUS_MODEL_CLASSES instead of the @models
class instance variable of the main module.
* The mappings of adapter schemes to classes are now stored in
Sequel::ADAPTER_MAP instead of the Database @@adapters class
variable.
* Model instances no longer contain a reference to their class's
@db_schema.
* Database schema sql methods (e.g. alter_table_sql) are now private.
* Database#[] no longer accepts a block. It's not possible to call
it with a block in general usage, anyway.
* The Sequel::Schema::SQL module no longer exists, the methods it
included were placed directly in the Sequel::Database class.
* The Sequel::SQL::SpecificExpression class has been removed,
subclasses now inherit from Sequel::SQL::Expression.
* Sequel now requires its own files with an absolute path.
* The file hierarchy of the sequel library changed significantly.
sequel-5.63.0/doc/release_notes/2.2.0.txt 0000664 0000000 0000000 00000022515 14342141206 0017702 0 ustar 00root root 0000000 0000000 The Most Powerful and Flexible Associations of Any Ruby ORM
-----------------------------------------------------------
Sequel can now support any association type supported by
ActiveRecord, and many association types ActiveRecord doesn't
support.
Association callbacks (:before_add, :after_add, :before_remove,
:after_remove) have been added, and work for all association
types. Each of the callback options can be a Symbol specifying an
instance method that takes one argument (the associated object), or a
Proc that takes two arguments (the current object and the associated
object), or an array of Symbols and Procs. Additionally, an
:after_load callback is available, which is run after loading the
associated record(s) from the database.
Association extensions are now supported:
  module FindOrCreate
def find_or_create(vals)
first(vals) || create(vals)
end
end
class Author < Sequel::Model
one_to_many :authorships, :extend=>FindOrCreate
end
Author.first.authorships_dataset.find_or_create(:name=>'Bob')
Sequel has been able to support most has_many :through style
associations since 1.3, via many_to_many (since it doesn't break on
join tables that are also model tables, unlike ActiveRecord's
has_and_belongs_to_many). Now it can also support has_many :through
style associations where it goes through a has_many association.
Sequel can now support polymorphic associations. Polymorphic
associations are really a design flaw, so Sequel doesn't support them
directly, but the tools that Sequel gives you make them pretty easy
to implement.
Sequel can also support associations that ActiveRecord does not. For
example, a belongs_to association where the column referenced in the
associated table is not the primary key, an association that depends
on multiple columns in each table, or even situations where the
association has a column in the primary table that can be referenced
by any of multiple columns in a second table that has a has_one style
association with the table you want to associate with.
Some of those associations can be supported for a single object using
custom SQL in ActiveRecord, but none of them are supported when eager
loading, nor do they allow further filtering.
Not only can all of these cases be supported with Sequel::Model, all
can be supported with eager loading, and can allow for further
filtering. See
http://sequel.rubyforge.org/files/sequel/doc/advanced_associations_rdoc.html
for details and example code for all association types covered above.
There have also been many additional options added for controlling
eager loading via eager_graph. Every part of the SQL JOINs can now
be controlled via one of the options, so you can use JOIN USING,
NATURAL JOIN, or arbitrary JOIN ON conditions.
Finally, just to show off the power that Sequel gives you when eager
loading, here is example code that will eagerly load all descendants
and ancestors in a tree structure, without knowing the depth of the
tree:
  class Node < Sequel::Model
    set_schema do
      primary_key :id
      foreign_key :parent_id, :nodes
    end
    create_table
    many_to_one :parent
    one_to_many :children, :key=>:parent_id
    # Only useful when eager loading
    many_to_one :ancestors, :eager_loader=>(proc do |key_hash, nodes, associations|
      # Handle cases where the root node has the same parent_id as
      # primary_key and also when it is NULL
      non_root_nodes = nodes.reject do |n|
        if [nil, n.pk].include?(n.parent_id)
          # Make sure root nodes have their parent association set to nil
          n.associations[:parent] = nil
          true
        else
          false
        end
      end
      unless non_root_nodes.empty?
        id_map = {}
        # Create a map of parent_ids to nodes that have that parent id
        non_root_nodes.each{|n| (id_map[n.parent_id] ||= []) << n}
        # Doesn't cause an infinite loop, because when only the root node
        # is left, this is not called.
        Node.filter(Node.primary_key=>id_map.keys).eager(:ancestors).all do |node|
          # Populate the parent association for each node
          id_map[node.pk].each{|n| n.associations[:parent] = node}
        end
      end
    end)
    many_to_one :descendants, :eager_loader=>(proc do |key_hash, nodes, associations|
      id_map = {}
      nodes.each do |n|
        # Initialize an empty array of child associations for each parent node
        n.associations[:children] = []
        # Populate identity map of nodes
        id_map[n.pk] = n
      end
      # Doesn't cause an infinite loop, because the :eager_loader is not
      # called if no records are returned. Exclude id = parent_id to avoid
      # an infinite loop if the root node is one of the returned records
      # and it has parent_id = id instead of parent_id = NULL.
      Node.filter(:parent_id=>id_map.keys).exclude(:id=>:parent_id).eager(:descendants).all do |node|
        # Get the parent from the identity map
        parent = id_map[node.parent_id]
        # Set the child's parent association to the parent
        node.associations[:parent] = parent
        # Add the child association to the array of children in the parent
        parent.associations[:children] << node
      end
    end)
  end
  nodes = Node.filter(:id < 10).eager(:ancestors, :descendants).all
New Adapter Features
--------------------
* PostgreSQL bytea fields are now fully supported.
* The PostgreSQL adapter now uses the safer connection-specific
string escaping if you are using ruby-pg.
* The SQLite adapter supports drop_column and add_index.
* You can now use URL parameters in the connection string, enabling
you to connect to PostgreSQL via a socket using
postgres://user:password@blah/database?host=/tmp
Other New Features
------------------
* Dataset#graph now takes a block which it passes to join_table.
* Symbol#identifier has been added, which can be used if another
library defines the same operator(s) on Symbol that Sequel defines.
* Filter blocks now yield a VirtualRow instance, which can yield
Identifiers, QualifiedIdentifiers, or Functions. Like
Symbol#identifier, this is useful if another library defines the
same operator(s) on Symbol that Sequel defines.
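  A sketch (hypothetical items table):
    DB[:items].filter{|o| o.price > 100}
    # SELECT * FROM items WHERE (price > 100)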
* You can now call Model.to_hash to get an identity map for all
rows (before this required Model.dataset.to_hash).
* A model that can get its column information from the schema will
set it in the dataset, potentially saving many queries.
* Model.validates_presence_of now works correctly for boolean
columns.
Notable Bug Fixes
-----------------
* Caching now works with Model subclasses.
* Model validation methods now work with source reloading.
* The PostgreSQL adapter no longer raises an Error if you try to
insert a record with the primary key already specified.
* Sequel no longer messes with the native MySQL adapter, so you can
use Sequel and ActiveRecord with MySQL in the same process.
* Dataset#count now works correctly for limited datasets.
* PostgreSQL Database#transaction method yields a connection, similar
to the other adapters.
* A hash argument used in #distinct, #order, or #group is now treated
  as an expression instead of a column alias.
* Cloned datasets no longer ignore the existing columns unless it is
necessary.
* The :quote_identifiers and :single_threaded Database options now
work correctly.
Backwards Incompatible Changes
------------------------------
* ParseTree support, deprecated in 2.1.0, has been removed in 2.2.0.
  You should use the expression filter syntax instead, preferably
without the block (though it can be used inside a block as well).
This usually involves the following types of changes:
filter{:x == :y} => filter(:x => :y)
filter{:x << :y} => filter(:x => :y)
filter{:x && :y} => filter(:x & :y) # Don't forget about change
filter{:x || :y} => filter(:x | :y) # in operator precedence
filter{:x.like?('%blah%')} => filter(:x.like('%blah%'))
filter do => filter((:x > 1) & (:y < 2))
:x > 1
:y < 2
end
* Attempts to save an invalid Model instance will raise an error by
default. To revert to returning a nil value, use:
Sequel::Model.raise_on_save_failure = false # Global
Album.raise_on_save_failure = false # Class
album = Album.new
album.raise_on_save_failure = false # Instance
  Note that save previously returned false, whereas it now returns
  nil if you disable raising on save failure.
* Dataset#update no longer takes a block, as its use of the block
depended on ParseTree. With the introduction of the expression
syntax in 2.0.0, it's no longer necessary. You should use a hash
with an expression as the value instead:
DB[:table].update(:column=>:column + 1)
* validates_presence_of now considers false to be present instead of
absent. This is so it works with boolean columns.
* Dataset#graph ignores any previously selected columns when it is
called for the first time.
* Dataset#columns ignores any filtering, ordering, or distinct
clauses. This shouldn't cause issues unless you were using
SQL functions with side effects and expecting them to be called
when columns was called (unlikely at best).
One significant point of note is that the 2.2.0 release will be the
last release with both a sequel_core and sequel gem. Starting
with 2.3.0 they will be combined into one sequel gem. You will still
be able to get just the sequel_core part by requiring 'sequel_core',
but they will be packaged together.
sequel-5.63.0/doc/release_notes/2.3.0.txt 0000664 0000000 0000000 00000006456 14342141206 0017711 0 ustar 00root root 0000000 0000000 JRuby and Ruby 1.9 Officially Supported
---------------------------------------
Sequel now officially supports JRuby 1.1.3 and Ruby 1.9 (svn revision
18194 at least). When using JRuby with the JDBC adapter, PostgreSQL,
MySQL, and SQLite enjoy almost full support, though not everything
works the same as with the native adapters. Depending on
what you are doing, it may make sense to use postgres-pr on JRuby
instead of PostgreSQL-JDBC.
To use the new JDBC support, the database connection string you give
Sequel is now passed directly to JDBC, here are a few examples:
Sequel.connect('jdbc:postgresql://host/database?user=*&password=*')
Sequel.connect('jdbc:mysql://host/database?user=*&password=*')
Sequel.connect('jdbc:sqlite::memory:')
Sequel.connect('jdbc:sqlite:relative/path.db')
Sequel.connect('jdbc:sqlite:/absolute/path.db')
Single Gem
----------
Sequel is now distributed as a single gem named sequel, by combining
the previous sequel_core and sequel gems. You can still just
"require 'sequel_core'" if you don't want the model functionality.
Database Adapter Improvements
-----------------------------
* Dataset#empty? now works using the MySQL adapter.
* The Oracle adapter now works with a nonstandard database port.
* The JDBC adapter should load JDBC drivers automatically for
PostgreSQL, MySQL, SQLite, Oracle, and MSSQL. For PostgreSQL,
MySQL, and SQLite, the jdbc-* gem can be used, for the others, you
must have the correct .jar in your CLASSPATH.
* The PostgreSQL adapter no longer raises an error when inserting
records into a table without a primary key.
* Database#disconnect now works for the ADO adapter.
* The ADO adapter no longer raises an error if the dataset contains
no records.
* The ODBC adapter no longer errors when converting ::ODBC::Time
values.
Backwards Incompatible Changes
------------------------------
* Sequel::Worker has been removed. There are no known users, and the
specs caused problems on JRuby.
* Assigning an empty string to a non-string, non-blob model attribute
converts it to nil by default. You can use
"Model.typecast_empty_string_to_nil = false" to get the old
behavior. This should make web development with Sequel
significantly easier, hopefully at no expense to other uses.
* Database.uri_to_options is now a private class method.
* Model.create_table! now acts the same as Database.create_table!,
dropping the table unconditionally and then creating it. This was
done for consistency. If you are using Model.create_table! in
production code, you should change it to
"Model.create_table unless Model.table_exists?", otherwise you risk
  wiping out your production data. I recommend you use the
migration feature instead of Model.set_schema, as that handles
altering existing tables.
Other Notable Changes
---------------------
* Using validates_length_of more than once on the same attribute with
different options without a tag no longer causes the first use to
be ignored. This was a side effect of the validation tags added
in 2.2.0.
* Other than the adapters, Sequel now has 100% code coverage (line
coverage).
* Model#set* methods now return self.
* An integration test suite was added, testing Sequel against a live
database with nothing mocked, which helped greatly when testing the
new support for JDBC adapters.
sequel-5.63.0/doc/release_notes/2.4.0.txt 0000664 0000000 0000000 00000007243 14342141206 0017705 0 ustar 00root root 0000000 0000000 Prepared Statements/Bound Variables
===================================
Sequel now supports prepared statements and bound variables. No
matter which database you are using, Sequel uses exactly the same API.
To specify placeholders, you use the :$placeholder syntax:
ds = DB[:items].filter(:name=>:$n)
To use a bound variable:
ds.call(:select, :n=>'Jim')
This will do the equivalent of selecting records that have the name
'Jim'. In addition to :select, you can use :first or :delete. There
is also support for bound variables when inserting or updating
records:
ds.call(:update, {:n=>'Jim', :new_n=>'Bob'}, :name=>:$new_n)
Which will update all records that have the name 'Jim' to have the
name 'Bob'.
Prepared statement support is very similar to bound variable support,
except that the statement is first prepared with a name:
ps = ds.prepare(:select, :select_by_name)
It is then called later with the bound arguments to use:
ps.call(:n=>'Jim')
DB.call(:select_by_name, :n=>'Jim') # same as above
For inserting or updating, the hash to use when inserting or updating
is given to prepare:
ps2 = ds.prepare(:update, :update_name, :name=>:$new_n)
ps2.call(:n=>'Jim', :new_n=>'Bob')
There is some level of native support for these features in the
PostgreSQL, MySQL, SQLite, and JDBC adapters. For other adapters,
support is emulated, but it shouldn't be too difficult to add native
support for them.
For more details see:
http://sequel.rubyforge.org/rdoc/files/doc/prepared_statements_rdoc.html
Read-Only Slave/Writable Master and Database Sharding
=====================================================
Sequel now has built in support for master/slave database
configurations, just by setting an option in Sequel.connect:
DB=Sequel.connect('postgres://master_server/database', \
:servers=>{:read_only=>{:host=>'slave_server'}})
That will use slave_server for SELECT queries and master_server for
other queries. It's fairly easy to use multiple slaves or even
multiple masters, examples are included in the link below.
Sharding support requires some code other than the database
configuration, but is still fairly simple. For example, to set up
a 16 shard configuration based on a hex character:
servers = {}
(('0'..'9').to_a + ('a'..'f').to_a).each do |hex|
servers[hex.to_sym] = {:host=>"hash_host_#{hex}"}
end
DB=Sequel.connect('postgres://hash_host/hashes', :servers=>servers)
To set which shard to use for a query, use the Dataset#server method:
DB[:hashes].server(:a).filter(:hash=>/31337/)
For more details see:
http://sequel.rubyforge.org/rdoc/files/doc/sharding_rdoc.html
Other Changes
=============
* The sequel.rubyforge.org website has a new design thanks to boof.
The online RDoc is now located at http://sequel.rubyforge.org/rdoc.
* Support was added for anonymous column names in the ADO adapter.
* Better MSSQL support in the ADO, ODBC, and JDBC adapters. The
odbc_mssql adapter has been removed. If you use MSSQL with ODBC,
please use the odbc adapter with a :db_type=>'mssql' option.
* The following Sequel::Error exception subclasses were removed:
InvalidExpression, InvalidFilter, InvalidJoinType, and WorkerStop.
* Documentation was added for the PostgreSQL, MySQL, SQLite, and
JDBC adapters.
* Various internal interfaces were refactored. For example, if you
use an adapter not included with Sequel, it probably won't work
until you update it to the new internal API.
* Many low level methods (such as Database#transaction), now take
an optional server argument to indicate which server to use.
* Model plugins that have a DatasetMethods module with non-public
methods no longer have Model methods created that call those
methods.
sequel-5.63.0/doc/release_notes/2.5.0.txt 0000664 0000000 0000000 00000011002 14342141206 0017672 0 ustar 00root root 0000000 0000000 New Features
------------
* The values that are used to insert/update records can now be
scoped similar to how filter expressions can be scoped.
set_defaults is used to set defaults which can be overridden,
and set_overrides is used to set defaults which cannot be
overridden:
DB[:t].set_defaults(:x=>1).insert_sql
# => INSERT INTO t (x) VALUES (1)
DB[:t].set_defaults(:x=>1).insert_sql(:x=>2)
# => INSERT INTO t (x) VALUES (2)
DB[:t].set_defaults(:x=>1).insert_sql(:y=>2)
# => INSERT INTO t (x, y) VALUES (1, 2)
DB[:t].set_overrides(:x=>1).insert_sql(:x=>2)
# => INSERT INTO t (x) VALUES (1)
The difference between set_defaults and set_overrides is that
with set_defaults, the last value takes precedence, while with
set_overrides, the first value takes precedence.
* The schema generators now support creating and altering tables
with composite primary and/or foreign keys:
DB.create_table(:items) do
integer :id
text :name
primary_key [:id, :name]
foreign_key [:id, :name], :other_table, \
:key=>[:item_id, :item_name]
end
DB.alter_table(:items) do
add_primary_key [:id, :name]
add_foreign_key [:id, :name], :other_table, \
:key=>[:item_id, :item_name]
end
* The AlterTableGenerator now supports unique constraints:
DB.alter_table(:items) do
add_unique_constraint [:aaa, :bbb, :ccc], :name => :con3
end
* The schema generators now support ON UPDATE (previously, they only
supported ON DELETE):
DB.create_table(:items) do
foreign_key :project_id, :projects, :on_update => :cascade
end
* When connecting to a PostgreSQL server version 8.2 and higher,
Sequel now uses the INSERT ... RETURNING ... syntax, which should
speed up row inserts on PostgreSQL. In addition, Sequel Models
use RETURNING * to speed up model object creation.
* You can now validate multiple attributes at once. This is useful
if the combination of two or more attribute values is important,
such as checking the uniqueness of multiple columns.
validates_uniqueness_of now supports this directly:
validates_uniqueness_of [:column1, :column2]
This protects against the database having multiple rows with the
same values for both :column1 and :column2. This is different
from:
validates_uniqueness_of :column1, :column2
Which checks that the value of column1 is unique in the table, and
that the value of column2 is unique in the table (which is much
more restrictive).
Other Improvements
------------------
* Dataset methods insert_sql, delete_sql, and update_sql respect the
:sql option, allowing you to do things such as:
ds = DB['INSERT INTO t (time) VALUES (CURRENT_TIMESTAMP)']
ds.insert
ds.insert
* The database adapters (at least MySQL, PostgreSQL, SQLite, and
JDBC) generally raise Sequel::DatabaseError for database problems,
making it easier to tell what is a true database error versus an
error raised by Sequel itself.
* Sequel uses the async features of ruby-pg so that the entire
interpreter is not blocked while waiting for the results of
queries.
* Sequel now supports the 2008.08.17 version of ruby-pg.
* MSSQL support has been improved when using the ODBC and ADO
adapters.
* Index names are now quoted when creating or dropping indexes.
* Automatically generated column accessor methods no longer override
instance methods specified by plugins.
* Inserting a row with an already specified primary key inside a
transaction now works correctly when using PostgreSQL.
* before_save and before_update hooks now work as expected when using
save_changes.
* count and paginate now work correctly on graphed datasets.
Backwards Compatibility
-----------------------
* The SQLite adapter now raises Sequel::DatabaseError instead of
Sequel::Error::InvalidStatement whenever an SQLite3::Exception is
raised by the SQLite3 driver.
* Date and DateTime conversions now convert 2 digit years. To revert
to the previous behavior:
Sequel.convert_two_digit_years = false
Note that Ruby 1.8 and 1.9 handle Date parsing differently, so
there is no backwards compatibility change for Ruby 1.9. However,
this also means that the MM/DD/YY date syntax commonly used in the
United States is not always parsed correctly on Ruby 1.9, greatly
limiting the use of 2 digit year conversion.
* You can no longer abuse the SQL function syntax for specifying
database types. For example, you must change:
:type=>:varchar[255]
to:
:type=>:varchar, :size=>255
sequel-5.63.0/doc/release_notes/2.6.0.txt 0000664 0000000 0000000 00000014054 14342141206 0017705 0 ustar 00root root 0000000 0000000 New Features
------------
* Schema parsing was refactored, resulting in a huge speedup when
using MySQL. MySQL now uses the DESCRIBE statement instead of the
INFORMATION_SCHEMA. PostgreSQL now uses the pg_* system catalogs
instead of the INFORMATION schema.
* The schema information now includes the :primary_key field. Models
now use this field to automatically determine the primary key for
a table, so it no longer needs to be specified explicitly. Models
even handle the composite primary key case.
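  For example (hypothetical albums table; Database#schema returns an
  array of [column_name, info_hash] pairs):
    DB.schema(:albums).map{|col, info| [col, info[:primary_key]]}
    # => [[:id, true], [:name, false]]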
* The raise_on_typecast_failure switch was added, with it being true
by default (so no change in behavior). This allows the user to
silently ignore errors when typecasting fails, at the global, class,
and instance levels.
Sequel::Model.raise_on_typecast_failure = false # Global
Artist.raise_on_typecast_failure = true # Class
artist = Artist.new
artist.raise_on_typecast_failure = false # Instance
Album.raise_on_typecast_failure = true
Album.new(:numtracks=>'a') # => raises Sequel::Error::InvalidValue
Album.raise_on_typecast_failure = false
    Album.new(:numtracks=>'a') # => #<Album @values={:numtracks=>"a"}>
* Associations' orders are now respected when eager loading via
eager_graph. Sequel will qualify the columns in the order with
the alias being used, so you can have overlapping columns when
eager loading multiple associations.
Artist.one_to_many :albums, :order=>:name
Album.one_to_many :tracks, :order=>:number
Artist.order(:artists__name).eager_graph(:albums=>:tracks).sql
# => ... ORDER BY artists.name, albums.name, tracks.number
* The support for CASE expressions has been enhanced by allowing the
use of an optional expression:
{1=>2}.case(0, :x)
# => CASE x WHEN 1 THEN 2 ELSE 0 END
[[:a, 1], [:b, 2], [:c, 3]].case(4, :y)
# => CASE y WHEN a THEN 1 WHEN b THEN 2 WHEN c THEN 3 ELSE 4 END
Previously, to get something equivalent to this, you had to do:
{{:x=>1}=>2}.case(0)
# => CASE WHEN (x = 1) THEN 2 ELSE 0 END
[[{:y=>:a}, 1], [{:y=>:b}, 2], [{:y=>:c}, 3]].case(4)
# => CASE WHEN (y = a) THEN 1 WHEN (y = b) THEN 2 WHEN (y = c)
THEN 3 ELSE 4 END
* You can now change the NULL/NOT NULL value of an existing column
using the set_column_allow_null method.
# Set NOT NULL
DB.alter_table(:artists){set_column_allow_null :name, false}
# Set NULL
DB.alter_table(:artists){set_column_allow_null :name, true}
* You can now get the schema information for a table in a non-public
schema in PostgreSQL using the implicit :schema__table syntax.
Before, the :schema option had to be given explicitly to
Database#schema. This allows models to get schema information for
tables outside the public schema.
* Transactions are now supported on MSSQL.
* Dataset#tables now returns all tables in the database for MySQL
databases accessed via JDBC.
* Database#drop_view can now drop multiple views at once.
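  For example (hypothetical view names):
    DB.drop_view(:cheap_items, :expensive_items)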
Other Improvements
------------------
* The SQLite adapter now respects the Sequel.datetime_class option
for timestamp and datetime columns.
* Adding a unique constraint no longer explicitly creates a unique
index. If you want a unique index, use index :unique=>true.
* If no language is specified when creating a full text index on
PostgreSQL, the simple language is assumed.
* Errors when typecasting fails are now Sequel::Error::InvalidValue
instead of the more generic Sequel::Error.
* Specifying constraints now works correctly for all types of
  arguments. Previously, it did not work unless a block or an
  interpolated string was used.
* Loading an association with the same name as a table in the FROM
clause no longer causes an error.
* When eagerly loading many_to_one associations where no objects have
an associated object, the negative lookup is now cached.
* String keys can now be used with Dataset#multi_insert, just like
they can be used for Dataset#insert.
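  For example (hypothetical items table):
    DB[:items].multi_insert([{'name'=>'a'}, {'name'=>'b'}])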
* Dataset#join_table now generates the correct SQL when doing the
first join to a dataset where the first source is a dataset, when
an unqualified column is used in the conditions.
* Cascading associations after *_to_many associations can now be
eagerly loaded via eager_graph.
* Eagerly loading *_to_many associations that are cascaded behind a
many_to_one association now have their duplicates removed if a
cartesian product join is done.
* The SQLite adapter now uses string literals in all of the AS
clauses. While the SQL standard specifies that identifiers should
be used, SQLite documentation explicitly states that string
literals are expected (though it generally works with identifiers
by converting them implicitly).
* Database methods that modify the schema now remove the cached
schema entry.
* The hash keys that Database#schema returns when no table is
requested are now always supposed to be symbols.
* The generation of SQL for composite foreign keys on MySQL has been
fixed.
* A schema.rdoc file was added to the documentation explaining the
various parts of Sequel related to schema generation and
modification and how they interact
(http://sequel.rubyforge.org/rdoc/files/doc/schema_rdoc.html).
* The RDoc template for the website was changed from the default
template to the hanna template.
Backwards Compatibility
-----------------------
* The :numeric_precision and :max_chars schema entries have been
removed. Use the :db_type entry to determine this information,
if available.
* The SQLite adapter used to always return Time instances for
timestamp types, even if Sequel.datetime_class was DateTime. For
datetime types it always returned a DateTime instance. It
now returns an instance of Sequel.datetime_class in both cases.
* It's possible that the including of associations' orders when eager
loading via eager_graph could cause problems. You can use the
:order_eager_graph=>false option to not use the :order option when
eager loading via :eager_graph.
* There were small changes in SQL creation where the AS keyword is
now used explicitly. These should have no effect, but could break
tests for explicit SQL.
sequel-5.63.0/doc/release_notes/2.7.0.txt 0000664 0000000 0000000 00000014543 14342141206 0017711 0 ustar 00root root 0000000 0000000 Performance Optimizations
-------------------------
* Fetching a large number of records with the PostgreSQL adapter is
significantly faster (up to 3-4 times faster than before).
* Instantiating model objects has been made much faster, as many
options (such as raise_on_save_failure) are now lazily loaded, and
hook methods are now much faster if no hooks have been defined for
that type of hook.
New Association Options
-----------------------
* The :eager_grapher option has been added allowing you to supply
your own block to implement eager loading via eager_graph.
* many_to_one and one_to_many associations now have a :primary_key
option, specifying the name of the column that the :key option
references.
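  A sketch with hypothetical models, where the associated table is
  referenced by a non-primary-key code column:
    Invoice.many_to_one :client, :key=>:client_code, :primary_key=>:code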
* many_to_many associations now have :left_primary_key and
:right_primary_key options, specifying the columns that :left_key
and :right_key reference, respectively.
* many_to_many associations now have a :uniq option, that adds an
:after_load callback that makes the returned array of objects
unique.
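  For example (hypothetical models):
    Album.many_to_many :genres, :uniq=>true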
Other New Features
------------------
* Dataset#set_graph_aliases now allows you to supply a third argument
  for each column you want graphed into the dataset, allowing you to use
arbitrary SQL expressions that are graphed into the correct table:
ds.set_graph_aliases!(:a=>[:b, :c], :d=>[:e, :f, 42])
# SELECT b.c AS a, 42 AS d FROM ...
ds.first # => {:b=>{:c=>?}, :e=>{:f=>42}}
* Dataset#add_graph_aliases was added, that adds additional graph
aliases instead of replacing the existing ones (as
#set_graph_aliases does). It's basically the equivalent of
select_more for graphs.
* Dataset#join_table changed its final argument from a symbol
specifying a table name to an option hash (with backwards
compatibility kept), and adds support for a :implicit_qualifier
option, which it uses instead of the last joined table to
qualify columns.
* Association's :after_load callbacks are now called when eager
loading via eager (but not when eager loading via eager_graph).
* Any expression can now be used as the argument to Symbol#like,
which means that you can pattern match columns to other columns.
Before, it always transformed the argument to a string.
:a.like(:b)
# 2.6.0: a LIKE 'b'
# 2.7.0: a LIKE b
* Array#sql_array was added, allowing you to specify that an array
in ruby be treated like an array in SQL. This is true anyway,
except for arrays of all two pairs, which are treated like hashes,
for specifying multiple conditions with the same key:
DB[:foo].filter([:a,:b] => [[1,2],[3,4]].sql_array)
# => SELECT * FROM foo WHERE ((a, b) IN ((1, 2), (3, 4)))
* ComplexExpression#== and #sql? were added, allowing for easier
testing.
* Full text searching on PostgreSQL now joins multiple columns with
a space, to prevent joining border words, and it works when there
is a match in one column but the other column is NULL.
Other Improvements
------------------
* Instance methods added by creating associations are added to an
anonymous module included by the class, so they can be overridden
in the class while still allowing the use of super to get the
default behavior (this is similar to column accessor methods).
* Many improvements were added to support using multiple schemas in
PostgreSQL.
* Model::Validation::Errors objects are now more compatible with
Rails, by adding a #count method and making #on return nil if there
are no error messages for that attribute.
* Serialized columns in models are no longer typecast.
* Associations are now inherited when a model class is subclassed.
* Many improvements were made that should make adding custom
association types easier.
* A corner case in eager_graph where the wrong table name would be
used to qualify a column name has been fixed.
* Dataset's cached column information is no longer modified if #each
is called with an option that modifies the columns.
* You should now be able to connect to Oracle via the JDBC adapter,
and with the same support it has when using the oracle adapter.
* Model.association_reflections is now a public methods, so you can
grab a hash of all association reflections at once (keyed by
association name symbol).
* The :encoding/:charset option now works in the PostgreSQL adapter
if the postgres-pr driver is used.
* The numeric(x,y) type is now interpreted as decimal.
Backwards Compatibility
-----------------------
* The first argument to Model#initialize must be a hash, you can no
longer use nil. For example, the following code will break if
:album is not in params:
Album.new(params[:album])
Additionally, Model#initialize does not call the block if the
second argument is true.
* The Sequel::Model.lazy_load_schema setting was removed. It should
no longer be necessary now that schema loading is relatively speedy,
and schemas can be loaded at startup and cached.
* The PostgreSQL adapter will default to using a unix socket in /tmp
if no host is specified. Before, a TCP/IP socket to localhost was
used if no host was specified. This change makes Sequel operate
similarly to the PostgreSQL command line tools.
* The ASSOCIATION_TYPES constant has changed from an array to a hash
and it has been moved. The RECIPROCAL_ASSOCIATIONS constant has
been removed. This is unlikely to matter unless you were using
custom association types.
* The PostgreSQL adapter now sets the PostgreSQL DateStyle, in order
to implement an optimization. To turn this off, set
Sequel::Postgres.use_iso_date_format = false.
* When using the PostgreSQL adapter, in many places the schema is
specified explicitly. If you do not specify a schema, a default
one is used (public by default). If you use a schema other than
public for your work, use Database#default_schema= to set it. For
any table outside of the default schema, you should specify the
schema explicitly, even if it is in the PostgreSQL search_path.
* Model::Validation::Errors#on now returns nil instead of [] if there
are no errors for an attribute.
* Hooks added to a superclass after a subclass has been created no
longer have an effect on the subclass.
* The Postgres.string_to_bool method has been removed.
* PostgreSQL full text searching now always defaults to using the
simple dictionary. If you want to use another dictionary, it must
be specified explicitly, both when searching and when creating a
full text index.
sequel-5.63.0/doc/release_notes/2.8.0.txt 0000664 0000000 0000000 00000014370 14342141206 0017710 0 ustar 00root root 0000000 0000000 New Features
------------
* Sequel now supports database stored procedures similar to its
support for prepared statements. The API is as follows:
DB[:table].call_sproc(:select, :mysp, 'param1', 'param2')
# or
sp = DB[:table].prepare_sproc(:select, :mysp)
sp.call('param1', 'param2')
sp.call('param3', 'param4')
This works with Model datasets as well, allowing them to return
model objects:
Album.call_sproc(:select, :new_albums)
    #=> [#<Album ...>, #<Album ...>]
You can call a stored procedure directly on the Database object
if you want to, but the results and API are adapter dependent,
and you definitely shouldn't do it if the stored procedure returns
rows:
DB.call_sproc(:mysp, :args=>['param1', 'param2'])
Currently, the MySQL and JDBC adapters support stored procedures.
Other adapters may support them in a future version.
* The connection pool code can now remove connections if the
adapter raises a Sequel::DatabaseDisconnectError indicating that
the connection has been lost. When a query is attempted and
the adapter raises this error, the connection pool removes the
connection from the pool, and reraises the error. The Oracle and
PostgreSQL adapters currently support this, and other adapters may
support it in a future version.
* Whether to upcase or quote identifiers can now be set separately.
Previously, upcasing was done when quoting except when using SQLite,
PostgreSQL, or MySQL. Now, you can turn upcasing off while still
quoting. This may be necessary if you are using a MSSQL database
that has lower case table names that conflict with reserved words.
It also allows you to uppercase identifiers when using SQLite,
PostgreSQL, or MySQL, which may be beneficial in certain cases.
To turn upcasing on or off:
# Global
Sequel.upcase_identifiers = true
# Database
DB = Sequel.connect("postgres://...", :upcase_identifiers=>true)
DB.upcase_identifiers = false
# Dataset
ds = DB[:items]
ds.upcase_identifiers = true
* Options are now supported when altering a columns type:
DB.alter_table(:items) do
set_column_type :score, :integer, :unsigned=>true
set_column_type :score, :varchar, :size=>30
set_column_type :score, :enum, :elements=>['a', 'b']
end
* Standard conforming strings are now turned on by default in the
PostgreSQL adapter. This makes PostgreSQL not interpret backslash
escapes. This is the PostgreSQL recommended setting, which will be
the default setting in a future version of PostgreSQL. If you
  don't want to force the use of standard strings, use:
Sequel::Postgres.force_standard_strings = false
You need to do that after you call Sequel.connect but before you
use the database for anything, since that setting is set on
initial connection.
* Sequel now raises an error if you attempt to use EXCEPT [ALL] or
INTERSECT [ALL] on a database that doesn't support it.
* Sequel now raises an error if you attempt to use DISTINCT ON with
MySQL or Oracle, which don't support it.
* A subadapter for the Progress RDBMS was added to the ODBC adapter.
To connect to a Progress database, use the :db_type=>'progress'
option. This adapter targets Progress 9.
* The ODBC adapter now supports transactions.
* The MSSQL shared adapter now supports multi_insert (for inserting
multiple rows at once), and unicode string literals.
Other Improvements
------------------
* There were many improvements related to using schemas in databases.
Using schema-qualified tables should work in most if not all cases
now. Model associations, getting the schema, joins, and many other
parts of Sequel were modified to allow the use of schema-qualifed
tables.
* You can now use literal strings with placeholders as well as
subselects when using prepared statements. For example, the
following all work now:
DB[:items].filter("id = ?", :$i).call(:select, :i=>1)
DB[:items].filter(:id=>DB[:items].select(:id)\
.filter(:id=>:$i)).call(:select, :i=>1)
DB["SELECT * FROM items WHERE id = ?", :$i].call(:select, :i=>1)
* Model#initialize received a few more micro-optimizations.
* Model#refresh now clears the changed columns as well as the
associations.
* You can now drop columns inside a transaction when using SQLite.
* You can now submit multiple SQL queries at once in the MySQL
adapter:
DB['SELECT 1; SELECT 2'].all
#=> [{:"1"=>1, :"2"=>2}]
This may fix issues if you've seen a MySQL "commands out of sync"
message. Note that this doesn't work if you are connecting to
MySQL via JDBC.
* You can now use AliasedExpressions directly in table names given
to join_table:
DB.from(:i.as(:j)).join(:k.as(:l), :a=>:b)
#=> ... FROM i AS j INNER JOIN k AS l ON (l.a = j.b)
* Database#rename_table once again works on PostgreSQL. It was
broken in 2.7.0.
* The interval type is now treated as its own type. It was
previously treated as an integer type.
* Subselects are now aliased correctly when using Oracle.
* UNION, INTERSECT, and EXCEPT statements now appear before ORDER
and LIMIT on most databases. If you use these constructs, please
test and make sure that they work correctly with your database.
* SQL EXCEPT clause now works on Oracle, which uses MINUS instead.
* Dataset#exists now returns a LiteralString, to make it easier to
use.
* The Sequel.odbc_mssql method was removed, as the odbc_mssql adapter
was removed in a previous version. Instead, use:
Sequel.odbc(..., :db_type=>'mssql')
Backwards Compatibility
-----------------------
* The hash returned by Database#schema when no table name is provided
uses quoted strings instead of symbols as keys. The hash has a
default proc, so using the symbol will return the same value as
before, but if you use each to iterate through the hash, the keys
will be different. This was necessary to handle schema-qualified
tables.
* Database#table_exists? no longer checks the output of
Database#tables. If the table exists in the schema, it returns
true, otherwise, it does a query. This was necessary because
table_exists? accepts multiple formats for table names and
Database#tables is an array of symbols.
* When getting the schema on PostgreSQL, the default schema is now
used even if the :schema=>nil option is used.
sequel-5.63.0/doc/release_notes/2.9.0.txt

New Features
------------
* Compound SQL statement (i.e. UNION, EXCEPT, and INTERSECT) support
is much improved. Chaining compound statement calls no longer
wipes out previous compound statement calls of the same type.
Also, the ordering of the compound statements is no longer fixed
per adapter; it now reflects the order they were called on the
object. For example, the following now work as expected:
ds1.union(ds2).union(ds3)
ds1.except(ds2).except(ds3)
ds1.intersect(ds2).intersect(ds3)
ds1.union(ds2).except(ds3)
ds1.except(ds2).intersect(ds3)
ds1.intersect(ds2).union(ds3)
* Exception classes ValidationFailure and BeforeHookFailure were
added so it is easier to catch a failed validation. These are
both subclasses of Sequel::Error, so there shouldn't be any
backwards compatibility issues. Error messages are also improved,
as the ValidationFailure message is a string containing all
validation failures and the BeforeHookFailure message contains
which hook type caused the failure (i.e. before_save,
before_create, or before_validate).
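For example, a minimal sketch of catching a failed validation
(assuming the class is reachable as Sequel::ValidationFailure;
the exact namespace may differ):
begin
album.save
rescue Sequel::ValidationFailure => e
# e.message contains all validation failure messages
puts e.message
end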
* The sequel command line tool now has a -L option to load
all files in the given directory. This is mainly useful for
loading a directory of model files. The files are loaded
after the database connection is set up.
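For example (the directory and connection URL are hypothetical):
sequel -L ./models postgres://localhost/mydb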
* Methods to create and drop database functions, triggers, and
procedural languages were added to the PostgreSQL adapter.
Other Improvements
------------------
* Database#schema now raises an error if you pass a table that
doesn't exist. Before, some adapters would return an empty schema.
The bigger problem with this is that it made table_exists? return
the wrong value, since it looks at the Database's schema.
Generally, this bug would show up in the following code:
class Blah < Sequel::Model
end
Blah.table_exists? # True even if blahs is not a table
* AlterTableGenerator#add_foreign_key now works for MySQL.
* Error messages in model association methods that add/remove an
associated object are now more descriptive.
* Dataset#destroy for model datasets now works with databases that
can't handle nested queries. However, it now loads all model
objects being destroyed before attempting to destroy any of them.
* Dataset#count now works correctly for compound SQL statements
(i.e. UNION, EXCEPT, and INTERSECT).
* BigDecimal NaN and (+/-)Infinity values are now literalized
correctly. Database support for this is hit or miss. SQLite will
work correctly, PostgreSQL raises an error if you try to store an
infinite value in a numeric column (though it works for float
columns), and MySQL converts all three to 0.
* The SQLite adapter no longer loses primary key information when
dropping columns.
* The SQLite adapter now supports dropping indexes.
* A bug in the MSSQL adapter's literalization of LiteralStrings has
been fixed.
* The literalization of blobs on PostgreSQL (bytea columns) has been
fixed.
* Sequel now raises an error if you attempt to subclass Sequel::Model
before setting up a database connection.
* The native PostgreSQL adapter has been changed to only log client
messages of level WARNING by default. You can modify this via:
Sequel::Postgres.client_min_messages = nil # Use Server Default
Sequel::Postgres.client_min_messages = :notice # Use NOTICE level
* Model#inspect now calls Model#inspect_values for easier
overloading.
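For example, a sketch of an override that hides a bulky column from
inspect output (the class and column name are hypothetical):
class Album < Sequel::Model
private
# Return the string used inside the output of Model#inspect,
# omitting the :review column
def inspect_values
values.reject{|k, v| k == :review}.inspect
end
end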
Backwards Compatibility
----------------------
* The API to Model#save_failure (a private method) was changed to
remove the second argument.
* SQLite columns with type numeric, decimal, or money are now
returned as BigDecimal values. Before, they were probably returned
as strings.
sequel-5.63.0/doc/release_notes/3.0.0.txt

Deprecated Methods/Features Removed
-----------------------------------
Methods and features that were deprecated in 2.12.0 have been removed
in 3.0.0. Many features were moved into plugins or extensions, so in
many cases you just need to require an extension or use Model.plugin
and not make any changes to your code. See the 2.12.0 release notes
for the list of methods/features deprecated in 2.12.0.
If you are upgrading from a previous 2.x release, please upgrade to
2.12.0 first, fix your code to remove all deprecation warnings, and
then upgrade to 3.0.0.
New Adapter
-----------
* Sequel now has an Amalgalite adapter. Amalgalite is a ruby
extension that embeds SQLite without requiring a separate SQLite
installation. The adapter is functionally complete but
significantly slower than the native SQLite adapter.
New Features
------------
* The JDBC, PostgreSQL, MySQL, and SQLite adapters all now have a
Database#indexes method that returns indexes for a given table:
DB.indexes(:songs)
=> {:songs_name_index=>{:unique=>true, :columns=>[:name]},
:songs_lyricid_index=>{:unique=>false, :columns=>[:lyricid]}}
* A schema_dumper extension was added to Sequel. It supports dumping
the schema of a table (including indexes) as a string that can be
evaluated in the context of a Database object to create the table.
It also supports dumping all tables in the database as a string
containing a Migration subclass that will rebuild the database.
require 'sequel/extensions/schema_dumper'
DB.dump_table_schema(:table)
DB.dump_schema_migration
DB.dump_schema_migration(:same_db=>true)
DB.dump_schema_migration(:indexes=>false)
DB.dump_indexes_migration
The :same_db option causes Sequel to not translate column types
to generic column types. By default, the migration created will
use generic types so it will run on other databases. However, if
you only want to support a single database, using the :same_db
option will make the migration use the exact database type parsed
from the database.
The :indexes=>false option causes indexes not to be included in the
migration. The dump_indexes_migration can be used to create a
separate migration with the indexes. This can be useful if you
plan on loading a lot of data right after creating the tables,
since it is faster to add indexes after the data has been added.
* Using options with the generic database types is now supported to
a limited extent. For example, the following code now works:
DB.create_table(:table) do
String :a, :size=>50 # varchar(50)
String :b, :text=>true # text
String :c, :fixed=>true, :size=>30 # char(30)
Time :ts # timestamp
Time :t, :only_time=>true # time
end
* Using Dataset#filter and related methods with multiple arguments
now works much more intuitively:
# 2.12.0
dataset.filter(:a, :b=>1) # a IS NULL AND (b = 1) IS NULL
# 3.0.0
dataset.filter(:a, :b=>1) # a AND b = 1
* You can now create temporary tables by passing the :temp=>true
option to Database#create_table.
* The Oracle shared adapter now supports emulation of
autoincrementing primary keys by creating a sequence and a trigger,
similar to how the Firebird adapter works.
* The Database#database_type method was added that returns a symbol
specifying the database type being used. This can be different
than Database.adapter_scheme if you are using an adapter like
JDBC that allows connecting to multiple different types of
databases.
* Database#drop_index and related methods now support an options
hash that respects the :name option, so they can now be used to
drop an index that doesn't use the default index name.
* The PostgreSQL shared adapter now supports a
Database#reset_primary_key_sequence method to reset the
primary key sequence for a given table, based on code from
ActiveRecord.
* SQL::QualifiedIdentifiers can now be qualified, allowing you to do:
:column.qualify(:table).qualify(:schema)
* Using the :db_type=>'mssql' option with the DBI adapter will now
load the MSSQL support.
* The MySQL shared adapter now supports Dataset#full_text_sql, which
you can use in queries like the following:
ds.select(:table.*, ds.full_text_sql(:column, 'value').as(:ft))
Other Improvements
------------------
* Sequel will now release connections from the connection pool
automatically if they are held by a dead thread. This can happen
if you are using MRI 1.8 and you are heavily multithreaded or
you call Thread#exit! or similar method explicitly. Those methods
skip the execution of ensure blocks which normally release the
connections when the threads exit.
* Model#save will now always use the same server when refreshing data
after an insert. This fixes an issue when Sequel's master/slave
database support is used with models.
* SQL Array references are now quoted correctly, so code like this
now works:
:table__column.sql_subscript(1)
* The PostgreSQL shared adapter now handles sequences that need to be
quoted correctly (previously these were quoted twice).
* String quoting on Oracle no longer doubles backslashes.
* Database#count now works correctly when used on MSSQL when using
an adapter that doesn't handle unnamed columns.
* Full text searching in the MySQL adapter now works correctly when
multiple search terms are used.
* Altering a column's name, type, default, or NULL/NOT NULL status
on MySQL now keeps other relevant column information. For example,
if you alter a column's type, it'll keep an existing default. This
functionality isn't complete, there may be other column information
that is lost.
* Fix creation of an index with a given type on MySQL, since MySQL's
documentation lies.
* The schema parser now handles decimal types with size specifiers,
fixing use on MySQL.
* Dataset#quote_identifier now works correctly when given an
SQL::Identifier. This allows you to do:
dataset.select{sum(hours).as(hours)}
Backwards Compatibility
-----------------------
* Sequel will now use instance_eval on all virtual row blocks without
an argument. This can lead to much nicer code:
dataset.filter{(number > 10) & (name > 'M')}
# WHERE number > 10 AND name > 'M'
2.12.0 raised a deprecation warning if you used a virtual row block
without an argument and you hadn't set
Sequel.virtual_row_instance_eval = true.
* Dataset#exclude now inverts the given argument, instead of negating
it. This only changes its behavior if it is called with a hash or
array of two-element arrays that has more than one element.
# 2.12.0
dataset.exclude(:a=>1, :b=>1) # a != 1 AND b != 1
# 3.0.0
dataset.exclude(:a=>1, :b=>1) # a != 1 OR b != 1
This was done for consistency, since while exclude would only
negate a hash given as an argument, it would invert the same hash
if you used a block:
# 2.12.0
dataset.exclude{{:a=>1, :b=>1}} # a != 1 OR b != 1
If you want the previous behavior,
change the code to the following:
dataset.filter({:a=>1, :b=>1}.sql_negate)
* As noted above, the methods/features deprecated in 2.12.0 were
removed.
* The private Dataset#select_*_sql methods now only take a single
argument, the SQL string being built.
* Dataset#from when called without arguments would previously cause an
error to be raised when the SQL string is generated. Now it causes
no FROM clause to be used, similar to how Dataset#select with no
arguments causes SELECT * to be used.
* The internals of the generic type support and the schema generators
were changed significantly, which could have some fallout in terms
of old migrations breaking if they used the generic types and were
relying on some undocumented behavior (such as using Integer as a
type with the :unsigned option).
* The Firebird adapter no longer translates the text database
specific type. Use the following instead:
String :column, :text=>true
* The MySQL shared adapter used to use the timestamp type for Time,
now it uses datetime. This is because the timestamp type cannot
represent everything that the ruby Time class can represent.
* Metaprogramming#metaattr_accessor and #metaattr_reader methods were
removed.
* Dataset#irregular_function_sql was removed.
sequel-5.63.0/doc/release_notes/3.1.0.txt

New Plugins
-----------
3 new plugins were added that implement features supported by
DataMapper: identity_map, tactical_eager_loading, and
lazy_attributes. These plugins don't add any real new features,
since you can do most of what they allow before simply by being
a little more explicit in your Sequel code. However, some people
prefer a less explicit approach that uses a bit more magic, and
now Sequel can accommodate them.
* The identity_map plugin allows you to create a 1-1
correspondence of model objects to database rows via a temporary
thread-local identity map. It makes the following statement true:
Sequel::Model.with_identity_map do
Album.filter{(id > 0) & (id < 2)}.first.object_id == \
Album.first(:id=>1).object_id
end
As the code above implies, you need to use the with_identity_map
method with a block to use the identity mapping feature.
By itself, identity maps don't offer much, but Sequel uses them
as a cache when looking up objects by primary key or looking up
many_to_one associated objects. Basically, it can be used as a
performance enhancer, and it also allows the support of the
lazy_attributes plugin.
The identity_map plugin is expected to be most useful in web
applications. With that in mind, here's a Rack middleware that
wraps each request in a with_identity_map call, so the
identity_map features are available inside the web app:
Sequel::Model.plugin :identity_map
class SequelIdentityMap
def initialize(app)
@app = app
end
def call(env)
Sequel::Model.with_identity_map{@app.call(env)}
end
end
* The tactical_eager_loading plugin allows you to eagerly load an
association for all models retrieved in the same group whenever
one of the models accesses the association:
# 2 queries total
Album.filter{id<100}.all do |a|
a.artists
end
In order for this to work correctly, you must use Dataset#all to
load the
records, you cannot iterate over them via Dataset#each. This is
because eager loading requires that you have all records in
advance, and when using Dataset#each you cannot know about later
records in the dataset.
Before, you could just be explicit about the associations you
needed and make sure to eagerly load them using eager before
calling Dataset#all.
* The lazy_attributes plugin builds on the identity_map and
tactical_eager_loading plugins and allows you to create
attributes that are lazily loaded from the database:
Album.plugin :lazy_attributes, :review
This will remove the :review attribute from being selected by
default. If you try to access the attribute after it is selected,
it'll retrieve the value from the database. If the object was
retrieved with a group of other objects and an identity map is in
use, it'll retrieve the lazy attribute for the entire group of
objects at once, similar to the tactical_eager_loading plugin:
# 2 queries total
Sequel::Model.with_identity_map do
Album.filter{id<100}.all do |a|
a.review
end
end
Before, you could just set the default selected columns for a model
to not include the lazy attributes, and just use select_more to
add them to any query where the resulting model objects will
need the attributes.
* A many_through_many plugin was also added. This very powerful
plugin allows you to create associations to multiple objects through
multiple join tables. Here are some examples:
# Assume the following many to many associations:
Artist.many_to_many :albums
Album.many_to_many :tags
# Same as Artist.many_to_many :albums
Artist.many_through_many :albums,
[[:albums_artists, :artist_id, :album_id]]
# All tags associated to any album this artist is associated to
Artist.many_through_many :tags,
[[:albums_artists, :artist_id, :album_id],
[:albums, :id, :id],
[:albums_tags, :album_id, :tag_id]]
# All artists associated to any album this artist is associated to
Artist.many_through_many :artists,
[[:albums_artists, :artist_id, :album_id],
[:albums, :id, :id],
[:albums_artists, :album_id, :artist_id]]
# All albums by artists that are associated to any album this
# artist is associated to
Artist.many_through_many :artist_albums,
[[:albums_artists, :artist_id, :album_id],
[:albums, :id, :id],
[:albums_artists, :album_id, :artist_id],
[:artists, :id, :id],
[:albums_artists, :artist_id, :album_id]]
Basically, for each join table between this model and the
associated model, you use an array with a join table name, left key
name (key closer to this model), and right key name (key closer to
the associated model).
In usual Sequel fashion, this association type works not just
for single objects, but it can also be eagerly loaded via eager or
eager_graph. There are numerous additional configuration options,
please see the RDoc for details.
New bin/sequel Features
-----------------------
The bin/sequel command line tool now supports the following options:
* -C: Copies one database to another. You must specify two database
arguments. Works similar to Taps, copying the table schema, then
the table data, then creating the indexes.
* -d: Dump the schema of the database in the database-independent
migration format.
* -D: Dump the schema of the database in the database-specific
migration format.
* -h: Display the help
* -t: Output the full backtrace if an exception is raised
The bin/sequel tool is now better about checking which options can
be used together. It also now supports using the -L option multiple
times and having it load model files from multiple directory trees.
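For example (connection URLs are hypothetical):
sequel -C postgres://localhost/mydb sqlite://backup.db # copy
sequel -d postgres://localhost/mydb > schema_migration.rb # dump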
New Features
------------
* Dataset#qualify_to and #qualify_to_first_source were added. They
allow you to qualify unqualified columns in the current dataset
to the given table or the first source. This can be used to join
a dataset that has unqualified columns to a new table which has
columns with the same name.
For example, take this dataset:
ds = DB[:albums].select(:name).order(:name).filter(:id=>1)
# SELECT name FROM albums WHERE (id = 1) ORDER BY name
Let's say you want to join it to the artists table:
ds2 = ds.join(:artists, :id=>:artist_id)
# SELECT name FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
# WHERE (id = 1) ORDER BY name
That's going to give you an error, as the artists table already has
columns named id and name. This new feature allows you to do the
following:
ds2 = ds.qualify_to_first_source.join(:artists, :id=>:artist_id)
# SELECT albums.name FROM albums
# INNER JOIN artists ON (artists.id = albums.artist_id)
# WHERE (albums.id = 1) ORDER BY albums.name
By doing this, all unqualified columns are qualified, so you get
a usable query. This is expected to be most useful for users that
have a default order or filter on their models and want to join
the model to another table. Before you had to replace the filters,
selection, etc. manually, or use qualified columns by default even
though they weren't needed in most cases.
* Savepoints are now supported using SQLite and MySQL, assuming you
are using a database version that supports them. You need to
pass the :savepoint option to Database#transaction to use a
savepoint.
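For example, a minimal sketch:
DB.transaction do # BEGIN
DB.transaction(:savepoint=>true) do # SAVEPOINT
# work here can be rolled back without rolling back
# the outer transaction
end # RELEASE SAVEPOINT
end # COMMIT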
* Model plugins can now depend on other plugins, simply by calling
the Model.plugin method inside the plugin's apply method:
module LazyAttributes
def self.apply(model)
model.plugin :tactical_eager_loading
end
end
* Model.plugin now takes a block which is passed to the plugin's
apply and configure method (see Backwards Compatibility section for
more information on the configure method).
* You can see which plugins are loaded for a model by using
Model.plugins.
* You can use Sequel.extension method to load extensions:
Sequel.extension :pagination, :query
This will only load extensions that ship with Sequel, unlike the
Model.plugin method which will also load external plugins.
* You can now use Database#create_table? to create the table if it
doesn't already exist (a very common need, it seems). The schema
plugin now supports Model.create_table? as well.
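For example (table and columns are illustrative):
DB.create_table?(:albums) do
primary_key :id
String :name
end
# Only creates the albums table if it doesn't already exist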
* #sql_subscript is now an allowed method on most SQL expression
objects that Sequel generates. Also, arguments to #sql_subscript
can now be other expressions instead of just integers.
* Associations can now take a :cartesian_product_number option, which
can be used to tell Sequel whether to turn on duplicate object
detection when eagerly loading objects through eager_graph. This
number should be 0 if the association can never create multiple
rows for each row in the current table, 1 if it can create multiple
rows for each row in the current table, and 2 if the association
itself causes a cartesian product.
* On MySQL, Dataset#insert_ignore now affects #insert as well as
multi_insert and import.
* Database#create_table now supports an :ignore_index_errors option,
and Database#add_index now supports an :ignore_errors option.
These are used by the schema_dumper when dumping a database
schema to be restored on another database type, since indexes
aren't usually required for proper operation and some indexes
can't be transferred.
* The ADO adapter now takes a :provider option, which can be used
to set the provider.
* The ADO adapter now takes a :command_timeout option, which tells
the connection how long to wait before giving up and raising an
exception.
* The Sequel.amalgalite adapter method was added. Like the
Sequel.sqlite method, you can call it with no arguments to get
an in memory database.
Other Improvements
------------------
* MySQL "commands out of sync" errors should no longer occur unless
you are nesting queries (calling Dataset#each inside Dataset#each).
A bug dating at least to 2007 and possibly since the initial
creation of the Sequel MySQL adapter was the cause. Before, SQL
that returned a result set but was sent using a method where Sequel
doesn't yield a result set would cause the "commands out of sync"
error on the following query. For example, the following code
would cause the error:
DB << "SHOW DATABASES"
If for some reason a "commands out of sync" error does occur,
Sequel will disconnect the connection from the connection pool,
so it won't continually stay in the pool and raise errors every
time it is used.
* The schema_dumper extension is much better about parsing defaults
from the database. It can now correctly parse most defaults on
MySQL, SQLite, and PostgreSQL databases. It no longer includes
defaults that it can't parse to a ruby object unless a database-
specific dump is requested.
* The schema_dumper extension now dumps tables in alphabetical order.
* Ordered and limited datasets are now handled correctly when using
union, intersect, and except. Also, union, intersect, and except
now always return a from_self dataset, so further limiting,
filtering, and ordering of them now works as expected.
* Dataset#graph now works correctly with a complex dataset without
having to use from_self. Before, code like the following didn't
do what was expected:
DB[:albums].
graph(DB[:artists].filter{name > 'M'}, :id=>:artist_id)
Before, the filter on DB[:artists] would be dropped. Now, Sequel
correctly uses a subselect.
* You can now specify serialization formats per column in the
serialization plugin, either by calling the plugin multiple
times or by using the new serialize_attributes method:
Album.plugin :serialization
Album.serialize_attributes :marshal, :review
Album.serialize_attributes :yaml, :name
Album.serialization_map # {:name=>:yaml, :review=>:marshal}
The public API for the serialization plugin is still backwards
compatible, but the internals have changed slightly to support
this new feature.
* You can now use eager_graph to eagerly load associations for models
that lack primary keys.
* The :eager_graph association option now works when lazily-loading
many_to_many associations.
* Dataset#add_graph_aliases now works correctly even if
set_graph_aliases hasn't been used.
* The PostgreSQL Database#tables method now assumes the public schema
if a schema is not given and there is no default_schema.
* The PostgreSQL Database#indexes method no longer returns partial
indexes or functional indexes.
* The MySQL Database#indexes method no longer returns indexes on
partial columns (prefix indexes).
* Default values for String :text=>true and File columns on MySQL
are ignored, since MySQL doesn't support them. They are not
ignored if you use text and blob, since then you are using the
database-specific syntax and Sequel doesn't do translation when
the database-specific syntax is used.
* On PostgreSQL, attempting to reset the primary key sequence for a
table without a primary key no longer causes an error.
* Using a placeholder string in an association's :conditions option
now works correctly (e.g. :conditions=>['n = ?', 1])
* An error is no longer raised if you attempt to load a plugin that
has a DatasetMethods module but no public dataset methods.
* The check for dataset[n] where n is an integer was fixed. It now
raises an error instead of returning a limited dataset.
* On PostgreSQL, Dataset#insert with static SQL now works correctly.
* A reflection.rdoc file was added giving an overview of Sequel's
reflection support.
* The Migrator now works correctly with file names like
001_12312412_file_name.rb.
* The association code now requires the classes match when looking
for a reciprocal association.
* An unlikely threading bug (race condition) was possible when using
the validation_class_methods plugin. The plugin was refactored and
now uses a mutex to avoid the issue. One of the refactoring changes
makes it so that you can no longer use a class level validation
inside a Class.new block (since inherited isn't called until the
block finishes).
* The exception messages when Sequel.string_to_* fail have been fixed.
* The String :text=>true generic database type has been fixed when
using the Firebird adapter.
Backwards Compatibility
-----------------------
* A plugin's apply method is now only called the first time a plugin
is loaded. Plugins can now have a configure method that is called
every time the plugin is loaded, and is always called after the
instance methods, class methods, and dataset method submodules have
been added to the model. This is different from apply, which is
called before the submodules are loaded.
If you are a plugin author, please check your implementation to
make sure this doesn't cause problems for you. If you have
questions, please post on the Sequel mailing list.
This new plugin feature will make certain things a lot easier, and
it should be mostly backwards compatible. However, if a plugin
was previously expected to be loaded multiple times with the apply
method called each time, it will no longer work correctly.
* The plugin_opts methods defined now include multiple args in an
array if multiple args are given. Before, the plugin_opts methods
just returned the first argument.
* Database#table_exists? no longer checks the cached schema
information. By default, it will always do a database query
(unless overridden in an adapter). This shouldn't affect the
results, but if you were using the method a lot and expecting it to
use cached information, it doesn't have the same performance
characteristics.
* The internal storage of the :select option for datasets has
changed. You can no longer use a hash as a way of aliasing
columns. Dataset#select now does the translation from the hash to
SQL::AliasedExpression instances. Basically, if you were using
Dataset#clone directly with a :select option with hashes for
aliasing, you should switch to using Dataset#select or changing
the hashes to AliasedExpressions yourself.
sequel-5.63.0/doc/release_notes/3.10.0.txt

New Features
------------
* A real one_to_one association was added to Sequel, replacing the
previous :one_to_one option of the one_to_many association.
This is a fully backwards incompatible change, any code that uses
the :one_to_one option of one_to_many will be broken in Sequel
3.10.0, as that option now raises an exception. Keeping backwards
compatibility was not possible, as even the name of the association
needs to be changed. Here are the code changes you need to make:
* The association definition needs to change from one_to_many to
one_to_one, with no :one_to_one option, and with the association
name changed from the plural form to the singular form:
# Before
Lyric.one_to_many :songs, :one_to_one=>true
# After
Lyric.one_to_one :song
* All usage of the association when eager loading or when getting
reflections need to use the new singular association name:
# Before
Lyric.eager(:songs).all
Lyric.eager_graph(:songs).all
Lyric.association_reflection(:songs)
# After
Lyric.eager(:song).all
Lyric.eager_graph(:song).all
Lyric.association_reflection(:song)
Any Sequel plugins or extensions that deal with the internals of
associations need to be made aware of the one_to_one association,
and how it is different than one_to_many's previous :one_to_one
option. Here are some internal changes that may affect you:
* one_to_one associations are now cached like many_to_one
associations instead of like one_to_many associations. So the
cache includes the associated object or nil, instead of an array.
Note that this change means that all custom :eager_loader options
for one_to_one associations need to change to use this new
caching scheme.
* The one_to_one association setter method is now handled similarly
to the many_to_one setter method, instead of using the internal
one_to_many association add method.
* Instead of raising an error when multiple rows are returned,
one_to_one associations now use limit(1) to only return a single
row.
There were some other fixes made during these changes:
* The one_to_one setter now accepts nil to disassociate the record.
Previously, this raised an error.
* If the one_to_one association already had a separate object
associated, and you assigned a different object in the setter
method, Sequel now disassociates the old object before
associating the new object, fixing some potential issues if there
is a UNIQUE constraint on the foreign key column.
* Using the many_to_one association setter where the reciprocal
association is a one_to_one association with a currently
different cached associated object no longer raises an exception.
* The nested_attributes and association_dependencies plugins
both now correctly handle one_to_one associations.
If you need any help migrating, please post on the Sequel Google
Group or ask in the #sequel IRC channel.
* Both many_to_one and one_to_one associations now use before_set
and after_set callbacks instead of trying to make the one_to_many
and many_to_many associations' (before|after)_(add|remove)
callbacks work.
This change makes the code simpler, makes writing callbacks easier,
and no longer requires Sequel to send a query to the database to
get the currently associated object in the many_to_one association
setter method (you can still do so manually in a before_set
callback if you want to).
* Dataset#for_update was added as a default dataset method.
Previously, it was only supported on PostgreSQL. It has been
tested to work on PostgreSQL, MySQL, SQLite (where it is ignored),
H2, and MSSQL.
* Dataset#lock_style was added as a backbone for Dataset#for_update,
but allowing you to specify custom lock styles. These can either
be symbols recognized by the adapters, or strings which are treated
as literal SQL.
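For example:
DB[:items].for_update
# SELECT * FROM items FOR UPDATE
DB[:items].lock_style('FOR SHARE')
# SELECT * FROM items FOR SHARE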
* Model#lock! was added, which uses Dataset#for_update to lock model
rows for specific instances. Combined with the Dataset#for_update,
Sequel now has an equivalent to ActiveRecord's pessimistic locking
support.
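For example, a minimal sketch of pessimistic locking (model and
column are illustrative):
DB.transaction do
album = Album[1].lock! # SELECT ... FOR UPDATE
album.update(:plays=>album.plays + 1)
end # row lock released when the transaction commits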
* A composition plugin was added, given similar functionality as
ActiveRecord's composed_of.
The composition plugin allows you to easily define getter and
setter instance methods for a class where the backing data is
composed of other getters and decomposed to other setters.
A simple example of this is when you have a database table with
separate columns for year, month, and day, but where you want to
deal with Date objects in your ruby code. This can be handled
with:
Model.composition :date, :mapping=>[:year, :month, :day]
The :mapping option is optional, but if not used, you need to define
custom composition and decomposition procs via the :composer and
:decomposer options.
Note that when using the composition object, you should not modify
the underlying columns if you are also instantiating the
composition, as otherwise the composition object values will
override any underlying columns when the object is saved.
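For example, a sketch using explicit :composer and :decomposer
options instead of :mapping (the proc bodies are illustrative):
Model.composition :date,
:composer=>proc{Date.new(year, month, day) if year},
:decomposer=>(proc do
if d = date
self.year = d.year
self.month = d.month
self.day = d.day
end
end)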
* An rcte_tree plugin was added, which uses recursive common table
expressions to load all ancestors and descendants in a single
query. If your database supports recursive common table
expressions (PostgreSQL 8.4+, MSSQL 2005+, newer versions of
Firebird), using recursive common table expressions to load
all ancestors and descendants is significantly faster than storing
trees as nested sets and using nested set queries. Usage:
Model.plugin :rcte_tree
# Lazy loading
model = Model.first
model.parent
model.children
model.ancestors # Populates :parent association as well
model.descendants # Populates :children association as well
# Eager loading - also populates the :parent and :children
# associations for all ancestors and descendants
Model.filter(:id=>[1, 2]).eager(:ancestors, :descendants).all
# Eager loading children and grandchildren
Model.filter(:id=>[1, 2]).eager(:descendants=>2).all
# Eager loading children, grandchildren, and great grandchildren
Model.filter(:id=>[1, 2]).eager(:descendants=>3).all
* Dataset#first_source_table was added, giving you the unaliased
version of the table for the first source.
* Add Sequel::BasicObject.remove_methods!, useful on ruby 1.8 if you
require other libraries after Sequel that add methods to Object.
For example, if YAML is required after sequel, then the following
will raise an error:
DB[:a].filter{x > y}
because YAML adds the y method to all objects. Now, you can call
Sequel::BasicObject.remove_methods!, which will remove those
methods from Sequel::BasicObject, allowing them to be used as
intended in the above DSL.
* Sequel associations now accept an :eager_loader_key option, which
can be useful for associations to specify the column to use for the
key_hash for custom :eager_loaders.
* A JDBC subadapter for the AS400 database was added.
Other Improvements
------------------
* The one_to_one setter method and the one_to_many and many_to_many
remove_all methods now apply the association options (such as
filters) on the appropriate dataset:
Artist.one_to_many :good_albums, :class=>:Album,
:conditions=>{:good=>true}
a = Artist[10]
a.remove_all_good_albums
# Before: WHERE artist_id = 10
# After: WHERE artist_id = 10 AND good IS TRUE
* Plugin loading now works correctly when the plugin module name
is the same name as an already defined top level constant. This
means that the active_model plugin should now work correctly if
you require active_model before loading the Sequel plugin.
* The nested_attributes plugin now preserves nested attributes for
*_to_one associations on validation failures.
* Transactions now work correctly on Oracle when using the JDBC
adapter.
* Dataset#limit once again works correctly on MSSQL 2000. It was
broken in Sequel 3.9.0.
* many_to_one associations now use limit(1) to ensure only one
record is returned. If you don't want this (because maybe you
are using the :eager_graph association option), you need to
set the :key option to nil and use a custom :dataset option.
* many_to_one and one_to_many associations now work correctly
with the association :eager option to eagerly load associations
specified by :eager when lazy loading the association.
* The typecast_on_load plugin now correctly handles
reloading/refreshing the object, both explicitly and implicitly
on object creation.
* The schema parser and dumper now return tinyint columns as
booleans when connecting to mysql using the do adapter, since
DataObjects now returns the columns as booleans.
* The schema dumper now deals better with unusual or database
specific primary key types when using the :same_db option.
* On ruby 1.8, Sequel::BasicObject now undefs private methods in
addition to public and protected methods. So the following
code now works as expected:
DB[:a].filter{x > p} # WHERE x > p
* Sequel.connect with a block now returns the value of the block:
max_price = Sequel.connect('sqlite://items.db') do |db|
db[:items].max(:price)
end
* MSSQL emulated offset support now works correctly when Sequel's
core extensions are not loaded.
* Sequel::BasicObject now works correctly on rubinius, and almost
all Sequel specs now pass on rubinius.
* The nested_attributes plugin now uses a better exception message
when no matching associated object is found.
* Sequel now raises a more informative error if you attempt to use
the native sqlite adapter with the sqlite3 gem instead of the
sqlite3-ruby gem.
* Multiple complex expressions with the same operator are now
combined for simpler SQL:
DB[:a].filter(:a=>1, :b=>2).filter(:c=>3)
# Before: (((a = 1) AND (b = 2)) AND (c = 3))
# After: ((a = 1) AND (b = 2) AND (c = 3))
* The Sequel::Model dataset methods (class methods proxied to the
model's dataset) and the Sequel::Dataset mutation methods
(methods that have a ! counterpart to modify the object in place)
have both been updated to use new dataset methods added in recent
versions.
Backwards Compatibility
-----------------------
* The :one_to_one option of the one_to_many associations now raises
an exception. Please see the section above about the new real
one_to_one association.
* The change to apply the association options to the one_to_many and
many_to_many remove_all methods has the potential to break some
code that uses the remove_all method on associations that use
association options. This is especially true for many_to_many
associations, as filters in many_to_many associations will often
reference columns in the associated table, while the dataset
used in the remove_all method only contains the join table. Such
cases should be handled by manually overriding the _remove_all
association instance method in the class. It was determined that
it was better to issue possibly invalid queries than to issue
queries that make unexpected modifications.
* Dataset#group_and_count no longer orders the dataset by the count.
Since it returns a modified dataset, if you want to order the
dataset, just call order on the returned dataset.
* many_to_one associations now require a working :class option.
Previously, if you provided a custom :dataset option, a working
:class option was not required in some cases.
* The MSSQL shared adapter dataset methods switched from using
the :table_options internal option key to using the :lock internal
option key.
sequel-5.63.0/doc/release_notes/3.11.0.txt

= New Features
* A few new features were added to query logging. Sequel now
includes execution time when logging queries. Queries that
raise exceptions are now logged at ERROR level. You can now
set the log_warn_duration attribute on the Database instance
and queries that take longer than that will be logged at WARN
level. By using different log levels, you can now only log
queries that raise errors, or only log queries that take a long
time.
# The default - Log all successful queries at INFO level
DB.log_warn_duration = nil
# Log all successful queries at WARN level
DB.log_warn_duration = 0
# Log successful queries that take the database more than half a
# second at WARN level, other successful queries at INFO level
DB.log_warn_duration = 0.5
All adapters included with Sequel have been modified to support
the new logging API. The previous API is still available, so
any external adapters should still work, though switching to the
new logging API is encouraged.
* Sequel::Model now has a require_modification flag. If not set
explicitly, it is enabled by default if the dataset provides an
accurate number of rows matched by an update or delete statement.
When this setting is enabled, Sequel will raise an exception if
you attempt to update or delete a model object and it doesn't end
up affecting exactly one row. For example:
DB.create_table(:as){primary_key :id}
class A < Sequel::Model; end
a = A.create
# delete object from database
a.delete
a.require_modification = false
a.save # no error!
a.delete # no error!
a.require_modification = true
a.save # Sequel::NoExistingObject exception raised
a.delete # Sequel::NoExistingObject exception raised
Like many other Sequel::Model settings, this can be set on a
global, per class, and per instance level:
Sequel::Model.require_modification = false # global
Album.require_modification = true # class
album.require_modification = false # instance
* An instance_filters plugin was added to the list of built in
plugins, allowing you to add arbitrary filters when updating or
destroying an instance. This allows you to continue using models
when previously you would have had to drop down to using datasets
to get the desired behavior:
class Item < Sequel::Model
plugin :instance_filters
end
# These are two separate objects that represent the same
# database row.
i1 = Item.first(:id=>1, :delete_allowed=>false)
i2 = Item.first(:id=>1, :delete_allowed=>false)
# Add an instance filter to the object. This filter is in effect
# until the object is successfully updated or deleted.
i1.instance_filter(:delete_allowed=>true)
# Attempting to delete the object where the filter doesn't
# match any rows raises an error.
i1.delete # raises Sequel::Error
# The other object that represents the same row has no
# instance filters, and can be updated normally.
i2.update(:delete_allowed=>true)
# Even though the filter is now still in effect, since the
# database row has been updated to allow deleting,
# delete now works.
i1.delete
* An :after_connect database option is now supported. If provided,
the option value should be a proc that takes a single argument.
It will be called with the underlying connection object before
connection object is added to the connection pool, allowing you
to set per connection options in a thread-safe manner.
This is useful for customizations you want set on every connection
that Sequel doesn't already support. For example, on PostgreSQL
if you wanted to set the schema search_path on every connection:
DB = Sequel.postgres('dbname', :after_connect=>(proc do |conn|
conn.execute('SET search_path TO schema1,schema2')
end))
* A :test database option is now supported. If set to true, it
automatically calls test_connection to make sure a connection can
be made before returning a Database instance. For backwards
compatibility reasons, this is not set to true by default, but it
is possible that the default will change in a future version of
Sequel.
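For example (the connection URL is hypothetical):
DB = Sequel.connect('postgres://localhost/mydb', :test=>true)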
* The Dataset#select_append method was added, which always appends
to the existing selected columns. It operates identically to
select_more, except in the case that no columns are currently
selected:
ds = DB[:a]
# SELECT * FROM a
ds.select_more({:id=>DB[:b].select(:a_id)}.as(:in_b))
# SELECT id IN (SELECT a_id FROM b) AS in_b FROM a
ds.select_append({:id=>DB[:b].select(:a_id)}.as(:in_b))
# SELECT *, id IN (SELECT a_id FROM b) AS in_b FROM a
* The Dataset#provides_accurate_rows_matched? method was added which
allows you to see if the dataset will return the actual number of
rows matched/affected by an update or delete call.
* Sequel will now emulate DISTINCT ON support using GROUP BY on
MySQL. On MySQL, GROUP BY is similar to DISTINCT ON, except that
the order of returned rows is not deterministic.
* Support for connecting to Microsoft SQL Server using the JTDS JDBC
driver was added to the jdbc adapter.
* JNDI connection strings are now supported in the JDBC adapter.
* The JDBC adapter should now work in situations where driver
auto-loading has problems, such as when using Tomcat or Trinidad.
* Sequel's JDBC adapter schema parsing now supports a :scale option,
useful for numeric/decimal columns.
* Sequel's schema parsing on Microsoft SQL Server now supports
:column_size and :scale options.
* When connecting to SQLite, a Database#sqlite_version method is
available that gives you the SQLite version as an integer (e.g.
30613 for 3.6.13).
= Other Improvements
* Sequel no longer raises an error if you give Dataset#filter or
related method an empty argument such as {}, [], or ''. This allows
code such as the following to work:
h = {}
h[:name] = name if name
h[:number] = number if number
ds = ds.filter(h)
Before, this would raise an error if both name and number were
nil.
* Numeric and decimal columns with a 0 scale are now treated as
integer columns by the model typecasting code, since such columns
cannot store non-integer values.
* Calling Database#disconnect when using the single threaded
connection pool no longer raises an error if there is no current
connection.
* When using the :ignore_index_errors option to
Database#create_table, correctly swallow errors raised by Sequel
due to the adapter not supporting the given index type.
* The JDBC adapter no longer leaks ResultSets when retrieving
metadata.
* You can now connect to PostgreSQL when using ruby 1.9 with the
-Ku switch.
* When using the native MySQL adapter, only tinyint(1) columns are
now returned as booleans when using the convert_tinyint_to_bool
setting (the default). Previously, all tinyint columns would
be converted to booleans if the setting was enabled.
* Correctly handle inserts returning the autogenerated keys when
using MySQL JDBC Driver version 5.1.12 with the jdbc adapter.
* The native MySQL adapter now supports :config_default_group and
:config_local_infile options.
* When connecting to SQLite, you can provide the :auto_vacuum,
:foreign_keys, :synchronous, and :temp_store options for
making the appropriate PRAGMA setting on the database in a
thread-safe manner. The previous thread-unsafe PRAGMA setting
methods are available, but their use is discouraged.
* Sequel will not enable savepoints when connecting to SQLite
unless the version is 3.6.8 or greater.
* Using limit with distinct now works correctly on Microsoft SQL
Server.
* Database#rename_table now works correctly on Microsoft SQL Server.
* If you specify an explicit :provider when using the ADO adapter,
transactions will now work correctly. The default :provider uses
a new native connection for each query, so it cannot work with
transactions, or things like temporary tables.
* If you specify an explicit :provider when connecting to Microsoft
SQL Server using the ADO adapter (e.g. SQLNCLI10 or SQLNCLI),
Sequel is now able to provide an accurate number of rows modified
and deleted.
* Using set_column_allow_null with a decimal column with a precision
and scale now works correctly when connecting to Microsoft SQL
Server.
* You can now connect to Microsoft SQL Server using the dbi adapter.
* Sequel now recognizes the NUMBER database type as a synonym for
NUMERIC and DECIMAL, which may help some Oracle users.
* Transactions can now be rolled back correctly when connecting to
Oracle via JDBC.
* The active_model plugin now supports ActiveModel 3.0.0beta2.
* Many documentation improvements were made, including the addition
of a dataset basics guide, an association basics guide, an expanded
virtual row guide, and the separation of the Sequel::Dataset RDoc
page into sections. Additionally, the RDoc class/method
documentation now contains links to the appropriate guides.
= Backwards Compatibility
* When connecting to SQLite, Sequel now automatically sets the
foreign_keys PRAGMA to true, which will make SQLite 3.6.19+ use
database enforced foreign key constraints. If you do not want
the database to enforce the foreign key constraints, you should
use the :foreign_keys=>false option when connecting to the
database.
* Sequel no longer creates #{plugin_name}_opts class, instance, and
dataset methods for each plugin loaded. No built-in plugin used
them, and I couldn't find an external plugin that did either.
* The Model#associations method is no longer available if the
default Associations plugin is not loaded due to the
SEQUEL_NO_ASSOCIATIONS constant or environment variable being set.
* DISTINCT ON support is turned off by default, and only enabled when
using PostgreSQL, since that appears to be the only database that
supports it. Previously, it was enabled by default and most common
adapters turned it off.
sequel-5.63.0/doc/release_notes/3.12.0.txt

= Migration Changes
* A TimestampMigrator has been added to Sequel, and is
automatically used if any migration has a version greater than
20000100. This migrator operates similarly to the default
ActiveRecord migrator, in that it allows missing migrations.
It differs from the ActiveRecord migrator in that it supports
migrations with the same timestamp/version as well as a wide
variety of timestamp formats (though the ActiveRecord default
of YYYYMMDDHHMMSS is recommended and should be used in
portable code).
Sequel still defaults to the old migrator, but you can use the
new one without making changes to your old migrations. Just
make sure your new migration starts with a version greater than
20000100, and Sequel will automatically convert the previous
schema table to the new format.
* A new migration DSL was added:
Sequel.migration do
up do
end
down do
end
end
The old style of using a Sequel::Migration subclass is still
supported, but it is recommended that new code use the new DSL.
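For example, a complete (illustrative) migration in the new DSL:
Sequel.migration do
up do
create_table(:artists) do
primary_key :id
String :name
end
end
down do
drop_table(:artists)
end
end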
* The default migrator also had significant issues fixed. First,
it now saves the migration version after each migration, instead
of after all migrations, which means Sequel won't attempt to
apply already applied migrations if there was previously an error
when applying multiple migrations at once on a database that
didn't support transactional schema modification.
Second, duplicate migration versions in the default migrator now
raise an exception, as do missing migration versions. Neither
should happen when using the default migrator, which requires
consecutive integer versions, similar to the old ActiveRecord
migrator.
* Execution times for migrations are now logged to the database's
loggers.
= New Plugins
* A sharding plugin has been added that allows model objects to
work well with sharded databases. When using it, model objects
know which shard they were retrieved from, so when you save
the object, it is saved back to that shard. The sharding plugin
also works with associations, so associated records are retrieved
from the same shard the main object was retrieved from. The
sharding plugin also works with both methods of eager loading, and
provides methods that you can use to create objects on specific
shards.
* An update_primary_key plugin has been added that allows Sequel
to work correctly if you modify the primary key of a model object.
This should not be necessary if you are using surrogate keys, but
if your database uses natural primary keys which can change, this
should be helpful.
* An association_pks plugin has been added that adds association_pks
and association_pks= methods to model objects for both one_to_many
and many_to_many associations. The association_pks method returns
an array of primary key values for the associated objects, and
the association_pks= method modifies the database to ensure that
the object is only associated to the objects specified by the
array of primary keys provided to it.
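For example, assuming Album has a many_to_many association to tags
(the association and key values are illustrative):
Album.plugin :association_pks
album = Album[1]
album.tag_pks # => [1, 2, 3]
album.tag_pks = [2, 4] # tag 4 added; tags 1 and 3 removed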
* A string_stripper plugin has been added that strips all strings
that are assigned to attribute values. This is useful for web
applications where you want to easily remove leading and trailing
whitespace in form entries before storing them in the database.
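For example (the model and column are illustrative):
Album.plugin :string_stripper
Album.new(:name=>' A Name ').name # => 'A Name'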
* A skip_create_refresh plugin has been added that skips the refresh
after you save a new model object. On most databases, Sequel
refreshes the model object after inserting it in order to get
values for all of the columns. For performance reasons, you can
use this plugin to skip the refresh if it isn't necessary for you.
= Other New Features
* Sequel::Model#set_fields and update_fields were added. These
methods have a similar API to set_only and update_only, but they
operate differently. While set_only and update_only operate over
the hash, these methods operate over the array of fields,
so they don't raise errors if the hash contains fields not
in the array:
params = {:a=>1, :b=>2, :c=>3}
album = Album[1]
# raises Error because :a is not in the fields
album.set_only(params, [:b, :c])
# Just sets the value of album.b and album.c
album.set_fields(params, [:b, :c])
Other than handling entries in the hash that aren't in the array,
set_fields and update_fields also handle entries not in the hash
differently:
# Doesn't modify the object, since the hash is empty
album.set_only({}, [:b, :c])
# Sets album.b and album.c to nil, since they aren't in the hash
album.set_fields({}, [:b, :c])
* The :eager_loader association option has a new API, though the
previous API still works. Instead of accepting three arguments,
it can now accept a single hash argument, which will use the
:key_hash, :rows, and :association keys for the previous three
arguments. The hash will also contain a :self key whose value
is the dataset doing the eager load, which was not possible to
determine using the old API.
* Sequel::SQL::Expression#hash has been added so that the objects
are now safe to use as hash keys.
* A Dataset#order_prepend method has been added allowing you to
prepend to an existing order. This is useful if you want to modify
a dataset's order such that it first orders by the columns you
provide, but for any rows where the columns you provide are
equal, uses the existing order to further order the dataset:
ds.order(:albums__name).order_prepend(:artists__name)
# ORDER BY artists.name, albums.name
* When creating foreign key columns, you can now use a :deferrable
option to set up a foreign key constraint that is not checked
until the end of the transaction:
DB.create_table(:albums) do
primary_key :id
String :name
foreign_key :artist_id, :artists, :deferrable=>true
end
* many_to_many associations now support a :join_table_block option
that is used by the add/remove/remove_all methods. It can modify
the dataset to ensure that certain columns are included when
inserting or to add a filter so that only certain records are
deleted. It's useful if you have a many_to_many association that
is filtered to only a subset of the matching rows in the join
table.
* The single_table_inheritance plugin now supports :model_map and
:key_map options to set up a custom mapping of column values to
model classes. For simple situations such as when you are mapping
integer values to certain classes, a :model_map hash is sufficient:
Employee.plugin :single_table_inheritance, :type_id,
:model_map=>{1=>:Staff, 2=>:Manager}
Here the :model_map keys are type_id column values, and the
:model_map values are symbols or strings specifying class names.
For more complex conditions, you can use a pair of procs:
Employee.plugin :single_table_inheritance, :type_name,
:model_map=>proc{|v| v.reverse},
:key_map=>proc{|klass| klass.name.reverse}
Here the type_name column is a string column holding the reverse
of the class's name.
* The single_table_inheritance plugin now correctly sets up subclass
filters for middle tables in a class hierarchy with more than 2
levels. For example, with this code:
class Employee < Sequel::Model; end
Employee.plugin :single_table_inheritance, :kind
class Manager < Employee; end
class Executive < Manager; end
Sequel previously would not return Executives if you used
Manager.all. It now correctly recognizes subclasses so that it
will return both Managers and Executives.
* Sequel::Model.qualified_primary_key_hash has been added, giving
you a hash that can be used for filtering. It is similar to
primary_key_hash, but it qualifies the keys with the model's
table. It's useful if you have joined the table to another table
that has columns with the same name, but you want to only look
for a single model object in that dataset.
* For consistency, you can now use Dataset#order_append as an alias
for order_more.
= Other Improvements
* Sequel now correctly removes schema entries when altering tables.
Previously, some adapters that had to query the existing schema
when altering tables resulted in the previous schema being cached.
* Sequel::Model::Errors#on now always returns nil if there are no
errors on the attribute. Previously, it would return an empty
array in certain cases. Additionally, Sequel::Model::Errors#empty?
now returns true if there are no errors, where in certain cases
it would return false even if there were no errors.
* The schema_dumper extension now works with tables specified as
Sequel::SQL::Identifiers.
* Sequel now recognizes the timestamp(N) with(out) time zone column
type.
* The lazy_attributes plugin no longer requires the core extensions
to work correctly.
* DatabaseDisconnectError support has been added to the ODBC adapter,
allowing Sequel to detect disconnects and remove the connection
from the connection pool.
* A leak of JDBC statement objects when using transactions was
fixed in the jdbc adapter.
* The jdbc adapter now gives a nicer error message if you use a
connection string that it doesn't recognize and there is an error
when connecting.
* Temporary table creation was fixed on Microsoft SQL Server, but
it is not recommended as it changes the name of the table. If
you use Microsoft SQL Server, you should prefix your temporary
table names with # and use the regular create table method.
* A large number of guides were added to Sequel to make it easier
for new and existing users to learn more about Sequel. The
following guides were added:
* Querying in Sequel
* Migration and Schema Modification
* Model Hooks
* Model Validations
* Sequel for SQL Users
* Sequel for ActiveRecord Users
* RDoc section support was added to Sequel::Database, making the
method documentation easier to read.
= Backwards Compatibility
* Sequel::Database now defines the indexes and tables methods, even
if the adapter does not implement them, similar to how connect
and execute are defined. Previously, you could use respond_to? to
check if the adapter supported them; now they raise
Sequel::NotImplemented if the database adapter does not implement
them.
* Sequel used to raise NotImplementedError in certain default
definitions of methods inside Sequel::Database and Sequel::Dataset,
when the methods were supposed to be overridden in subclasses.
Sequel now uses a Sequel::NotImplemented exception class for these
exceptions, which is a subclass of Sequel::Error.
* Sequel no longer applies all association options to the dataset
used to remove all many_to_many associated objects. You should
use the new :join_table_block option to get similar behavior if
you were filtering the many_to_many association based on columns
in the join table and you wanted remove_all to only remove the
related rows.
* Sequel now calls certain before and after hook actions in plugins
in a different order than before. This should not have an effect
unless you were relying on them being called in the previous order.
Now, when overriding before hooks in plugins, Sequel always does
actions before calling super, and when overriding after hooks in
plugins, Sequel always does actions after calling super.
* The hook_class_methods plugin no longer skips later after hooks if
a previous after hook returns false. That behavior now only occurs
for before hooks.
* Sequel now only removes primary key values when updating objects if
you are saving the entire object and you have not modified the
values of the primary keys. Previously, Sequel would remove
primary key values when updating even if you specified the primary
key column specifically or the primary key column was modified and
you used save_changes/update.
* Sequel now uses explicit methods instead of aliases for certain
methods. This should only affect you if for example you overrode
Dataset#group to do one thing and wanted Dataset#group_by to do
the default action. Now, Dataset#group_by, and methods like it, are
explicit methods that just call the methods they previously
aliased. This also means that if you were overriding Dataset#group
and explicitly aliasing group_by to it, you no longer need the
alias.
* The single_table_inheritance plugin now uses IN instead of = for
subclass filters. This could lead to poor performance if the
database has a very bad query planner.
* The private transaction_statement_object method was removed from
the JDBC adapter, and Sequel will no longer check for the presence
of the method in the transaction code.
* The Sequel::Migrator object is now a class instead of a module, and
has been pretty much rewritten. If you were using any methods of
it besides apply and run, they no longer work.
sequel-5.63.0/doc/release_notes/3.13.0.txt

= New Plugins
* A json_serializer plugin was added that allows you to serialize
model instances or datasets to JSON using to_json. It requires
the json library. The API was modeled on ActiveRecord's JSON
serialization support. You can use :only and :except options
to specify the columns included, :include to specify associations
to include, as well as pass options to nested associations using a
hash. In addition to serializing to JSON, it also adds support
for parsing JSON to model objects via JSON.parse or #from_json.
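A sketch of the API described above (the Album model and its artist
association are illustrative):
  Album.plugin :json_serializer
  Album[1].to_json(:only=>[:id, :name], :include=>:artist)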
* An xml_serializer plugin was added that allows you to serialize
model instances or datasets to XML. It requires the nokogiri
library. It has a similar API to the json_serializer plugin, using
to_xml instead of to_json, and the from_xml class method instead
of JSON.parse.
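Similarly, a sketch with illustrative names:
  Album.plugin :xml_serializer
  Album[1].to_xml(:only=>:name)
  Album.from_xml(xml_string) # xml_string previously produced by to_xml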
* A tree plugin was added that allows you to treat Sequel::Model
objects as being part of a tree. It provides similar features to
rcte_tree, but works on databases that don't support recursive
common table expressions. In addition to the standard parent
and children associations, it provides instance methods to get
the ancestors, descendants, and siblings of the given tree node,
and class methods to get the roots of the tree.
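A sketch of the described methods, assuming a nodes table with a
parent_id column (names illustrative; the method names follow the
description above):
  class Node < Sequel::Model
    plugin :tree
  end
  node = Node[1]
  node.ancestors   # all nodes above this node
  node.descendants # all nodes below this node
  node.siblings    # nodes sharing this node's parent
  Node.roots       # nodes with no parent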
* A list plugin was added that allows you to treat Sequel::Model
objects as being part of a list. This adds instance methods to
get the next and prev items in the list, or to move the item
to a specific place in the list. You can specify that all rows
in the table belong to the same list, or specify arbitrary scopes
so that the same table can contain many separate lists.
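A sketch, assuming a position column and a scope column named
list_id (both assumptions, not taken from this release note):
  class Item < Sequel::Model
    plugin :list, :scope=>:list_id
  end
  item = Item[1]
  item.next       # next item in this item's list
  item.prev       # previous item in this item's list
  item.move_to(3) # move to a specific position in the list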
= Other New Features
* Sequel is now compatible with Ruby 1.9.2pre3.
* Sequel now supports prepared transactions/two-phase commit on
PostgreSQL, MySQL, and H2. You can specify that you want to
use prepared transactions using the :prepare option which
should be some transaction id string:
DB.transaction(:prepare=>'some string') do ... end
Assuming that no exceptions are raised in the transaction block,
Sequel will prepare the transaction. You can then commit the
transaction later:
DB.commit_prepared_transaction('some string')
If you need to rollback the prepared transaction, you can do
so as well:
DB.rollback_prepared_transaction('some string')
* Sequel now supports customizable transaction isolation levels on
PostgreSQL, MySQL, and Microsoft SQL Server. You can specify the
transaction isolation level to use for any transaction using the
:isolation option with an :uncommitted, :committed, :repeatable,
or :serializable value:
DB.transaction(:isolation=>:serializable) do ... end
You can also set the default isolation level for transactions via
the transaction_isolation_level Database attribute:
DB.transaction_isolation_level = :committed
If you are connecting to Microsoft SQL Server, it is recommended
that you set a default transaction isolation level if you plan
on using this feature.
* You can specify a NULLS FIRST/LAST ordering by using the
:nulls=>:first/:last option to asc and desc:
Album.filter(:release_date.desc(:nulls=>:first),
:name.asc(:nulls=>:last))
# ORDER BY release_date DESC NULLS FIRST,
# name ASC NULLS LAST
This syntax is supported by PostgreSQL 8.3+, Firebird 1.5+,
Oracle, and probably some other databases as well, and makes it
possible for the user to specify whether NULL values should sort
before or after other values.
* Sequel::Model.find_or_create now accepts a block that is yielded
a new model object to be created if an existing model object is
not found:
Node.find_or_create(:name=>'A'){|i| i.parent_id = 4}
* The :frame option for windows and window functions can now be a
string that is used literally in the SQL. This is necessary if you
want to specify a custom frame, such as one that uses a specific
number of rows preceding or following.
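For example, combining this with the virtual row window function
syntax introduced in 3.2.0 (the frame clause itself is illustrative):
  DB[:t].select{sum(:over, :args=>:col1, :order=>:id,
    :frame=>'ROWS BETWEEN 2 PRECEDING AND CURRENT ROW'){}}
  # SELECT sum(col1) OVER (ORDER BY id
  #   ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) FROM t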
* Savepoints are now supported on H2.
* A :methods_module association option was added, allowing you to
specify the module into which association instance methods are
placed. By default, it uses the module containing the column
accessor methods.
= Other Improvements
* The :encoding option for the native MySQL adapter should now work
correctly in all cases. This fix was included in 3.12.1.
* Sequel now handles arrays of two element arrays automatically when
using them as the value of a filter hash:
DB[a].filter([:a, :b]=>[[1, 2], [3, 4]])
Previously, you had to call .sql_array on the array in order to
tell Sequel that it was a value list and not a conditions
specifier.
* Sequel no longer attempts to use class polymorphism in the
class_table_inheritance plugin if you don't specify a cti_key.
* When using the native SQLite adapter, prepared statements are now
cached per connection for increased performance. Previously,
Sequel prepared a new statement for every query.
* tinyint(1) columns are now handled as booleans when connecting to
MySQL via JDBC.
* On PostgreSQL, if no :schema option is provided for
Database#tables, #table_exists?, or #schema, and no default_schema
is used, assume all schemas except the default non-public ones.
Previously, it assumed the public schema for tables and
table_exists?, but did not assume any schema for #schema.
This fixes issues if you use table names that overlap with table
names in the information_schema, such as domains. It's still
recommended that you specify a default_schema if you are using a
schema other than public.
* Unsigned integers are now handled correctly in the schema dumper.
* Sequel::SQL::PlaceholderLiteralString is now a GenericExpression
subclass, allowing you to treat it like most other Sequel
expression objects:
'(a || ?)'.lit(:b).like('Test%')
# ((a || b) LIKE 'Test%')
* Sequel now supports the bitwise shift operators (<< and >>) on
Microsoft SQL Server by emulating them.
* Sequel now supports most bitwise operators (&, |, ^, <<, >>) on H2
by emulating them. The bitwise complement operator is not yet
supported.
* Sequel now logs the SQL queries that are sent when connecting to
MySQL.
* If a plugin cannot be loaded, Sequel now gives a more detailed
error message.
= Backwards Compatibility
* Array#sql_array and the Sequel::SQL::SQLArray class are now
considered deprecated. Use the Array#sql_value_list and the
Sequel::SQL::ValueList class instead. SQLArray is now just
an alias for ValueList, but it now is an Array subclass instead
of a Sequel::SQL::Expression subclass.
* Using the ruby bitwise xor operator (^) on PostgreSQL now uses
PostgreSQL's bitwise xor operator (#) instead of PostgreSQL's
exponentiation operator (^). If you want exponentiation, use
the power function.
* Using the ruby bitwise complement operator (~) on MySQL now returns
a signed integer instead of an unsigned integer, for better
compatibility with other databases.
* Using nil as a case expression value (the 2nd argument to Hash#case
and Array#case) will now use NULL as the case expression value,
instead of omitting the case expression value:
# 3.12.0
{1=>2}.case(0, nil)
# CASE WHEN 1 THEN 2 ELSE 0 END
# 3.13.0
{1=>2}.case(0, nil)
# CASE NULL WHEN 1 THEN 2 ELSE 0 END
In general, you would never use nil explicitly, but the new
behavior makes more sense if you have a variable that might be nil:
parent_id = Node[1].parent_id
{1=>2}.case(0, parent_id)
If parent_id IS NULL/nil, then previously Sequel would have
generated unexpected SQL. If you don't want a case expression
value to be used, do not pass a second argument to #case.
* Some internal transaction methods now take an optional options
hash, so if you have a custom adapter, you will need to make
changes.
* Some internal association methods now take an optional options
hash.
* Some Rakefile task names were modified in the name of consistency:
spec_coverage -> spec_cov
integration -> spec_integration
integration_cov -> spec_integration_cov
sequel-5.63.0/doc/release_notes/3.14.0.txt

= New Features
* Dataset#grep now accepts :all_patterns, :all_columns, and
:case_insensitive options. Previously, grep would use a case
sensitive search where it would match if any pattern matched any
column. These three options give you more control over how the
pattern matching will work:
dataset.grep([:a, :b], %w'%test% foo')
# WHERE ((a LIKE '%test%') OR (a LIKE 'foo')
# OR (b LIKE '%test%') OR (b LIKE 'foo'))
dataset.grep([:a, :b], %w'%foo% %bar%', :all_patterns=>true)
# WHERE (((a LIKE '%foo%') OR (b LIKE '%foo%'))
# AND ((a LIKE '%bar%') OR (b LIKE '%bar%')))
dataset.grep([:a, :b], %w'%foo% %bar%', :all_columns=>true)
# WHERE (((a LIKE '%foo%') OR (a LIKE '%bar%'))
# AND ((b LIKE '%foo%') OR (b LIKE '%bar%')))
dataset.grep([:a, :b], %w'%foo% %bar%',
:all_patterns=>true,:all_columns=>true)
# WHERE ((a LIKE '%foo%') AND (b LIKE '%foo%')
# AND (a LIKE '%bar%') AND (b LIKE '%bar%'))
dataset.grep([:a, :b], %w'%test% foo', :case_insensitive=>true)
# WHERE ((a ILIKE '%test%') OR (a ILIKE 'foo')
# OR (b ILIKE '%test%') OR (b ILIKE 'foo'))
* When using the schema plugin, you can now provide a block to the
create_table methods to set the schema and create the table
in the same call:
class Artist < Sequel::Model
create_table do
primary_key :id
String :name
end
end
* The tree plugin now accepts a :single_root option, which uses a
before_save hook to attempt to ensure that there is only a single
root in the tree. It also adds a Model.root method to get the
single root of the tree.
* The tree plugin now adds a Model#root? instance method to check
if the current node is a root of the tree.
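For example:
  Node.plugin :tree, :single_root=>true
  Node.root     # => the single root node
  Node[1].root? # => true or false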
* Model#save now takes a :raise_on_failure option which will
override the object's raise_on_save_failure setting. This makes
it easier to get the desired behavior (raise or just return false)
in library code without using a begin/rescue block.
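For example, to get the return-false behavior for a single save
regardless of the model's raise_on_save_failure setting:
  if album.save(:raise_on_failure=>false)
    # saved successfully
  else
    # handle the failure without a begin/rescue block
  end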
* The Database#adapter_scheme instance method was added, which
operates the same as the class method.
* Sequel now handles the literalization of OCI8::CLOB objects in
the Oracle adapter.
= Other Improvements
* When using the timezone support, Sequel will now correctly load
times and datetimes in standard time when the current timezone is
in daylight time, or vice versa. Previously, if you tried to
load a time or datetime from December while the current date was in
July in a timezone
that used daylight time, it would be off by an hour.
* The rcte_tree plugin now works correctly when a :conditions option
is used.
* The single_table_inheritance plugin now works correctly when the
class discriminator column has the same name as an existing ruby
method (such as type).
* Database#each_server now works correctly when a connection string
is used to connect, instead of an options hash.
* Model#destroy now respects the object's use_transactions setting,
instead of always using a transaction.
* Model#exists? now uses a simpler and faster query.
* Sequel now handles the aggregate methods such as count and sum
correctly on Microsoft SQL Server when using an ordered dataset
with a clause such as DISTINCT or GROUP and without a limit.
* Sequel now handles rename_table correctly on Microsoft SQL Server
when using a case sensitive collation, or when qualifying the
table with a schema.
* Sequel now parses the schema correctly on Oracle when the same
table name is used in multiple schemas.
* Sequel now handles OCIInvalidHandle errors when disconnecting
in the Oracle adapter.
* Sequel now raises a Sequel::Error instead of an ArgumentError
if the current or target migration version does not exist.
* When a mismatched number of composite keys are used in
associations, Sequel now uses a more detailed error message.
* Significant improvements were made to the Dataset and Model
RDoc documentation.
= Backwards Compatibility
* Model#valid? now must accept an optional options hash.
* The Model#save_failure private method was renamed to
raise_hook_failure.
* The LOCAL_DATETIME_OFFSET_SECS and LOCAL_DATETIME_OFFSET constants
have been removed from the Sequel module.
* Sequel now uses obj.to_json instead of JSON.generate(obj). This
shouldn't affect backwards compatibility, but did fix a bug in
certain cases.
sequel-5.63.0/doc/release_notes/3.15.0.txt

= Performance Enhancements
* A mysql2 adapter was added to Sequel. It offers a large (2-6x)
performance increase over the standard mysql adapter. In order to
use it, you need to install mysql2, and change your connection
strings to use mysql2:// instead of mysql://.
* Support for sequel_pg was added to the native postgres adapter,
when pg is being used as the backend. sequel_pg also offers a
large (2-6x) performance increase over the default row fetching
code that the Sequel postgres adapter uses. In order to use it,
you just need to install sequel_pg, and the postgres adapter will
pick it up automatically.
* Mass assignment has been made about 10x faster by caching the
allowed setter methods in the model.
= Other Improvements
* The following construct is now safe to use in environments that
reload code without unloading existing constants:
class MyModel < Sequel::Model(DB[:table])
end
Previously, this would raise a superclass mismatch TypeError.
* Sequel now handles the case where both an implicit and an explicit
table alias are given to join_table, preferring the explicit alias.
This can happen if you are using models with aliased table names
and eager graphing them. Previously, this would result in invalid
SQL, with both aliases being used.
* You can now use an aliased table for the :join_table option
of a many_to_many association.
* The active_model plugin now supports the final release of
ActiveModel 3.0.0.
* Typecasting now works correctly for attributes loaded lazily
when using the lazy_attributes plugin.
* The class_table_inheritance plugin now works with non-integer
primary keys on SQLite.
* Temporary tables are now ignored when parsing the schema on
PostgreSQL.
* On MySQL, an :auto_increment key with a true value is added to
the Database#schema output hash if the related column is
auto incrementing.
* The mysql adapter now handles Mysql::Error exceptions raised when
disconnecting.
* On SQLite, emulated alter_table commands that require dropping
the table now preserve the foreign key information, if SQLite
foreign key support is enabled (it is by default).
* DSN-less connections now work correctly in more cases in the
ODBC adapter.
* A workaround has been added for a bug in the Microsoft SQL
Server JDBC Driver 3.0, involving it incorrectly returning a
smallint instead of a char type for the IS_AUTOINCREMENT
metadata value.
* A bug in the error handling when connecting to PostgreSQL using
the do (DataObjects) adapter has been fixed.
= Backwards Compatibility
* The caching of allowed mass assignment methods can result in the
incorrect exception class being raised if you manually undefine
instance setter methods in the model class. If you do this, you
need to clear the setter methods cache manually:
MyModel.clear_setter_methods_cache
sequel-5.63.0/doc/release_notes/3.16.0.txt

= New Adapter
* A swift adapter was added to Sequel. Swift is a relatively new
ruby database library, built on top of a relatively new backend
called dbic++. While not yet considered production ready, it is
very fast. The swift adapter is about 33% faster and 40% more
memory efficient for selects than the postgres adapter using pg
with sequel_pg, though it is slower and less memory efficient
for inserts and updates.
Sequel's swift adapter currently supports only PostgreSQL and
MySQL, but support for other databases will probably be added in
the future.
= Other Improvements
* Sequel now correctly literalizes DateTime objects on ruby 1.9 for
databases that support fractional seconds.
* The identity_map plugin now handles composite keys in many_to_one
associations.
* The rcte_tree plugin now works when the model's dataset does not
select all columns. This can happen when using the lazy_attributes
plugin on the same model.
* Sequel now supports INTERSECT and EXCEPT on Microsoft SQL Server
2005+.
* The Database#create_language method in the shared PostgreSQL
adapter now accepts a :replace option to replace the currently
loaded procedural language if it already exists. This option
is ignored for PostgreSQL versions before 9.0.
* The identity_map plugin now handles cases where the plugin is
loaded separately by two different models.
= Backwards Compatibility
* While not technically backwards compatibility related, it was
discovered that the identity_map plugin is incompatible with
the standard eager loading of many_to_many and many_through_many
associations. If you want to eagerly load those associations and
use the identity_map plugin, you should use eager_graph instead
of eager.
sequel-5.63.0/doc/release_notes/3.17.0.txt

= New Features
* You can now change the level at which Sequel logs SQL statements,
by calling Database#sql_log_level= with the method name symbol.
The default is still :info for backwards compatibility. Previously,
you had to use a proxy logger to get similar capability.
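For example, to log SQL statements at DEBUG instead of INFO level:
  DB.sql_log_level = :debug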
* You can now specify graph aliases where the alias would be the same
as the table column name using just the table name symbol, instead
of having to repeat the alias as the second element of an array. More
clearly:
# < 3.17.0:
DB[:a].graph(:b, :a_id=>:id).
set_graph_aliases(:c=>[:a, :c], :d=>[:b, :d])
# >= 3.17.0:
DB[:a].graph(:b, :a_id=>:id).set_graph_aliases(:c=>:a, :d=>:b)
Both of these now yield the SQL:
SELECT a.c, b.d FROM a LEFT OUTER JOIN b ON (b.a_id = a.id)
* You should now be able to connect to MySQL over SSL in the native
MySQL adapter using the :sslca, :sslkey, and related options.
* Database#views and Database#view_exists? methods were added to the
Oracle adapter, allowing you to get an array of view name symbols
and to check whether a given view exists.
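For example:
  DB.views                    # => [:view1, :view2, ...]
  DB.view_exists?(:some_view) # => true or false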
= Other Improvements
* The nested_attributes plugin now avoids unnecessary update calls
when deleting associated objects, resulting in better performance.
* The optimistic_locking plugin now increments the lock column if no
other columns were modified but Model#modified! was called. This
means it now works correctly with the nested_attributes plugin when
no changes to the main model object are made.
* The xml_serializer plugin can now round-trip nil values correctly.
Previously, nil values would be converted into empty strings. This
is accomplished by including a nil attribute in the xml tag.
* Database#each_server now works correctly when using the jdbc and do
adapters and a connection string without a separate :adapter option.
* You can now clone many_through_many associations.
* The default wait_timeout used by the mysql and mysql2 adapters was
decreased slightly so that it works correctly with MySQL database
servers that run on Windows.
* Many improvements were made to the AS400 jdbc subadapter.
* Many improvements were made to the swift adapter and subadapters.
* Dataset#ungraphed now removes any cached graph aliases set with
set_graph_aliases or add_graph_aliases.
sequel-5.63.0/doc/release_notes/3.18.0.txt

= New Features
* Reversible migration support has been added:
Sequel.migration do
change do
create_table(:artists) do
primary_key :id
String :name, :null=>false
end
end
end
The change block acts the same way as an up block, except that
it automatically creates a down block that reverses the changes.
So the above is equivalent to:
Sequel.migration do
up do
create_table(:artists) do
primary_key :id
String :name, :null=>false
end
end
down do
drop_table :artists
end
end
The following methods are supported in a change block:
* create_table
* add_column
* add_index
* rename_column
* rename_table
* alter_table (supporting the following methods):
* add_column
* add_constraint
* add_foreign_key (with a symbol, not an array)
* add_primary_key (with a symbol, not an array)
* add_index
* add_full_text_index
* add_spatial_index
* rename_column
Use of any other method in a change block will result in the
creation of a down block that raises an exception.
* A to_dot extension has been added that adds a Dataset#to_dot
method, which returns a string that can be used as input to
the graphviz dot program in order to create visualizations
of the dataset's abstract syntax tree. Examples:
* http://sequel.jeremyevans.net/images/to_dot_simple.gif
* http://sequel.jeremyevans.net/images/to_dot_complex.gif
Both the to_dot extension and reversible migrations support
were inspired by Aaron Patterson's recent work on ActiveRecord
and ARel.
* The user can now control how the connection pool handles attempts
to access shards that haven't been configured. The default is
still to assume the :default shard. However, you can specify a
different shard using the :servers_hash option when connecting
to the database:
DB = Sequel.connect(..., :servers_hash=>Hash.new(:some_shard))
You can also use this feature to raise an exception if an
unconfigured shard is used:
DB = Sequel.connect(..., :servers_hash=>Hash.new{raise ...})
* The mysql and mysql2 adapters now both support the :read_timeout
and :connect_timeout options. read_timeout is the timeout in
seconds for reading back results of a query, and connect_timeout
is the timeout in seconds before a connection attempt is abandoned.
= Other Improvements
* The json_serializer plugin will now typecast column values for
columns with unrestricted setter methods when parsing JSON into
model objects. It now also calls the getter method when creating
the JSON, instead of directly taking the values from the underlying
hash.
* When parsing the schema for a model with an aliased table name,
the unaliased table name is now used.
* The SQLite adapter has been updated to not rely on the native
type_translation support, since that will be removed in the next
major version of sqlite3-ruby. Sequel now implements its own
type translation in the sqlite adapter, similarly to how the mysql
and postgres adapters handle type translation.
* On SQLite, when emulating natively unsupported schema methods such
as drop_column, Sequel will now attempt to recreate applicable
indexes on the underlying table.
* A more informative error message is now used when connecting fails
when using the jdbc adapter.
* method_missing is no longer removed from Sequel::BasicObject on
ruby 1.8. This should improve compatibility in some cases with
Rubinius.
= Backwards Compatibility
* On SQLite, Sequel no longer assumes that a plain integer in a
datetime or timestamp field represents a unix epoch time.
* Previously, saving a model object that used the instance_hooks
plugin removed all instance hooks. Now, only the applicable hooks
are removed. So if you save a new object, the update instance
hooks won't be removed. And if you save an existing object, delete
instance hooks won't be removed.
* The private Dataset#identifier_list method has been moved into the
SQLite adapter, since that is the only place it was used.
sequel-5.63.0/doc/release_notes/3.19.0.txt

= New Features
* The add_* association methods now accept a primary key, and
associates the receiver to the associated model object with that
primary key:
artist.add_album(42)
# equivalent to: artist.add_album(Album[42])
* The validation_class_methods plugin now has the ability to
reflect on validations:
Album.plugin :validation_class_methods
Album.validates_acceptance_of(:a)
Album.validation_reflections
# => {:a=>[[:acceptance, {:tag=>:acceptance, :allow_nil=>true,
:message=>"is not accepted", :accept=>"1"}]]}
= Other Improvements
* In the postgres, mysql, and sqlite adapters, typecasting now uses
methods instead of procs. Since methods aren't closures (while
procs are), this makes typecasting faster (up to 15%).
* When typecasting model column values, the classes of the new and
existing values are checked in addition to the values themselves.
Previously, if the new and existing values were equal (i.e. 1.0
and 1), it wouldn't update the value. Now, if the classes are
different, it always updates the value.
* Date and DateTime objects are now handled correctly when using
prepared statements/bound variables in the jdbc adapter.
* Date, DateTime, Time, true, false, and SQL::Blob objects are now
handled correctly when using prepared statements/bound variables
in the sqlite adapter.
* Sequel now uses varbinary(max) instead of image for the generic
File (blob) type on Microsoft SQL Server. This makes it possible
to use an SQL::Blob object as a prepared statement argument.
* Sequel now handles blobs better in the Amalgalite adapter.
* When disconnecting a connection using the sqlite adapter, all
open prepared statements are now closed first. Previously,
attempting to disconnect a connection with open prepared statements
resulted in an error.
* The license file has been renamed from COPYING to MIT-LICENSE, to
make it easier to determine at a glance which license is used.
= Backwards Compatibility
* Because Sequel switched the generic File type from image to
varbinary(max) on Microsoft SQL Server, any migrations/schema
modification methods that used the File type will now result in a
different column type than before.
* The MYSQL_TYPE_PROCS, PG_TYPE_PROCS, and SQLITE_TYPE_PROCS
constants have been removed from the mysql, postgres, and sqlite
adapters, respectively. The UNIX_EPOCH_TIME_FORMAT and
FALSE_VALUES constants have also been removed from the sqlite
adapter.
* Typecasting in the sqlite adapters now uses to_i and to_f instead
of Integer() and Float() with rescues. If you put non-numeric
data in numeric columns on SQLite, this could cause problems.
sequel-5.63.0/doc/release_notes/3.2.0.txt

New Features
------------
* Common table expressions (CTEs) are now supported. CTEs use the
SQL WITH clause, and specify inline views that queries can use.
They also support a recursive mode, where the CTE can recursively
query its own output, allowing you to do things like load all
branches for a given node in a plain tree structure.
The standard with takes an alias and a dataset:
DB[:vw].with(:vw, DB[:table].filter{col < 1})
# WITH vw AS (SELECT * FROM table WHERE col < 1)
# SELECT * FROM vw
The recursive with takes an alias, a nonrecursive dataset, and a
recursive dataset:
DB[:vw].with_recursive(:vw,
DB[:tree].filter(:id=>1),
DB[:tree].join(:vw, :id=>:parent_id).
select(:vw__id, :vw__parent_id))
# WITH RECURSIVE vw AS (SELECT * FROM tree
# WHERE (id = 1)
# UNION ALL
# SELECT vw.id, vw.parent_id
# FROM tree
# INNER JOIN vw ON (vw.id = tree.parent_id))
# SELECT * FROM vw
CTEs are supported by Microsoft SQL Server 2005+, DB2 7+,
Firebird 2.1+, Oracle 9+, and PostgreSQL 8.4+.
* SQL window functions are now supported, and a DSL has been added to
ease their creation. Window functions act similarly to aggregate
functions but operate on sliding ranges of rows.
In virtual row blocks (blocks passed to filter, select, order, etc.)
you can now provide a block to method calls to change the default
behavior to create functions that weren't possible previously. The
blocks aren't called, but their presence serves as a flag.
What function is created depends on the arguments to the method:
* If there are no arguments, an SQL::Function is created with the
name of method used, and no arguments. Previously, it was not
possible to create functions without arguments using the virtual
row block DSL. Example:
DB.dataset.select{version{}} # SELECT version()
* If the first argument is :*, an SQL::Function is created with a
single wildcard argument (*). This is mostly useful for count:
DB[:t].select{count(:*){}} # SELECT count(*) FROM t
* If the first argument is :distinct, an SQL::Function is created
with the keyword DISTINCT prefacing all remaining arguments. This
is useful for aggregate functions such as count:
DB[:t].select{count(:distinct, col1){}}
# SELECT count(DISTINCT col1) FROM t
* If the first argument is :over, the second argument, if provided,
should be a hash of options to pass to SQL::Window. The options
hash can also contain :*=>true to use a wildcard argument as the
function argument, or :args=>... to specify an array of arguments
to use as the function arguments.
DB[:t].select{rank(:over){}} # SELECT rank() OVER ()
DB[:t].select{count(:over, :*=>true){}} # SELECT count(*) OVER ()
DB[:t].select{sum(:over, :args=>col1,
:partition=>col2, :order=>col3){}}
# SELECT sum(col1) OVER (PARTITION BY col2 ORDER BY col3)
PostgreSQL also supports named windows. Named windows can be
specified by Dataset#window, and window functions can reference
them using the :window option.
* Schema information for columns now includes a :ruby_default entry
which contains a ruby object that represents the default given by
the database (which is stored in :default). Not all :default
entries can be parsed into a :ruby_default, but if the
schema_dumper extension previously supported it, it should work.
* Methods to create compound datasets (union, intersect, except), now
take an options hash instead of a true/false flag. The previous
API is still supported, but switching to specifying the ALL setting
using :all=>true is recommended.
Additionally, you can now set :from_self=>false to not wrap the
returned dataset in a "SELECT * FROM (...)".
* Dataset#ungraphed was added that removes the graphing information
from the dataset. This allows you to use Dataset#graph for the
automatic aliasing, or #eager_graph for the automatic aliasing and
joining, and then remove the graphing information so that the
resulting objects will not be split into subhashes or associations.
* There were some introspection methods added to Dataset to describe
which capabilities that dataset does or does not support:
supports_cte?
supports_distinct_on?
supports_intersect_except?
supports_intersect_except_all?
supports_window_functions?
In addition to being available for the user to use, these are also
used internally, so attempting to use a CTE on a dataset that
doesn't support it will raise an Error.
* Dataset#qualify was added, which is like qualify_to with a default
of first_source.
Additionally, qualify now affects PlaceholderLiteralStrings. It
doesn't scan the string (as Sequel never attempts to parse SQL),
but if you provide the column as a symbol placeholder argument, it
will qualify it.
* You can now specify the table and column Sequel::Migrator will use
to record the current schema version. The new Migrator.run method
must be used to use these new options.
* The JDBC adapter now accepts :user and :password options, instead
of requiring them to be specified in the connection string and
handled by the JDBC driver. This should allow connections to
Oracle using the Thin JDBC driver.
* You can now specify the max_connections, pool_timeout, and
single_threaded settings directly in the connection string:
postgres:///database?single_threaded=t
postgres:///database?max_connections=10&pool_timeout=20
* Dataset#on_duplicate_key_update now affects Dataset#insert when
using MySQL.
* You can now specify the :opclass option when creating PostgreSQL
indexes. Currently, this only supports a single operator class
for all columns. If you need different operator classes per
column, please post on sequel-talk.
* Model#autoincrementing_primary_key was added and can be used if
the autoincrementing key isn't the same as the primary key. The
only likely use for this is on MySQL MyISAM tables with composite
primary keys where only one of the composite parts is
autoincrementing.
* You can now use database column values as search patterns and
specify the text to search as a String or Regexp:
String.send(:include, Sequel::SQL::StringMethods)
Regexp.send(:include, Sequel::SQL::StringMethods)
'a'.like(:x) # ('a' LIKE x)
/a/.like(:x) # ('a' ~ x)
/a/i.like(:x) # ('a' ~* x)
/a/.like(:x, 'b') # (('a' ~ x) OR ('a' ~ 'b'))
* The Dataset#dataset_alias private method was added. It can be
overridden if you have tables named t0, t1, etc., and want to make
sure the default dataset aliases that Sequel uses do not clash
with existing table names.
* Sequel now raises an Error if you call Sequel.connect with
something that is not a Hash or String.
* bin/sequel now accepts a -N option to not test the database
connection.
* An opening_databases.rdoc file was added to the documentation
directory, which should be a good introduction for new users about
how to set up your Database connection.
Other Improvements
------------------
* MySQL native adapter SELECT is much faster than before, up to 75%
faster.
* JDBC SELECT is about 10% faster than before. It's still much
slower than the native adapters, due to conversion issues.
* bin/sequel now works with a YAML file on ruby 1.9.
* MySQL foreign key table constraints have been fixed.
* Database#indexes now works on PostgreSQL if the schema used is a
Symbol. It also works on PostgreSQL versions all the way back to
7.4.
* Graphing of datasets with dataset sources has been fixed.
* Changing a column's name, type, or NULL status on MySQL now
supports a much wider selection of column defaults.
* The stored procedure code is now thread-safe. Sequel is
thread-safe in general, but due to a bug the previous stored
procedure code was not thread-safe.
* The ODBC adapter now drops statements automatically instead of
requiring the user to do so manually, making it more similar
to other adapters.
* The single_table_inheritance plugin no longer overwrites the STI
field if the field already has a value. This allows you to use
create in the generic class to insert a value that will be
returned as a subclass:
Person.create(:kind => "Manager")
* When altering columns on MySQL, the :unsigned, :elements, :size, and
other
options given are no longer ignored.
* The PostgreSQL shared adapter's explain and analyze methods have
been fixed, they had been broken in 3.0.
* Parsing of the server's version is more robust on PostgreSQL.
It should now work correctly for 8.4 and 8.4rc1 type versions.
Backwards Compatibility
-----------------------
* Dataset#table_exists? has been removed, since it never worked
perfectly. Use Database#table_exists? instead.
* Model.grep now calls Dataset#grep instead of Enumerable#grep.
If you are using Model.grep, you need to modify your application.
* The MSSQL shared adapter previously used the :with option for
storing the NOLOCK setting of the query. That option has been
renamed to :table_options, since :with is now used for CTEs.
This should not have an effect unless you were using the option
manually.
* Previously, providing a block to method calls in virtual row
blocks did not change behavior, whereas now it causes a different
code path to be used. In both cases, the block is not evaluated,
but that may change in a future version.
* Dataset#to_table_reference protected method was removed, as it was
no longer used.
* The pool_timeout setting is now converted to an Integer, so if you
used to pass in a Float, it no longer works the same way.
* Most files in adapters/utils have been removed, in favor of
integrating the code directly into Database and Dataset. If you
were previously checking for the UnsupportedIntersectExcept or
related modules, use the Dataset introspection methods instead
(e.g. supports_intersect_except?).
* If you were using the ODBC adapter and manually dropping returned
statements, you should note that now statements are dropped
automatically, and the execute method doesn't return a statement
object.
* The MySQL adapter on_duplicate_key_update_sql is now a private
method.
* If you were modifying the :from dataset option directly, note that
Sequel now expects this option to be preprocessed. See the new
implementation of Dataset#from for an idea of the changes
required.
* Dataset#simple_select_all? now returns false instead of true for a
dataset that selects from another dataset.
sequel-5.63.0/doc/release_notes/3.20.0.txt

= New Features
* The swift adapter now supports an SQLite subadapter. Use the
:db_type => 'sqlite' option when connecting. You can use an
in memory database with the following connection string:
swift:///?database=:memory:&db_type=sqlite
* Arbitrary JDBC properties can now be set in the JDBC adapter
using the :jdbc_properties option when connecting. The value
of this option should be a hash where keys and values are JDBC
property keys and values.
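A sketch (the specific property shown is illustrative, not a
recommendation):
  DB = Sequel.connect('jdbc:mysql://host/database',
    :jdbc_properties=>{'zeroDateTimeBehavior'=>'convertToNull'})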
* Basic Microsoft Access support was added to the ado adapter.
The creation of autoincrementing primary key integers now works,
and identifiers are now quoted with [].
* The Database#indexes method now supports a :partial option when
connecting to MySQL, which makes it include partial indexes (which
are usually excluded).
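For example:
  DB.indexes(:albums, :partial=>true)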
= Other Improvements
* The class_table_inheritance plugin now handles subclass
associations better. Previously, the implicit eager loading code
had issues when you called an association method that only existed
in the subclass.
* The error message used when a validates_max_length validation is
applied to a nil column value has been improved. You can override
the message yourself using the :nil_message option.
* The read_timeout and connect_timeout options now work correctly in
the mysql adapter.
* Another MySQL disconnect error message is now recognized.
= Backwards Compatibility
* The swift adapter was upgraded to support swift 0.8.1. Older
versions of swift are no longer supported.
sequel-5.63.0/doc/release_notes/3.21.0.txt

= New Features
* A tinytds adapter was added, enabling Sequel users on a C-based
ruby running on *nix easy access to Microsoft SQL Server.
Previously, the best way to connect to Microsoft SQL Server from
a C-based ruby on *nix was to use the ODBC adapter with unixodbc
and freetds. However, setting that up is nontrivial, while
setting up tinytds is very easy.
Note that the tinytds adapter currently requires the git master
branch of tiny_tds, but tiny_tds 0.4.0 should include the related
changes.
* An association_autoreloading plugin has been added to Sequel,
which removes stale many_to_one associations from the cache when
the associated foreign key setter is used to change the value of
the foreign key.
* bin/sequel now operates more like a standard *nix utility.
If given a file on the command line after the connection
string, it will assume that file has ruby code and load it.
If stdin is not a tty, it will read from stdin and execute it
as ruby code.
For recent Linux users, this means you can have a shebang line
such as:
#!/usr/bin/sequel postgres://user:pass@host/db
to create a self contained script.
* bin/sequel now supports -r and -I options similar to ruby's
-r and -I options.
* MySQL datasets now have a calc_found_rows method that uses
SQL_CALC_FOUND_ROWS, which provides a fast way to limit the
number of results returned by a dataset while having an easy
way to determine how many rows would have been returned if no
limit was applied.
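A sketch (found_rows here is just MySQL's FOUND_ROWS function called
via a virtual row block, not a new Sequel method):
  ds = DB[:items].calc_found_rows.limit(10)
  rows = ds.all
  # SELECT SQL_CALC_FOUND_ROWS * FROM items LIMIT 10
  total = DB.dataset.get{found_rows{}}
  # SELECT found_rows()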
= Other Improvements
* The Sequel::Model.plugin method can now be overridden just like
any other method. Previously, it was the only method that
was defined directly on the class. This allows the creation
of plugins that can modify the plugin system itself.
* Symbol splitting (:table__column___alias) now works correctly
for identifiers that include characters that aren't in [\w ].
Among other things, this means that identifiers with accented
characters or even kanji characters can be used with symbol
splitting.
* If cover? is defined, it is now used in preference to include?
for the validates_includes/validates_inclusion_of validations.
ruby 1.9 defines include? differently for some ranges and can
be very slow, while cover? is similar to the 1.8 behavior of
just checking the beginning and end of the range.
* The bin/sequel -L option now takes effect even if the -m,
-C, -d, or -D options are used.
* The schema_dumper extension now recognizes the "bigint unsigned"
type.
* On Microsoft SQL Server, if joining to a subselect that uses a
common table expression, that common table expression is
promoted to the main dataset. This allows most queries to work
correctly, but is vulnerable to issues if both the current
dataset and the joined dataset use common table expressions with
the same name. Unfortunately, unlike PostgreSQL, Microsoft SQL
Server does not allow common table expressions to occur in
subselects.
* The NULL/NOT NULL, DEFAULT, and UNIQUE column options now
use the proper order on H2 and Oracle, so they can now be
used in conjunction with each other.
* Row locks are now enabled on Oracle.
* The join_table method on MySQL no longer ignores the block it was
given.
* The informix adapter now supports ruby-informix version >= 0.7.3,
while still being backwards compatible with older versions.
* The specs now run under both RSpec 2 and RSpec 1.
sequel-5.63.0/doc/release_notes/3.22.0.txt

= New Features
* Support COLLATE in column definitions. At least MySQL and Microsoft
SQL Server support them, and PostgreSQL 9.1 should as well.
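A sketch, using a MySQL collation name (illustrative):
  DB.create_table(:names) do
    String :name, :collate=>'utf8_general_ci'
  end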
* When connecting to Microsoft SQL Server, you can use the
mssql_unicode_strings accessor to turn off the default usage
of unicode strings (N'') and use regular strings (''). This
can improve performance, but changes the behavior. It's
set to true by default for backwards compatibility. You can
change it at both the dataset and database level:
DB.mssql_unicode_strings = false # default for datasets
dataset.mssql_unicode_strings = false # just this dataset
* In the oracle adapter, if Sequel.application_timezone is :utc, set
the timezone for the connection to use the 00:00 timezone.
= Other Improvements
* In the single_table_inheritance plugin, correctly handle a
multi-level class hierarchy so that loading instances from a
middle level of the hierarchy can return instances of subclasses.
* Don't use a schema when creating a temporary table, even if
default_schema is set.
* Fix the migrator when a default_schema is used.
* In the ado adapter, assume a connection to SQL Server if the
:conn_string is given and doesn't indicate Access/Jet.
* Fix fetching rows in the tinytds adapter when the
identifier_output_method is nil.
* The tinytds adapter now checks for disconnect errors, but it might
not be reliable until the next release of tiny_tds.
* The odbc adapter now handles ODBC::Time instances correctly.
sequel-5.63.0/doc/release_notes/3.23.0.txt

= New Features
* Sequel now allows dynamic customization for eager loading.
Previously, the parameters for eager loading were fixed at
association creation time. Now, they can be modified at query
time. To dynamically modify an eager load, you use a hash with
the proc as the value. For example, if you have this code:
Artist.eager(:albums)
And you only want to eagerly load albums where the id is greater
than or equal to some number provided by the user, you do:
min = params[:min].to_i
Artist.eager(:albums=>proc{|ds| ds.where{id > min}})
This also works when eager loading via eager_graph:
Artist.eager_graph(:albums=>proc{|ds| ds.where{id > min}})
For eager_graph, the dataset is the dataset to graph into the
current dataset, and filtering it will result in an SQL query
that joins to a subquery.
You can also use dynamic customization while cascading to also
eagerly load dependent associations, by making the hash value
a single entry hash with a proc key and the value being the
dependent associations to eagerly load. For example, if you want
to eagerly load tracks for those albums:
Artist.eager(:albums=>{proc{|ds| ds.where{id > min}}=>:tracks})
* Sequel also now allows dynamic customization for regular
association loading. Previously, this was possible by using the
association's dataset:
albums = artist.albums_dataset.filter{id > min}
However, then there was no handling of caching, callbacks, or
reciprocals. For example:
albums.each{|album| album.artist}
Would issue one query per album to get the artist, because the
reciprocal association was not set. Now you can provide a
block to the association method:
albums = artist.albums{|ds| ds.filter{id > min}}
This block is called with the dataset used to retrieve the
associated objects, and should return a modified version of that
dataset.
Note that ruby 1.8.6 doesn't allow blocks to take block arguments,
so you have to pass the block as a separate proc argument to the
association method if you are still using 1.8.6.
* Sequel now supports filtering by associations. This wasn't
previously supported as filtering is a dataset level feature and
associations are a model level feature, and datasets do not depend
on models. Now, model datasets have the ability to filter by
associations. For example, to get all albums for a given artist,
you could do:
artist = Artist[1]
Album.filter(:artist=>artist)
Since the above can also be accomplished with:
artist.albums
this may not seem like a big improvement, but it allows you to
filter on multiple associations simultaneously:
Album.filter(:artist=>artist, :publisher=>publisher)
For simple many_to_one associations, the above is just a simpler
way to do:
Album.filter(:artist_id=>artist.id, :publisher_id=>publisher.id)
Sequel supports this for all association types, including
many_to_many and many_through_many, where a subquery is used, and
it also works when composite key associations are used:
Album.filter(:artist=>artist, :tags=>tag)
This will give you the albums for that artist that are also tagged
with that tag. To provide multiple values for the same
association, mostly useful for many_to_many associations, you can
either use separate filter calls or specify the conditions as an
array:
Album.filter(:tags=>tag1).filter(:tags=>tag2)
Album.filter([[:tags, tag1], [:tags, tag2]])
* A columns_introspection extension has been added that makes
datasets attempt to guess their columns in some cases instead of
issuing a database query. This can improve performance in cases
where the columns are needed implicitly, such as graphing. After
loading the extension, you can enable the support for specific
datasets by extending them with Sequel::ColumnIntrospection. To
enable introspection for all datasets, use:
Sequel::Dataset.introspect_all_columns
* A serialization_modification_detection plugin has been added.
Previously, Sequel could not detect modifications made to
serialized objects. It could detect modification if you assigned
a new value:
model.hash_column = model.hash_column.merge(:foo=>:bar)
but not if you just modified the object directly:
model.hash_columns[:foo] = :bar
With this plugin, such modifications can be detected, at a
potentially significant performance cost.
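A sketch, assuming the serialization plugin is used for a meta
column (the column name and format are illustrative):
  Album.plugin :serialization, :json, :meta
  Album.plugin :serialization_modification_detection
  album = Album[1]
  album.meta['foo'] = 'bar' # direct modification of the object
  album.modified?           # => true, now detected
  album.save_changes        # saves the mutated column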
= Other Improvements
* When using a migration directory containing both older integer
migrations and newer timestamp migrations, where some integer
migrations have not been applied, make sure to apply the remaining
integer migrations before the timestamp migrations. Previously,
they could be applied out of order due to a lexicographic sort
being used instead of a numeric sort.
* If a model does not select all columns from its table, the
insert_select optimization is no longer used. Previously,
creating a new model object for such a model could result in the
object containing columns that the model does not select.
* You can now use :select=>[] as an option for many_to_many
associations to select all columns from both the associated
table and the join table. Previously, this raised an error and
required you to use :select=>'*'.lit as a workaround. The default
remains to select all columns in the associated table and none
from the join table.
* The xml_serializer plugin now handles namespaced models by
using __ instead of / as the namespace separator. Previously, /
was used and caused problems as it is not valid XML.
* The :eager_grapher association option can now accept a proc that
takes a single hash of options instead of a fixed 3 arguments.
This is the recommended way going forward of writing custom
:eager_graphers, and all of the internal ones have been converted.
The previous way of using 3 arguments is still supported.
* A bug in the identity_map plugin for many_to_one associations
without full association reflection information has been fixed.
* Sequel is now using GitHub Issues for issue tracking. Old issues
have been migrated from Google Code.
= Backwards Compatibility
* The filter by associations support breaks backwards compatibility
for users who previously added an sql_literal instance method to
Sequel::Model. Usually, that was done for reasons similar to,
but inferior to, the filter by associations support. The following
code can be used as a temporary workaround until you can modify
your program to use the new filter by associations support:
Sequel::Model::Associations::DatasetMethods.
send(:remove_method, :complex_expression_sql)
* The private Sequel::Model#_load_associated_objects method now takes
an additional, optional options hash. Plugins that override that
method need to be modified.
sequel-5.63.0/doc/release_notes/3.24.0.txt

= Prepared Statement Plugins
* The prepared_statements plugin makes Sequel::Model classes use
prepared statements for creating, updating, and destroying model
instances, as well as looking up model objects by primary key.
With this plugin, all of the following will use prepared
statements:
Artist.plugin :prepared_statements
Artist.create(:name=>'Foo')
a = Artist[1]
a.update(:name=>'Bar')
a.destroy
* The prepared_statements_safe plugin reduces the number of
prepared statements that can be created by doing two things. First,
it makes the INSERT statements used when creating instances to use
as many columns as possible, setting specific values for all
columns with parseable default values. Second, it changes
save_changes to just use save, saving all columns instead of just
the changed ones.
The reason for this plugin is that Sequel's default behavior of
using only the values specifically set when creating instances
and having update only set changed columns by default can lead
to a large number of prepared statements being created.
For prepared statements to be used, each set of columns in the
insert and update statements needs to have its own prepared
statement. If you have a table with 1 primary key column and
4 other columns, you can have up to 2^4 = 16 prepared statements
created, one for each subset of the 4 columns. If you have 1
primary key column and 20 other columns, there are over a million
subsets, and you could hit your database limit for prepared
statements (a denial of service attack).
Using the prepared_statements_safe plugin mitigates this
issue by reducing the number of columns that may or may not be
present in the query, in many cases making sure that each model
will only have a single INSERT and a single UPDATE prepared
statement.
* The prepared_statements_associations plugin allows normal
association method calls to use prepared statements if possible.
For example:
Artist.plugin :prepared_statements_associations
Artist.one_to_many :albums
Artist[1].albums
Will use a prepared statement to return the albums for that artist.
This plugin works for all supported association types. There are
some associations (filtered and custom associations) for which Sequel
cannot currently use a prepared statement reliably; for those,
Sequel will use a regular query.
* The prepared_statements_with_pk plugin allows the new
Dataset#with_pk method (explained below) to use prepared statements.
For example:
Artist.plugin :prepared_statements_with_pk
Artist.filter(...).with_pk(1)
Will use a prepared statement for this query. The most benefit
from prepared statements come from queries that are expensive to
parse and plan but quick to execute, so using this plugin with
a complex filter can in certain cases yield significant performance
improvements.
However, this plugin should be considered unsafe as it is possible
that it will create an unbounded number of prepared statements. It
extracts parameters from the dataset using Dataset#unbind
(explained below), so if your code has conditions that vary per
query but that Dataset#unbind does not handle, an unbounded number
of prepared statements can be created. For example:
Artist.filter(:a=>params[:b].to_i).with_pk(1)
Artist.exclude{a > params[:b].to_i}.with_pk(1)
are safe, but:
Artist.filter(:a=>[1, params[:b].to_i]).with_pk(1)
Artist.exclude{a > params[:b].to_i + 2}.with_pk(1)
are not. For queries that are not safe, Dataset#with_pk should
not be used with this plugin; instead, you should look up by
primary key manually (for a regular query):
Artist.filter(:a=>[1, params[:b].to_i])[:id=>1]
or using the prepared statement API to create a custom prepared
statement:
# PS = {}
PS[:name] ||= Artist.filter(:a=>[1, :$b], :id=>:$id).
prepare(:select, :name)
PS[:name].call(:b=>params[:b].to_i, :id=>1)
= Other New Features
* Filtering by associations got a lot more powerful. Sequel 3.23.0
introduced filtering by associations:
Album.filter(:artist=>artist)
This capability is much expanded in 3.24.0, allowing you to
exclude by associations:
Album.exclude(:artist=>artist)
This will match all albums not by that artist.
You can also filter or exclude by multiple associated objects:
Album.filter(:artist=>[artist1, artist2])
Album.exclude(:artist=>[artist1, artist2])
The filtered dataset will match all albums by either of those
two artists, and the excluded dataset will match all albums not
by either of those two artists.
You can also filter or exclude by using a model dataset:
Album.filter(:artist=>Artist.filter(:name.like('A%'))).all
Album.exclude(:artist=>Artist.filter(:name.like('A%'))).all
Here the filtered dataset will match all albums where the
associated artist has a name that begins with A, and the excluded
dataset will match all albums where the associated artist does not
have a name that begins with A.
All of these types of filtering and excluding work with all of the
association types that ship with Sequel, even the many_through_many
plugin.
* Sequel now supports around hooks, which wrap the related before
hook, behavior, and after hook. Like other Sequel hooks, these
are implemented as instance methods. For example, if you wanted
to log DatabaseErrors raised during save:
class Artist < Sequel::Model
def around_save
super
rescue Sequel::DatabaseError => e
# log the error
raise
end
end
All around hooks should call super, not yield. If an around hook
doesn't call super or yield, it is treated as a hook failure,
similar to before hooks returning false.
For around_validation, the return value of super should be whether
the object is valid. For other around hooks, the return value of
super is currently true, but it's possible that will change in the
future.
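For example, a hypothetical around_validation hook that times
validation (log_timing is an assumed helper, not part of Sequel)
must preserve super's return value:

  class Artist < Sequel::Model
    def around_validation
      t = Time.now
      valid = super # for around_validation, super returns validity
      log_timing('validation', Time.now - t)
      valid
    end
  end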
* Dataset#with_pk has been added to model datasets that allows you
to find the object with the matching primary key:
Artist.filter(:name.like('A%')).with_pk(1)
This should make the common case easier, where you want to find
a particular object that is associated with another object:
Artist[1].albums_dataset.with_pk(2)
Before, there was no way to do that without manually specifying
the primary key:
Artist[1].albums_dataset[:id=>2]
To use a composite primary key with with_pk, you have to provide
an array:
Artist[1].albums_dataset.with_pk([1, 2])
* Dataset#[] for model datasets will now call with_pk if given a
single Integer argument. This makes the above case even easier:
Artist[1].albums_dataset[2]
Note that for backwards compatibility, this only works for
single integer primary keys. If you have a composite primary key
or a string/varchar primary key, you have to use with_pk.
* Dataset#unbind has been added, which allows you to take a dataset
that uses static bound values and convert them to placeholders.
Currently, the only cases handled are SQL::ComplexExpression
objects that use a =, !=, <, >, <=, or >= operator where the first
argument is a Symbol, SQL::Identifier, or
SQL::QualifiedIdentifier, and the second argument is a Numeric,
String, Date, or Time. Dataset#unbind returns a two element array,
where the first element is a modified copy of the receiver, and the
second element is a bound variable hash:
ds, bv = DB[:table].filter(:a=>1).unbind
ds # DB[:table].filter(:a=>:$a)
bv # {:a=>1}
The purpose of doing this is that you can then use prepare or call
on the returned dataset with the returned bound variables:
ds.call(:select, bv)
# SELECT * FROM table WHERE (a = ?); [1]
ps = ds.prepare(:select, :ps_name)
# PREPARE ps_name AS SELECT * FROM table WHERE (a = ?)
ps.call(bv)
# EXECUTE ps_name(1)
Basically, Dataset#unbind takes a specific statement and attempts
to turn it into a generic statement, along with the placeholder
values it extracted.
Unfortunately, Dataset#unbind cannot handle all cases. For
example:
DB[:table].filter{a + 1 > 10}.unbind
will not unbind any values. Also, if you have a query with
multiple different values for a variable, it will raise an
UnbindDuplicate exception:
DB[:table].filter(:a=>1).or(:a=>2).unbind
* A defaults_setter plugin has been added that makes it easy to
automatically set default values when creating new objects. This
plugin makes Sequel::Model behave more like ActiveRecord in that
new model instances (before saving) will have default values
parsed from the database. Unlike ActiveRecord, only values with
non-NULL defaults are set. Also, Sequel allows you to easily
modify the default values used:
Album.plugin :defaults_setter
Album.new.values # {:copies_sold => 0}
Album.default_values[:copies_sold] = 42
Album.new.values # {:copies_sold => 42}
Before, this was commonly done in an after_initialize hook, but
that's slower as it is also called for model instances loaded from
the database.
* A Database#views method has been added that returns an array
of symbols representing view names in the database. This works
just like Database#tables except it returns views.
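For example (the table and view names here are illustrative):

  DB.tables # => [:albums, :artists]
  DB.views  # => [:albums_by_artist]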
* A Sequel::ASTTransformer class was added that makes it easy to
write custom transformers of Sequel's internal abstract syntax
trees. Dataset#qualify now uses a subclass of ASTTransformer to do
its transformations, as does the new Dataset#unbind.
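As a minimal sketch (assuming the subclassing API is the private v
method, which is called for each node and recurses via super), a
transformer that swaps one symbol for another could look like:

  class SymbolSwapper < Sequel::ASTTransformer
    private
    # transform a single node; fall back to super for everything else
    def v(o)
      o == :a ? :b : super
    end
  end
  SymbolSwapper.new.transform(:a=>1) # => {:b=>1}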
= Other Improvements
* Database#create_table? now uses a single query with IF NOT EXISTS
if the database supports such syntax. Previously, it issued a
SELECT query to determine table existence. Sequel currently
supports this syntax on MySQL, H2, and SQLite 3.3.0+.
The Database#supports_create_table_if_not_exists? method was added
to allow users to determine whether this syntax is supported.
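For example, on a database that supports the syntax:

  DB.create_table?(:artists) do
    primary_key :id
    String :name
  end
  # CREATE TABLE IF NOT EXISTS artists (...)

  DB.supports_create_table_if_not_exists? # => true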
* Multiple column IN/NOT IN emulation now works correctly with
model datasets (or other datasets that use a row_proc).
* You can now correctly invert SQL::Constant instances:
Sequel::NULL # NULL
~Sequel::NULL # NOT NULL
Sequel::TRUE # TRUE
~Sequel::TRUE # FALSE
* A bug in the association_pks plugin has been fixed in the case
where the associated table had a different primary key column name
than the current table.
* The emulated prepared statement support now supports nil and false
as bound values.
* The to_dot extension was refactored for greater readability. The
only change was a small fix in the display for SQL::Subscript
instances.
* The Dataset#supports_insert_select? method is now available to let
you know if the dataset supports insert_select. You should use
this method instead of respond_to? for checking for insert_select
support.
* Prepared statements/bound variable can now use a new :insert_select
type for preparing a statement that will insert a row and return
the row inserted, if the dataset supports insert_select.
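A rough sketch of usage (the statement name and column here are
assumptions):

  if DB[:items].supports_insert_select?
    ps = DB[:items].prepare(:insert_select, :insert_item, :name=>:$name)
    row = ps.call(:name=>'foo') # => hash for the row just inserted
  end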
* The Model#initialize_set private method now exists for easier plugin
writing. It is only called for new model objects, with the hash
given to initialize. By default, it just calls set.
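A hypothetical plugin override (the created_by column is an
assumption) might look like:

  module MyPlugin
    module InstanceMethods
      private
      # called only for new objects, with the hash given to initialize
      def initialize_set(h)
        super
        self[:created_by] ||= 'web'
      end
    end
  end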
* A small bug when creating anonymous subclasses of Sequel::Model on
ruby 1.9 has been fixed.
* If Thread#kill is used inside a transaction on ruby 1.8 or
rubinius, the transaction is rolled back. This situation is not
handled correctly on JRuby or ruby 1.9, and I'm not sure it's
possible to handle correctly on those implementations.
* The postgres adapter now supports the
Sequel::Postgres::PG_NAMED_TYPES hash for associating conversion
procs for custom types that don't necessarily have the same type
oid on different databases. This hash uses symbol keys and
proc values:
Sequel::Postgres::PG_NAMED_TYPES[:interval] = proc{|v| ...}
The conversion procs now use a separate hash per Database object
instead of a hash shared across all Database objects. You
can now modify the types for a particular Database object, but
you have to use the type oid:
DB.conversion_procs[42] = proc{|v| ...}
* On SQLite and MSSQL, literalization of true and false values given
directly to Dataset#filter has been fixed. So the following now
works correctly on those databases:
DB[:table].filter(true)
DB[:table].filter(false)
Unfortunately, because SQLite and MSSQL don't have a real boolean
type, these will not work:
DB[:table].filter{a & true}
DB[:table].filter{a & false}
You currently have to work around the issue by doing:
DB[:table].filter{a & Sequel::TRUE}
DB[:table].filter{a & Sequel::FALSE}
It is possible that a future version of Sequel will remove the need
for this workaround, but that requires having a separate
literalization method specific to filters.
* The MySQL bit type is no longer treated as a boolean. On MySQL, the
bit type is a bitfield, which is very different than the MSSQL bit
type, which is the closest thing to a boolean on MSSQL.
* The bool database type is now recognized as a boolean. Some SQLite
databases use bool, such as the ones used in Firefox.
* SQL_AUTO_IS_NULL=0 is now set by default when connecting to MySQL
using the swift or jdbc adapters. Previously, it was only set by
default when using the mysql or mysql2 adapters.
* Dataset#limit now works correctly on Access, using the TOP syntax.
* Dataset#limit now works correctly on DB2, using the FETCH FIRST
syntax.
* The jdbc mssql subadapter was split into separate subadapters for
sqlserver (using Microsoft's driver) and jtds (using the open
source JTDS driver).
* The jdbc jtds subadapter now supports converting Java CLOB
objects to ruby strings.
* Tables from the INFORMATION_SCHEMA are now ignored when parsing
schema on JDBC.
* The informix adapter has been split into shared/specific parts, and
a jdbc informix subadapter has been added.
* Dataset#insert_select now works correctly on MSSQL when the core
extensions are disabled.
* The sqlite adapter now logs when preparing a statement.
* You no longer need to be a PostgreSQL superuser to run the postgres
adapter specs.
* The connection pool specs are now about 10 times faster and not
subject to race conditions due to using Queues instead of
sleeping.
= Backwards Compatibility
* Model#save no longer calls Model#valid?. It now calls the
Model#_valid? private method that Model#valid? also calls. To mark
a model instance invalid, you should override the Model#validate
method and add validation errors to the object.
* The BeforeHookFailure exception class has been renamed to
HookFailure since hook failures can now be raised by around hooks
that don't call super. BeforeHookFailure is now an alias to
HookFailure, so no code should break, but you should update your
code to reflect the new name.
* Any custom argument mappers used for prepared statements now need
to implement the prepared_arg? private instance method and have it
return true.
* If your database uses bit as a boolean type and isn't MSSQL, it's
possible that those columns will no longer be treated as booleans.
Please report such an issue on the bugtracker.
* It is possible that the filtering and excluding by association
datasets will break backwards compatibility in some apps. This can
only occur if you are using a symbol with the same name as an
association with a model dataset whose model is the same as the
associated class. As associations almost never have the same names
as columns, this would require either aliasing or joining to
another table. If for some reason this does break your app, you
can work around it by changing the symbol to an SQL::Identifier or
a literal string.
* The Sequel::Postgres.use_iso_date_format= method now only affects
future Database objects.
* On MySQL, Database#tables no longer returns view names, it only
returns table names. You have to use Database#views to get view
names now.
sequel-5.63.0/doc/release_notes/3.25.0.txt

= New Features
* drop_table, drop_view, drop_column, and drop_constraint all now
support a :cascade option for using CASCADE.
DB.drop_table(:tab, :cascade=>true)
# DROP TABLE tab CASCADE
DB.drop_column(:tab, :col, :cascade=>true)
# ALTER TABLE tab DROP COLUMN col CASCADE
A few databases support CASCADE for dropping tables and views,
but only PostgreSQL appears to support it for columns and
constraints. Using the :cascade option when the underlying
database doesn't support it will probably result in a
DatabaseError being raised.
* You can now use datasets as expressions, allowing things such as:
DB[:table1].select(:column1) > DB[:table2].select(:column2)
# (SELECT column1 FROM table1) > (SELECT column2 FROM table2)
DB[:table1].select(:column1).cast(Integer)
# CAST((SELECT column1 FROM table1) AS integer)
* Dataset#select_group has been added for grouping and selecting on
the same columns.
DB[:a].select_group(:b, :c)
# SELECT b, c FROM a GROUP BY b, c
* Dataset#exclude_where and #exclude_having methods have been added,
allowing you to specify which clause to affect. #exclude's
behavior is still to add to the HAVING clause if one is present,
and use the WHERE clause otherwise.
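For example, something like:

  DB[:a].group(:b).having{c > 1}.exclude_where(:d=>2)
  # SELECT * FROM a WHERE (d != 2) GROUP BY b HAVING (c > 1)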
* Dataset#select_all now accepts optional arguments and will select
all columns from those arguments if present:
DB[:a].select_all(:a)
# SELECT a.* FROM a
DB.from(:a, :b).select_all(:a, :b)
# SELECT a.*, b.* FROM a, b
* Dataset#group and #group_and_count now both accept virtual row
blocks:
DB[:a].select(:b).group{c(d)}
# SELECT b FROM a GROUP BY c(d)
* If you use a LiteralString as a validation error message,
Errors#full_messages will now not add the related column name to
the start of the error message.
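For example, inside a model's validate method (using the core
extension String#lit to create the LiteralString):

  def validate
    super
    errors.add(:name, 'is blank')                # "name is blank"
    errors.add(:name, 'Name must be given.'.lit) # "Name must be given."
  end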
* Model.set_dataset now accepts SQL::Identifier,
SQL::QualifiedIdentifier, and SQL::AliasedExpression instances,
treating them like Symbols.
= Other Improvements
* The association_pks plugin's setter method will now automatically
convert a given array of strings to an array of integers if the
primary key field is an integer field, which should make it easier
to use in web applications.
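For example, with an integer primary key (string values are typical
for submitted form parameters):

  album.artist_pks = ['1', '2'] # treated as [1, 2]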
* nil bound variable, prepared statement, and stored procedure
arguments are now handled correctly in the JDBC adapter.
* On 1.9, you can now load plugins even when ::ClassMethods,
::InstanceMethods, or ::DatasetMethods is defined.
= Backwards Compatibility
* The tinytds adapter now only works with tiny_tds 0.4.5 and greater.
Also, if you were using the tinytds adapter with FreeTDS 0.91rc1,
you need to upgrade to FreeTDS 0.91rc2 for it to work. Also, if
you were referencing an entry in the freetds.conf file, you now
need to specify it directly using the :dataserver option when
connecting, the adapter no longer copies the :host option to the
:dataserver option.
* On postgresql, Sequel now no longer drops tables with CASCADE by
default. You now have to use the :cascade option to drop_table if
you want to use CASCADE.
* The Database#drop_table_sql private method now takes an additional
options hash argument.
sequel-5.63.0/doc/release_notes/3.26.0.txt

= Performance Enhancements
* The internal implementation of eager_graph has been made 75% to
225% faster than before, with greater benefits to more complex
graphs.
* Dataset creation has been made much faster (2.5x on 1.8 and 4.4x on
1.9), and dataset cloning has been made significantly faster (40%
on 1.8 and 20% on 1.9).
= Other Improvements
* Strings passed to setter methods for integer columns are no longer
considered to be in octal format if they include leading zeroes.
The previous behavior was never intended, but was a side effect of
using Kernel#Integer. Strings with leading zeroes are now treated
as decimal, and you can still use the 0x prefix to treat them as
hexadecimal. If anyone was relying on the old octal behavior, let
me know and I'll add an extension that restores the octal behavior.
* The identity_map plugin now works with the standard eager loading
of many_to_many and many_through_many associations.
* Database#create_table! now only attempts to drop the table if it
already exists. Previously, it attempted to drop the table
unconditionally ignoring any errors, which resulted in misleading
error messages if dropping the table raised an error caused by
permissions or referential integrity issues.
* The default connection pool now correctly handles the case where a
disconnect error is raised and an exception is raised while
running the disconnection proc.
* Disconnection errors are now detected when issuing transaction
statements such as BEGIN/ROLLBACK/COMMIT. Previously, these
statements did not handle disconnect errors on most adapters.
* More disconnection errors are now detected. Specifically, the ado
adapter and do postgres subadapter now handle disconnect errors,
and the postgres adapter handles more types of disconnect errors.
* Database#table_exists? now always issues a query to select from the
table, it no longer attempts to parse the schema to determine the
information on PostgreSQL and Oracle.
* Date, DateTime, and Time values are now literalized correctly on
Microsoft Access.
* Connecting with the mysql adapter with an options hash now works if
the :port option is a string, which makes it easier to use when the
connection information is stored in YAML.
* The xml_serializer plugin now works around a bug in pure-Java
nokogiri regarding the handling of nil values.
* Nicer error messages are now used if there is an attempt to call
an invalid or restricted setter method.
* The RDocs are now formatted with hanna-nouveau, which allows for
section ordering, so the Database and Dataset RDoc pages are
more friendly.
= Backwards Compatibility
* If you call a Dataset method such as #each on an eager_graphed
dataset, you now get plain hashes that have column alias symbol
keys and their values. Previously, you got a graphed response with
table alias keys and model values. It's not wise to depend on the
behavior, the only supported way of returning records when eager
loading is to use #all.
* An error is now raised if you attempt to eager load via
Dataset#eager a many_to_many association that includes an
:eager_graph option. Previously, incorrect SQL would have been
generated and an error raised by the database.
* Datasets are no longer guaranteed to have @row_proc,
@identifier_input_method, and @identifier_output_method defined
as instance variables. You should be using methods to access
them anyway.
* Database#table_exists? on PostgreSQL no longer accepts an options
hash. Previously, you could use a :schema option. You must now
provide the schema inside the table argument (e.g. :schema__table).
* If you want to use the rdoc tasks in Sequel's Rakefile, and you are
still using the hanna RDoc template with RDoc 2.3, you need to
upgrade to using hanna-nouveau with RDoc 3.8+.
sequel-5.63.0/doc/release_notes/3.27.0.txt

= New Features
* Model.dataset_module has been added for easily adding methods to
a model's dataset:
Album.dataset_module do
def with_name_like(x)
filter(:name.like(x))
end
def selling_at_least(x)
filter{copies_sold > x}
end
end
Album.with_name_like('Foo%').selling_at_least(100000).all
Previously, you could use def_dataset_method to accomplish the
same thing. dataset_module is generally cleaner, plus you are
using actual methods instead of blocks, so calling the methods
is faster on some ruby implementations.
* Sequel now uses a Sequel::SQLTime class (a subclass of Time) when
dealing with values for SQL time columns (which don't have a date
component). These values are handled correctly when used in
filters or insert/update statements (using only the time
component), so Sequel can now successfully round trip values for
time columns. Not all adapters support returning time column
values as SQLTime instances, but the most common ones do.
* You can now drop foreign key, primary key, and unique constraints
on MySQL by passing the :type=>(:foreign_key|:primary_key|:unique)
option to Database#drop_constraint.
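For example (the constraint names here are illustrative):

  DB.alter_table(:albums) do
    drop_constraint(:albums_artist_id_fkey, :type=>:foreign_key)
    drop_constraint(:albums_name_key, :type=>:unique)
  end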
* The ODBC adapter now has initial support for the DB2 database, use
the :db_type=>'db2' option to load the support.
= Other Improvements
* The mysql2 adapter now uses native prepared statements.
* The tinytds adapter now uses sp_executesql for prepared
statements.
* DateTime and Time objects are now converted to Date objects when
they are assigned to a date column in a Model instance.
* When converting a Date object to a DateTime object, the resulting
DateTime object now has no fractional day components. Previously,
depending on your timezone settings, it could have had fractional
day components.
* The mysql2 adapter now supports stored procedures, as long as they
don't return results.
* Mass assignment protection now handles including modules in model
classes and extending model instances with modules. Previously, if
you defined a setter method in a module, access to it may have been
restricted.
* The prepared_statements_safe plugin now works on classes without
datasets, so you can now do the following to load it for all models:
Sequel::Model.plugin :prepared_statements_safe
* Dataset#hash now works correctly when handling SQL::Expression
instances.
* Model#hash now correctly handles classes with no primary key or with
a composite primary key.
* Model#exists? now always returns false for new model objects.
= Backwards Compatibility
* If you were previously setting primary key values manually for new
model objects and then calling exists? to see if the instance is
already in the database, you need to change your code from:
model.exists?
to:
!model.this.get(1).nil?
sequel-5.63.0/doc/release_notes/3.28.0.txt

= New Adapter Support
* Sequel now has much better support for the DB2 database.
* An ibmdb adapter has been added, and is the recommended adapter
to use if you want to connect to DB2 from MRI.
* A jdbc db2 subadapter has been added, allowing good DB2 support on
JRuby.
* The db2 adapter has been cleaned up substantially, and now works
well, but it is still recommended that you switch to ibmdb if you
are using the db2 adapter.
* The firebird adapter has been split into shared and specific parts,
and quite a few fixes were made to it.
* A jdbc firebird subadapter has been added, allowing connection to
firebird databases from JRuby.
= New PostgreSQL 9.1 Features
* Dataset#returning has been added for using the RETURNING clause on
INSERT/UPDATE/DELETE queries. RETURNING allows such queries to
return results in much the same way as a SELECT query works.
When Dataset#returning is used, Dataset #insert, #update, and
#delete now accept a block that is passed to Dataset #fetch_rows
which is yielded plain ruby hashes for each row inserted, updated,
or deleted. If Dataset#returning is used and a block is not given
to those methods, those methods will return an array of plain hashes
for all rows inserted, updated, and deleted.
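A rough sketch (on PostgreSQL 9.1, with illustrative values):

  DB[:items].returning(:id).insert(:name=>'a')  # => [{:id=>1}]
  DB[:items].returning(:id).delete{|row| p row} # yields each deleted row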
* Dataset#with_sql now treats a symbol as a first argument as a method
name to call to get the SQL. The expected use case for this is with
Dataset#returning and insert/update/delete:
DB[:items].
returning(:id).
with_sql(:update_sql, :b => :b + 1).
map(:id)
Basically, it makes it easier to statically set the
insert/update/delete SQL, and then be able to use the full
dataset API for returning results. As mentioned above, using
Dataset#returning with #insert, #update, and #delete yields plain
hashes, so if you want to have the row_proc applied (e.g. you are
using models), you need to use this method instead, since you can
then call #each or #all to make sure the row_proc is called on all
returned rows.
* Dataset#with (common table expressions) now affects
INSERT/UPDATE/DELETE queries.
* Database#create_table? now uses CREATE TABLE IF NOT EXISTS on
PostgreSQL 9.1.
= Other New Features
* The :limit option is now respected when eager loading via either
eager or eager_graph. By default, Sequel will just do an array
slice of the resulting ruby array, which gets the correct answer,
but does not offer any performance improvements. Sequel also
offers a new :eager_limit_strategy option for using more advanced
query types that only load the related records from the database.
The available values for the :eager_limit_strategy option are:
:window_function - This uses the row_number window function
partitioned by the related key fields. It can only be used
on databases that support window functions (PostgreSQL 8.4+,
Microsoft SQL Server 2005+, DB2).
:correlated_subquery - This uses a correlated subquery that is
limited. It works on most databases except MySQL and DB2.
You can provide a value of true as the option to have Sequel
pick a strategy to use. Sequel will never use a correlated
subquery for true, since in some cases it can perform worse than
loading all related records and doing the array slice in ruby.
If you want to enable an eager_limit_strategy globally, you can
set Sequel::Model.default_eager_limit_strategy to a value, and
all associations that use :limit will default to using that
strategy.
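For example (the association and column names are illustrative):

  Artist.one_to_many :best_albums, :class=>:Album,
    :order=>:copies_sold.desc, :limit=>5,
    :eager_limit_strategy=>:window_function
  Artist.eager(:best_albums).all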
* one_to_one associations that do not represent true one-to-one
database relationships, but represent one-to-many relationships
where you are only returning the first object based on a given
order are also now handled correctly when eager loading.
Previously, eager loading such associations resulted in the last
matching object being associated instead of the first matching
object being associated.
You can also use an :eager_limit_strategy for one_to_one
associations. In addition to the :window_function and
:correlated_subquery values, there is also a :distinct_on value
that is available on PostgreSQL for using DISTINCT ON, which is
the fastest strategy if you are using PostgreSQL.
* Dataset#map, #to_hash, #select_map, #select_order_map, and
#select_hash now accept arrays of symbols, and if given arrays
of symbols, use arrays of results. For example:
DB[:items].map([:id, :name])
# => [[1, 'foo'], [2, 'bar'], ...]
DB[:items].to_hash([:id, :foo_id], [:name, :bar_id])
# => {[1, 3]=>['foo', 5], [2, 4]=>['bar', 6], ...}
* For SQL expression objects where Sequel cannot deduce the type
of the object, it now will consider the type of the argument
when a &, |, or + operator is used. For example:
:x & 1
Previously, this did "x AND 1", now it does "x & 1". Using a
logical operator on an integer doesn't make sense, but it's
possible people did so if the database uses 1/0 for true/false.
Likewise:
:x + 'foo'
Previously, this did "x + 'foo'" (addition), now it does
"x || 'foo'" (string concatenation).
* The sql_string, sql_number, and sql_boolean methods are now
available on SQL::ComplexExpressions, so you can do:
(:x + 1).sql_string + ' foos'
# (x + 1) || ' foos'
Previously, there was not an easy way to generate such SQL
expressions.
* :after_load association hooks are now applied when using
eager_graph. Previously, they were only applied when using
eager, not when using eager_graph.
* Database#copy_table has been added to the postgres adapter if pg
is used as the underlying driver. It allows you to get very
fast exports of table data in text or CSV format. It also
accepts datasets, allowing fast exports of arbitrary queries
in text or CSV format.
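A rough sketch of usage (assuming an albums table):

  csv = DB.copy_table(:albums, :format=>:csv)
  csv = DB.copy_table(DB[:albums].where{copies_sold > 10000},
                      :format=>:csv)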
* SQL extract support (:timestamp.extract(:year)) is now emulated
on the databases that don't natively support it, such as SQLite,
Microsoft SQL Server, and DB2. At least the following values are
supported for extraction: :year, :month, :day, :hour, :minute,
and :second.
* The bitwise XOR operator is now emulated on SQLite. Previously,
attempting to use it would cause the database to raise an error.
* A Database#use_timestamp_timezones accessor has been added on
SQLite. This allows you to turn off the use of timezones in
timestamps by setting the value to false. This is necessary if you
want to use the SQLite datetime functions, or the new
ability to emulate extract.
Note that this setting does not affect the current database
content. To convert old databases to the new format, you'll
have to resave all rows that have timestamps.
At some point in the future, Sequel may default to not using
timezones in timestamps by default on SQLite, so if you would
like to rely on the current behavior, you should set this
accessor to true now.
* Sequel now works around bugs in MySQL when using a subselect with
a LIMIT by using a nested subselect.
* Sequel now works around issues in Microsoft SQL Server and DB2 when
using a subselect with IN/NOT IN that uses the emulated offset
support.
* The jdbc adapter now returns java.sql.Clob objects as
Sequel::SQL::Blobs.
* Sequel now considers database clob types as the :blob schema type.
* Sequel::SQLTime.create has been added for more easily creating
instances:
Sequel::SQLTime.create(hour, minute, second, usec)
* Dataset#select_all now accepts SQL::AliasedExpression and
SQL::JoinClause arguments and returns the appropriate
SQL::ColumnAll value that selects all columns from the related
table.
* Model.set_dataset now accepts Sequel::LiteralString objects that
represent table names. This usage is not encouraged except in
rare cases such as using a set returning function in PostgreSQL.
* Dataset#supports_cte? now takes an optional argument specifying the
type of query (:insert, :update, :delete, :select). It defaults to
:select.
* Dataset#supports_returning? has been added. It requires an
argument specifying the type of query (:insert, :update, or
:delete).
* Dataset#supports_cte_in_subqueries? has been added for checking
for support for this ability. Apparently, only PostgreSQL
currently supports this. For other adapters that support CTEs but
not in subqueries, if a subquery with a CTE is used in a JOIN, the
CTE is moved from the subquery to the main query.
* Dataset#supports_select_all_and_column has been added for seeing
if "SELECT *, foo ..." style queries are supported. This is false
on DB2, which doesn't allow such queries. When it is false, using
select_append on a dataset that doesn't specifically select columns
will now change the query to do "SELECT table.*, foo ..." instead,
working around the limitation on DB2.
* Dataset#supports_ordered_distinct_on? has been added. Currently,
this is only true on PostgreSQL. MySQL can emulate DISTINCT ON
using GROUP BY, but it doesn't respect ORDER BY, so in some
cases it cannot be used equivalently.
* Dataset#supports_where_true? has been added for checking for support
of WHERE TRUE (or WHERE 1 if 1 is true). Not all databases support
using such a construct, and on the databases that do not, you have
to use WHERE (1 = 1) or something similar.
= Other Improvements
* Sequel 3.27.0 was negatively affected by a serious bug in
ActiveSupport's Time.=== that has still not been fixed, which
broke the literalization of Time objects. In spite of the bad
precedent it sets, Sequel now avoids using Time.=== on a
subclass of Time to work around this ActiveSupport bug.
* Dataset#with_pk now uses a qualified primary key instead of an
unqualified primary key, which means it can now be used correctly
after joining to a separate table.
* Association after_load hooks when lazy loading are now called
after the association has been loaded, which allows them to change
which records are cached. This makes the lazy load case more
similar to the eager load case.
* The metaprogrammatically created methods that implement Sequel's
DSL support have been made significantly faster by using
module_eval instead of define_method.
* The type translation in the postgres, mysql, and sqlite adapters
has been made faster by using Method objects that result in more
direct processing.
* Typecasting values for time columns from Time values to
Sequel::SQLTime values now correctly handles fractional seconds on
ruby 1.9.
= Backwards Compatibility
* Dataset#insert_returning_sql has been changed to a private method
in the PostgreSQL and Firebird adapters, and it operates
differently than it did previously. The private
#insert_returning_pk_sql and #insert_returning_select_sql methods
have been removed.
* Dataset#with_pk no longer does some defensive checking for misuse of
primary keys (e.g. providing a composite key when the model uses
a single key). Previously, Sequel would raise an Error
immediately, now such behavior is undefined, with the most likely
behavior being the database raising an Error.
* The :alias_association_type_map and :alias_association_name_map
settings have been removed from the :eager_graph dataset option,
in favor of just storing the related association reflection.
* The internals of the db2 adapter have changed substantially, if you
were relying on some of the private methods defined in it, you will
probably have to modify your code.
* The firebird adapter was substantially modified, specifically parts
related to insert returning autogenerated primary key values, so if
you were previously using the adapter you should probably take more
care than usual when testing your upgrade.
* The Dataset::WITH_SUPPORTED constant has been removed.
* The Dataset#supports_cte? method now accepts an optional argument.
If you overrode this method, your overridden method now must
accept an optional argument.
* If you were previously doing:
:x & 1
and wanting "x AND 1", you have to switch to:
:x.sql_boolean & 1
Likewise, if you were previously doing:
:x + 'foo'
and wanting "x + 'foo'", you need to switch to:
:x.sql_number + 'foo'
* Sequel no longer does defensive type checking in the SQL expression
support, as it was often more strict than the database and would
not allow the creation of expressions that were valid for the
database.
sequel-5.63.0/doc/release_notes/3.29.0.txt

= New Adapter Support
* Sequel now has much better support for Oracle, both in the
ruby-oci8-based oracle adapter and in the jdbc/oracle adapter.
* Sequel now has much better support for connecting to HSQLDB
using the jdbc adapter. This support does not work correctly
with the jdbc-hsqldb gem, since the version it uses is too
old. You'll need to load the .jar file manually until the
gem is updated.
* Sequel now has much better support for connecting to Apache
Derby databases using the jdbc adapter. This works with
the jdbc-derby gem, but it's recommended you grab an updated
.jar file as the jdbc-derby gem doesn't currently support
truncate or booleans.
* The db2 adapter has had most of the remaining issues fixed,
and can now run Sequel's test suite cleanly. It's still
recommended that users switch to the ibmdb adapter if they
are connecting to DB2.
* A mock adapter has been added which provides a mock Database
object that allows you to easily set the returned rows, the
number of rows modified by update/delete, and the
autogenerating primary key integer for insert. It also allows
you to set specific columns in the dataset when retrieving
rows. The specs were full of partial implementations of
mock adapters, this mock adapter is much more complete and
offers full support for mocking transactions and database
sharding. Example:
DB = Sequel.mock(:fetch=>{:id=>1}, :numrows=>2, :autoid=>3)
DB[:items].all # => [{:id => 1}]
DB[:items].insert # => 3
DB[:items].insert # => 4
DB[:items].delete # => 2
DB[:items].update(:id=>2) # => 2
DB.sqls # => ['SELECT ...', 'INSERT ...', ...]
In addition to being useful in the specs, the mock adapter is
also used if you use bin/sequel without a database argument,
which makes it much easier to play around with Sequel on the
command line without being tied to a real database.
= New Transaction Features
* Database after_commit and after_rollback hooks have been added,
allowing you to set procs that are called after the currently-
in-effect transaction commits or rolls back. If the Database
is not currently in a transaction, the after_commit proc is
called immediately and the after_rollback proc is ignored.
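For example (the two helpers here are assumptions):

  DB.transaction do
    DB.after_commit{send_notification} # runs only after COMMIT
    DB.after_rollback{log_failure}     # runs only after ROLLBACK
    DB[:items].insert(:a=>1)
  end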
* Model after_commit, after_rollback, after_destroy_commit, and
after_destroy_rollback hooks have been added that use the new
Database after_commit/after_rollback hook to execute code after
commit or rollback.
* Database#transaction now supports a :rollback => :reraise option
to reraise any Sequel::Rollback exceptions raised by the block.
* Database#transaction now supports a :rollback => :always option
to always rollback the transaction, which is mostly useful when
using transaction-based testing.
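For example:

  DB.transaction(:rollback=>:always) do
    DB[:items].insert(:a=>1) # always rolled back when the block exits
  end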
* Sequel.transaction has been added, allowing you to run
simultaneous transactions on multiple Database objects:
Sequel.transaction([DB1, DB2]){...}
# similar to:
DB1.transaction{DB2.transaction{...}}
You can combine this with the :rollback => :always option to
easily use multiple databases in the same test suite and make sure
that changes are rolled back on all of them.
* Database#in_transaction? has been added so that users can detect
whether the code is currently inside a transaction.
* The generic JDBC transaction support, used by 6 of Sequel's jdbc
subadapters, now supports savepoints if the underlying JDBC driver
supports savepoints.
= Other New Features
* A dataset_associations plugin has been added, allowing datasets
to call association methods, which return datasets of rows in
the associated table that are associated to rows in the current
dataset.
# Dataset of tracks from albums with name < 'M'
# by artists with name > 'M'
Artist.filter(:name > 'M').albums.filter(:name < 'M').tracks
# SELECT * FROM tracks
# WHERE (tracks.album_id IN (
# SELECT albums.id FROM albums
# WHERE ((albums.artist_id IN (
# SELECT artists.id FROM artists
# WHERE (name > 'M')))
# AND (name < 'M'))))
* Database#extend_datasets has been added, allowing you to do the
equivalent of extending all of the database's future datasets
with a module. For performance, it creates an anonymous
subclass of the current dataset class and includes a module in
it, and uses the subclass to create future datasets.
Using this feature allows you to override any dataset method
and call super, similar to how Sequel::Model plugins work. The
method takes either a module:
Sequel.extension :columns_introspection
DB.extend_datasets(Sequel::ColumnsIntrospection)
or a block that it uses to create an anonymous module:
DB.extend_datasets do
# Always select from table.* instead of *
def from(*tables)
ds = super
if !@opts[:select] || @opts[:select].empty?
ds = ds.select_all(*tables)
end
ds
end
end
* Database#<< and Dataset#<< now return self, which allow them
to be used in chaining:
DB << "UPDATE foo SET bar_id = NULL" << "DROP TABLE bars"
DB[:foo] << {:bar_id=>0} << DB[:bars].select(:id)
* A Database#timezone accessor has been added, allowing you to
override Sequel.database_timezone on a per-Database basis, which
allows you to use two separate Database objects in the same
process that have different timezones.
* You can now modify the type conversion procs on a per-Database
basis when using the mysql, sqlite, and ibmdb adapters, by
modifying the hash returned by Database#conversion_procs.
* Model.dataset_module now accepts a Module instance as an argument,
and extends the model's dataset with that module.
* When using the postgres adapter with the pg driver, you can now
use Database#listen to wait for notifications. All adapters that
connect to postgres now support Database#notify to send
notifications:
# process 1
DB.listen('foo') do |ev, pid, payload|
ev # => 'foo'
pid # => some Integer
payload # => 'bar'
end
# process 2
DB.notify('foo', :payload=>'bar')
* many_to_one associations now have a :qualify option that can be set
to false to not qualify the primary key when loading the
association. This shouldn't be necessary to use in most cases, but
in some cases qualifying a primary key breaks certain queries (e.g.
using JOIN USING on the same column on Oracle).
* Database#schema can now take a dataset as an argument if it just
selects from a single table. If a dataset is provided, the
schema parsing will use that dataset's identifier_input_method
and identifier_output_method for the parsing, instead of the
database's default. This makes it possible for Model classes
to correctly get the table schema if they use a dataset whose
identifier_(input|output)_method differs from the database
default.
* On databases that support common table expressions (CTEs) but do
not support CTE usage in subselects, Sequel now emulates support
by moving CTEs from the subselect to the main select when using
the Dataset from, from_self, with, with_recursive, union,
intersect, and except methods.
* The bitwise complement operator is now emulated on H2.
* You can now set the convert_tinyint_to_bool setting on a
per-Database basis in the mysql and mysql2 adapters.
* You can now set the convert_invalid_date_time setting on a
per-Database basis in the mysql adapter.
* Database instances now have a dataset_class accessor that allows
you to set which class is used when creating datasets. This is
mostly used to implement the extend_datasets support, but it
could be useful for other purposes.
* Dataset#unused_table_alias now accepts an optional 2nd argument,
which should be an array of additional symbols that should be
considered as already used.
* Dataset#requires_placeholder_type_specifiers? has been added to
check if the dataset requires you use type specifiers for
bound variable placeholders.
The prepared_statements plugin now checks this setting and works
correctly on adapters that set it to true, such as oracle.
* Dataset#recursive_cte_requires_column_aliases? has been added
to check if you must provide a column list for a recursive CTE.
The rcte_tree plugin now checks this setting and works correctly
on databases that set it to true, such as Oracle and HSQLDB.
= Performance Improvements
* Numerous optimizations were made to loading model objects from
the database, resulting in a 7-16% speedup.
Model.call was added, and now .load is just an alias for .call.
This allows you to make the model dataset's row_proc the model
itself, instead of needing a separate block, which improves
performance.
While Model.load used to call .new (and therefore #initialize),
Model.call uses .allocate/#set_values/#after_initialize for speed.
This saves a method call or two, and skips setting the @new
instance variable.
* Dataset#map, #to_hash, #select_map, #select_order_map, and
#select_hash are now faster if any of the provided arguments are
an array of symbols.
* The Model.[] optimization is now applied in more cases.
= Other Improvements
* Sequel now creates accessor methods for all columns in a model's
table, even if the dataset doesn't select the columns. This has
been the specified behavior for a while, but the spec was broken.
This allows you do to:
Model.dataset = DB[:table].select(:column1, :column2)
Model.select_more(:column3).first.column3
* Model.def_dataset_method now correctly handles method names that
can't be used directly (such as method names with spaces). This
isn't so the method can be used with arbitrary user input, but
it will allow safe creation of dataset methods that are derived
from column names, which could contain spaces.
* Model.def_dataset_method no longer overrides private model
methods.
* The optimization that Model.[] uses now works correctly if the
model's dataset uses a different identifier_input_method than
the database.
* Sharding is supported correctly by default for the transactions
used by model objects. Previously, you had to use the sharding
plugin to make sure the same shard was used for transactions as
for the insert/update/delete statements.
* Sequel now fully supports using an aliased table for the
:join_table option of a many_to_many association. The only real
use case for an aliased :join_table option is when the join table
is the same as the associated model table.
* A bug when eagerly loading a many_through_many association with
composite keys where one of the join tables requires an alias
has been fixed.
* Sequel's transaction internals have had substantial improvements.
You can now open up simultaneous transactions on two separate
shards of the same Database object in the same thread. The new
design allows for future support of connection pools that aren't
based on threads. Sequel no longer abuses thread-local variables
to store savepoint state.
* Dataset#select_map and #select_order_map now return an array of
single element arrays if given an array with a single entry as
an argument. Previously, they returned an array of values, which
wasn't consistent.
* Sequel's emulation of bitwise operators with more than 2 arguments
now works on all adapters that use the emulation. The emulation
was broken in 3.28.0 when more than 2 arguments were used on H2,
DB2, Microsoft SQL Server, PostgreSQL, and SQLite.
* Dataset#columns now correctly handles the emulated offset support
used on DB2, Oracle, and Microsoft SQL Server when using the
jdbc, odbc, ado, and dbi adapters. Previously, Dataset#columns
could contain the row number column, which wasn't in the
hashes yielded by Dataset#each.
* Sequel can now parse primary key information on Microsoft SQL
Server. Previously, the only adapter that supported this was the
jdbc adapter, which uses the generic JDBC support. The shared
mssql adapter now supports parsing the information directly from
the database system tables. This means that if you are using
Model objects with a Microsoft SQL Server database using the
tinytds, odbc, or ado adapters, the model primary key
information will be set automatically.
* Sequel's prepared statement support no longer defines singleton
methods on the prepared statement objects.
* StringMethods#like is now case sensitive on SQLite and Microsoft
SQL Server, making it more similar to other databases.
* Sequel now works around an SQLite column naming bug if you select
columns qualified with the alias of a subselect without providing
an alias for the column itself.
* Sequel now handles more bound variable types when using bound
variables outside of prepared statements on SQLite.
* Sequel now works around a bug in certain versions of the
JDBC/SQLite driver when emulating alter table support for
operations such as drop_column.
* Sequel now emulates the add_constraint and drop_constraint
alter table operations on SQLite, though the emulation has
issues.
* Sequel now correctly handles composite primary keys when
emulating alter_table operations on SQLite.
* Sequel now applies the correct PRAGMA statements by default when
connecting to SQLite via the amalgalite and swift adapters.
* Sequel now supports using savepoints inside prepared transactions
on MySQL.
* Sequel now closes JDBC ResultSet objects as soon as it is done
using them, leading to potentially lower memory usage in the JDBC
adapter, and fixes issues if you try to drop a table before
GC has collected a related ResultSet.
* Sequel can now correctly insert all default values into a table
on DB2. Before, this didn't work correctly if the table had more
than one column.
* Another type of disconnection error is now recognized in the
mysql2 adapter.
* Sequel now uses better error messages if you attempt to execute a
prepared statement without a name using the postgres, mysql, and
mysql2 adapters.
* Some small fixes have been made that allow Sequel to run better
when $SAFE=1. However, Sequel is not officially supported using
$SAFE > 0, so there could be many issues remaining.
* Sequel's core and model specs were cleaned up by using the mock
adapter to eliminate a lot of redundant code.
* Sequel's integration tests were sped up considerably, halving
the execution time on some adapters.
= Backwards Compatibility
* Because Model.load is now an alias for .call, plugins should no
longer override load. Instead, they should override .call.
* Loading model objects from the database no longer calls
Model#initialize. Instead, it calls Model.allocate,
Model#set_values, and Model#after_initialize. So if you were
overriding #initialize and expecting the changes to affect model
objects loaded from the database, you need to change your code.
Additionally, @new is no longer set to false for objects retrieved
from the database, since setting it to false hurts performance.
Model#new? still returns true or false, so this only affects you
if you are checking the instance variables directly.
* Dataset#<< no longer returns the autogenerated primary key for the
inserted row. As mentioned above, it now returns self to allow for
chaining. If you were previously relying on the return value,
switch from #<< to #insert.
* Dataset#map no longer calls the row_proc if given an argument, and
Dataset#to_hash no longer calls the row_proc if given two arguments.
This should only affect your code if you were using a row_proc that
modified the content of the hash (e.g. Model#after_initialize). If
you were relying on the old behavior, switch:
dataset.map(:foo)
# to
dataset.map{|r| r[:foo]}
dataset.to_hash(:foo, :bar)
# to
h = {}
dataset.each{|r| h[r[:foo]] = r[:bar]}
h
* Model classes now need to have a dataset before you can define
associations on them.
* Model classes now pass their dataset to Database#schema, instead of
their table name.
* The :eager_block association option (which defaults to the
association's block argument) is now called before the :eager_graph
association option has been applied, instead of after.
* The many_to_many association reflection :qualified_right_key entry
is now a method named qualified_right_key. Switch any
code using association_reflection[:qualified_right_key] to use
association_reflection.qualified_right_key.
* If you are using like on SQLite and Microsoft SQL Server and want
it to be case insensitive, switch to using ilike:
# Case sensitive
DB[:foos].where(:name.like('bar%'))
# Case insensitive
DB[:foos].where(:name.ilike('bar%'))
Sequel now sets the case_sensitive_like PRAGMA to true by default
on SQLite. To set it to false instead, pass the
:case_sensitive_like=>false option to the database when creating it.
* Sequel's alter table emulation on SQLite now renames the current
table then populates the replacement table, instead of
populating the replacement table at a temporary name, dropping
the current table, and then renaming the replacement table.
* The strings 'n' and 'no' (case insensitive) when typecasted to
boolean are now considered false values instead of true.
* The transaction internals had extensive changes, if you have any
code that depended on the transaction internals, it will probably
require changes.
* Using the Sequel::MySQL module settings for convert_tinyint_to_bool
and convert_invalid_date_time now only affects future Database
objects. You should switch to using the per-Database methods
if you are currently using the Sequel::MySQL module methods.
* The customized transaction support in the do (DataObjects) adapter
was removed. All three subadapters (postgres, mysql, sqlite) of
the do adapter implement their own transaction support, so this
should have no effect unless you were using the do adapter with
a different database type.
* The oracle support changed dramatically, so if you were relying
on the internals of the oracle support, you should take extra
care when upgrading.
= Advance Notice
* The next release of Sequel will contain significant changes to
how a dataset is literalized into an SQL string. If you have
a custom plugin, extension, or adapter that overrides a
method containing "literal", "sql", or "quote", or you make
other modifications or extensions to how Sequel currently
literalizes datasets to SQL, your code will likely need to
be modified to support the next release.
sequel-5.63.0/doc/release_notes/3.3.0.txt

New Features
------------
* An association_proxies plugin has been added. This is not a
full-blown proxy implementation, but it allows you to write code
such as:
artist.albums.filter{num_tracks > 10}
Without the plugin, you have to call filter specifically on the
association's dataset:
artist.albums_dataset.filter{num_tracks > 10}
The plugin works by proxying array methods to the array of
associated objects, and all other methods to the association's
dataset. This results in the following behavior:
# Will load the associated objects (unless they are already
# cached), and return the length of the array
artist.albums.length
# Will issue an SQL query with COUNT (even if the association
# is already cached), and return the result
artist.albums.count
* The add_*/remove_*/remove_all_* association methods now take
additional arguments that are passed down to the
_add_*/_remove_*/_remove_all_* methods. One of the things this
allows you to do is update additional columns in join tables for
many_to_many associations:
class Album
many_to_many :artists
def _add_artist(artist, values={})
DB[:albums_artists].
insert(values.merge(:album_id=>id,
:artist_id=>artist.id))
end
end
album = Album[1]
artist1 = Artist[2]
artist2 = Artist[3]
album.add_artist(artist1, :relationship=>'composer')
album.add_artist(artist2, :relationship=>'arranger')
* The JDBC adapter now accepts a :convert_types option to turn off
Java type conversion. The option is true by default for
backwards compatibility and correctness, but can be set to false
to double performance. The option can be set at the database
and dataset levels:
DB = Sequel.jdbc('jdbc:postgresql://host/database',
:convert_types=>false)
DB.convert_types = true
ds = DB[:table]
ds.convert_types = false
* Dataset#from_self now takes an option hash and respects an
:alias option, giving the table alias to use.
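For example:

  DB[:items].from_self(:alias=>:i)
  # SELECT * FROM (SELECT * FROM items) AS i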
* Dataset#unlimited was added, similar to unfiltered and unordered.
* SQL::WindowFunction is now a subclass of SQL::GenericExpression,
so you can alias it and treat it like any other SQL::Function.
Other Improvements
------------------
* Microsoft SQL Server support is much, much better in Sequel 3.3.0
than in previous versions. Support is pretty good with the ODBC,
ADO, and JDBC adapters, close to the level of support for
PostgreSQL, MySQL, SQLite, and H2. Improvements are too numerous
to list, but here are some highlights:
* Dataset#insert now returns the primary key (identity field), so
it can be used easier with models.
* Transactions can now use savepoints (except on ADO).
* Offsets are supported when using SQL Server 2005 or 2008, using
a ROW_NUMBER window function. However, you must specify an
order for your dataset (which you probably are already doing if
you are using offsets).
* Schema parsing has been implemented, though it doesn't support
primary key parsing (except on JDBC, since the JDBC support is
used there).
* The SQL syntax Sequel uses is now much more compatible, and
most schema modification methods and database types now work
correctly.
* The ADO and ODBC adapters both work much better now. The ADO
adapter no longer attempts to use transactions, since I've found
that ADO does not give a stable native connection (and hence
transactions weren't possible). I strongly recommend against
using the ADO adapter in production.
* The H2 JDBC subadapter now supports rename_column, set_column_null,
set_column_type, and add_foreign_key.
* Altering a column's type, null status, or default is now supported
on SQLite. You can also add primary keys and unique columns.
* Both the ADO and ODBC adapters now catch the native exception
classes and raise Sequel::DatabaseErrors.
* Model classes now default to associating to other classes in the
same scope. This makes it easier to use namespaced models.
* The schema parser and schema dumper now support the following
types: nchar, nvarchar, ntext, smalldatetime, smallmoney, binary,
and varbinary.
* You can now specify the null status for a column using :allow_null
in addition to :null. This is to make it easier to use the
table creation methods with the results of the schema parser.
* Renaming a NOT NULL column without a default now works on MySQL.
* Model class initialization now raises an exception if there is a
problem connecting to the database.
* Connection pool performance has been increased slightly.
* The literal_time method in the ODBC adapter has been fixed.
* An unlikely but potential bug in the MySQL adapter has been fixed.
Backwards Compatibility
-----------------------
* The convert_tinyint_to_bool setting moved from the main Sequel
module to the Sequel::MySQL module. The native MySQL adapter is
the only adapter that converted tinyint columns to booleans when
the rows are returned, so you can only use the setting with the
native MySQL adapter.
Additionally, the setting's behavior has changed. When parsing
the schema, only tinyint(1) columns are now considered as
boolean, instead of all tinyint columns. This allows you to use
tinyint(4) columns for storing small integers and tinyint(1)
columns as booleans, and not have the schema parsing support
consider the tinyint(4) columns as booleans. Unfortunately,
due to limitations in the native MySQL driver, all tinyint
column values are converted to booleans upon retrieval, not just
tinyint(1) column values.
Unfortunately, the previous Sequel behavior was to use the
default tinyint size (tinyint(4)) when creating boolean columns
(using the TrueClass or FalseClass generic types). If you were
using the generic type support to create the columns, you should
modify your database to change the column type from tinyint(4) to
tinyint(1).
If you use MySQL with tinyint columns, these changes have the
potential to break applications. Care should be taken when
upgrading if these changes apply to you.
* Model classes now default to associating to other classes in the
same scope. It's highly unlikely anyone was relying on the
previous behavior, but if you have a model inside a module that
you are associating to a model outside of a module, you now need
to specify the associated class using the :class option.
* Model#save no longer includes the primary key fields in the SET
clause of the UPDATE query, only in the WHERE clause. I'm not
sure if this affects backwards compatibility of production code,
but it can break tests that expect specific SQL.
* Behavior to handle empty identifiers has now been standardized.
If any database adapter returns an empty identifier, Sequel will
use 'untitled' as the identifier. This can break backwards
compatibility if the adapter previously used another default and
you were relying on that default. This was necessary to fix any
possible "interning empty string" exceptions.
* On MSSQL, Sequel now uses the datetime type instead of the
timestamp type for generic DateTimes. It now uses bit for the
TrueClass and FalseClass generic types, and image for the File
generic type.
* Sequel now unescapes URL parts:
Sequel.connect('ado:///db?host=server%5cinstance')
However, this can break backward compatibility if you previously
expected it not to be unescaped.
* The columns_for private SQLite Database method has been removed.
sequel-5.63.0/doc/release_notes/3.30.0.txt
= Dataset Literalization Refactoring
* As warned about in the 3.29.0 release notes, dataset literalization
has been completely refactored. It now uses an append-only design
which is faster in all cases, about twice as fast for large objects
and deeply nested structures, and over two orders of magnitude
faster in some pathological cases.
This change should not affect applications, but may affect custom
extensions or adapters that dealt with literalization of objects.
Most literalization methods now have a method with an _append
suffix that does the actual literalization, which takes the sql
string to append to as the first argument. If you were overriding
a literalization method, you now probably need to override the
_append version instead. If you have this literalization method:
def foo_sql(bar)
"BAR #{literal(bar.baz)}"
end
You need to change the code to:
def foo_sql_append(sql, bar)
sql << "BAR "
literal_append(sql, bar.baz)
end
def foo_sql(bar)
sql = ""
foo_sql_append(sql, bar)
sql
end
If you have questions about modifying your custom adapter or
extension, please ask on the Google Group or the IRC channel.
= New Features
* Model#set_server has been added to the base support (it was
previously only in the sharding plugin), which allows you to
set the shard on which to save/delete the model instance:
foo1.set_server(:server_a).save
foo2.set_server(:server_a).destroy
* Model#save now accepts a :server option that uses set_server
to set the shard to use. Unlike most other #save options, this
option persists past the end of the save. Previously, the
:server option only affected the transaction code, it now
affects the INSERT/UPDATE statement as well.
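For example (the shard name here is illustrative):
  album.save(:server=>:shard_a)
  # INSERT/UPDATE runs on shard_a, and the shard persists
  # for later calls such as album.destroy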
* When initiating a new dataset graph, any existing selected
columns are assumed to be the columns to select for the graph from
the current/master table. Before, there was not a way to specify
the columns to select from the current/master table.
* A :graph_alias_base association option has been added, which is
used to set the base alias name to use when eager graphing. This
is mostly useful when cascading eager graphs to dependent
associations, where multiple associations with the same name in
different models are being graphed simultaneously.
* You can now specify nanoseconds and a timezone offset
when converting a hash or array to a timestamp. The nanoseconds
and offset are the 7th and 8th entries in the array, and the :nanos
and :offset entries in the hash.
* The postgres adapter now respects a :connect_timeout option if you
are using the pg driver.
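For example (the connection string here is illustrative):
  DB = Sequel.connect('postgres://host/database',
    :connect_timeout=>10) # seconds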
= Other Improvements
* Type conversion of Java to Ruby types in the JDBC adapter has been
made much faster, as conversion method lookup is now
O(number of columns) instead of
O(number of columns*number of rows).
* Sequel::SQL::Blob literalization is now much faster on adapters that
use hex encoding, by switching to String#unpack('H*').
* Database#after_commit and after_rollback now respect the :server
option to set the server/shard to use.
* Symbol splitting (e.g. for table__column) is now slightly faster.
* All adapters now pass the dataset :limit/:offset value through
Dataset#literal instead of using it verbatim. Note that
Dataset#limit already called to_i on input strings, so this isn't
a security issue. However, the previous code broke if you
provided a Sequel-specific object (e.g. Sequel::SQL::Function) as
the :limit/:offset value.
* Calling graph and eager_graph on an already graphed dataset no
longer modifies the receiver.
* Model#set_server now correctly handles the case where @this is
already loaded.
* Dataset#destroy for model datasets now uses the dataset's shard
for transactions.
* When emulating offset support using ROW_NUMBER (on Microsoft SQL
Server, DB2, and Oracle), explicitly order by the ROW_NUMBER
result, as otherwise the results are not guaranteed to be ordered.
* Explicitly force a case insensitive collation when emulating ILIKE
on Microsoft SQL Server. Previously, ILIKE could be case sensitive
on Microsoft SQL Server if case sensitive collation was the
database default.
* Using on_duplicate_key_update with prepared statements on MySQL now
works correctly.
* The tinytds adapter now works correctly if the
identifier_output_method is nil.
* The plugin/extension specs were cleaned up using the mock adapter.
= Backwards Compatibility
* In addition to the previously mentioned dataset literalization
changes, any custom adapters that overrode *_clause_methods
methods need to be modified to add a method that adds the
SELECT/UPDATE/INSERT/DELETE. Previously, this was done by default,
but due to common table expressions and the dataset literalization
changes, a separate method is now needed.
* Dataset#on_duplicate_key_update_sql has been removed from the shared
mysql adapter.
* The :columns dataset option used when inserting is no longer
literalized in advance.
* Dataset#as_sql no longer takes an expression, it just takes the
alias, and only adds the alias part.
sequel-5.63.0/doc/release_notes/3.31.0.txt
= New Features
* The serialization plugin can now support custom serialization
formats, by supplying a serializer/deserializer pair of
callable objects. You can also register custom deserializers
via Sequel::Plugins::Serialization.register_format, so that
they can be referenced by name. Example:
Sequel::Plugins::Serialization.register_format(:reverse,
lambda{|v| v.reverse},
lambda{|v| v.reverse})
class User < Sequel::Model
serialize_attributes :reverse, :password
end
* Dataset#import and #multi_insert now support a
:return=>:primary_key option. When this option is used, the
methods return an array of primary key values, one for each
inserted row. Usage of this option on MySQL requires that a
separate query be issued per row (instead of the single
query for all rows that MySQL would usually use).
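For example (table and values here are illustrative):
  DB[:albums].import([:name], [['RF'], ['MO']],
    :return=>:primary_key)
  # => [1, 2]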
* PostgreSQL can now use Dataset#returning in conjunction with
import/multi_insert to set a custom column to return.
* Microsoft SQL Server can now use Dataset#output in conjunction with
import/multi_insert to set a custom column to return.
* Dataset#import and #multi_insert now respect a :server option to
set the server/shard on which to execute the queries.
Additionally, options given to this method are also passed to
Dataset#transaction.
* Dataset#insert_multiple now returns an array of inserted primary
keys.
* Model.def_column_alias has been added to make it easy to create
alias methods for columns. This is useful if you have a legacy
database and want to create friendly method names for the
underlying columns. Note that this alias only affects the setter
and getter methods. This does not affect the dataset level, so you
still need to use the actual column names in dataset filters.
* many_to_one associations can now have the same name as the related
foreign key column, using the :key_column option. Use of this
feature is not recommended, as it is much better to either rename
the column or rename the association. Here's an example of usage:
# Example schema:
# albums artists
# :id /--> :id
# :artist --/ :name
# :name
class Album < Sequel::Model
def_column_alias(:artist_id, :artist)
many_to_one :artist, :key_column=>:artist
end
* The mock adapter can now mock out database types, by providing a
shared adapter name as the host (e.g. mock://postgres). This
emulation is not perfect, but in most cases it allows you to see
what SQL Sequel would generate on a given database without needing
to install the required database driver.
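For example, to see roughly what Sequel would generate for
PostgreSQL (the table name here is illustrative, and the output
shown is approximate):
  DB = Sequel.connect('mock://postgres')
  DB[:albums].insert_sql(:name=>'RF')
  # INSERT INTO "albums" ("name") VALUES ('RF')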
* Sequel now supports creating full text indexes on Microsoft SQL
Server. Before using it, you must have previously setup a default
full text search catalog, and you need to provide a :key_index
option with an index name symbol.
* Dataset#group_rollup and #group_cube methods have been added for
GROUP BY ROLLUP and GROUP BY CUBE support. These features are in
a recent SQL standard, and they are supported to various degrees on
Microsoft SQL Server, DB2, Oracle, MySQL, and Derby.
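A sketch, assuming the methods take the grouping columns the same
way #group does (the exact SQL emitted varies by database):
  DB[:albums].group_rollup(:artist_id, :year)
  # GROUP BY ROLLUP(artist_id, year)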
* Dataset#full_text_search on Microsoft SQL Server now supports
multiple search terms.
* The jdbc adapter now supports a :login_timeout option, giving the
timeout in seconds.
= Other Improvements
* Dataset#exists can now be used with prepared statement
placeholders.
* Dataset#full_text_search can now be used with prepared statement
placeholders on PostgreSQL, MySQL, and Microsoft SQL Server.
* If tables from two separate schemas are detected when parsing the
schema for a table on PostgreSQL, an error is now raised.
Previously, no error was raised, which led to weird errors later,
such as duplicate columns in a model's primary_key.
* RETURNING is now supported with UPDATE/DELETE on PostgreSQL 8.2+.
Previously, Sequel only supported it on 9.1+, but PostgreSQL
introduced support for it in 8.2.
* The shared postgres adapter now correctly handles the return value
for Dataset#insert if you provide a separate column array and value
array on PostgreSQL < 8.2.
* Handle case in the PostgreSQL adapter where the server version
cannot be determined via a query.
* H2 clob types are now treated as string instead of as blob.
Treating clob as blob breaks on H2, as it doesn't automatically
hex-unescape the input for clobs as it does for blobs.
* Dataset#empty? now works correctly when the dataset has an offset
and offset support is being emulated.
* The mock adapter no longer defaults to downcasing identifiers on
output.
= Backwards Compatibility
* Dataset#exists now returns a PlaceholderLiteralString instead of a
LiteralString, which could potentially break some code. If you
would like a String returned, you can pass the returned object to
Dataset#literal:
dataset.literal(dataset.exists)
* Dataset#from no longer handles :a__b__c___d as "a.b.c AS d". This
was not the intended behavior, and nowhere else in Sequel is a
symbol treated that way. Now, Dataset#from is consistent with the
rest of Sequel, using "a.b__c AS d". This should only affect
people in very rare cases, as most databases don't use three level
qualified tables. One exception is Microsoft SQL Server, which can
use three level qualified tables for cross-database access.
* Previously, Dataset#insert_multiple returned an array of hashes, now
it returns an array of primary key values.
* Dataset#EXRACT_CLOSE in the shared sqlite adapter has been renamed to
Dataset#EXTRACT_CLOSE.
* Dataset::StoredProcedureMethods::SQL_QUERY_TYPE and
Dataset::ArgumentMapper::SQL_QUERY_TYPE constants have been removed,
as have related sql_query_type private methods.
* The serialization plugin was significantly refactored.
Model.serialization_map now contains a callable object instead of a
Symbol, and Model.serialization_format has been removed.
Model.define_serialized_attribute_accessors private method now takes
two callable objects before the columns, instead of a single symbol.
sequel-5.63.0/doc/release_notes/3.32.0.txt
= New Features
* Prepared statements now support :map and :to_hash prepared
statement types. The main reason for this is that certain
extensions (e.g. sequel_pg) optimize map/to_hash calls, and
there previously was not a way to use prepared statements
with the map/to_hash optimizations.
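A sketch, assuming the array form for supplying the map arguments
(dataset and statement names here are illustrative):
  ps = DB[:items].prepare([:map, :name], :item_names_ps)
  ps.call  # => ['a', 'b', ...]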
* Sequel.empty_array_handle_nulls has been added to change how
IN/NOT IN operations with an empty array are handled. See
the Backwards Compatibility section for details.
* 5 new association options have been added that allow you to
define associations where the underlying columns clash with
standard ruby method names:
many_to_one :primary_key_method
one_to_many :key_method
one_to_many :primary_key_column
many_to_many :left_primary_key_column
many_to_many :right_primary_key_method
Using these new options, you can now define associations
that work correctly when the underlying primary/foreign key
columns clash with existing ruby method names. See the RDoc
for details.
* A use_after_commit_rollback setting has been added to models.
This defaults to true, but can be set to false for performance
or to allow models to be used in prepared transactions
(which don't support after_commit/after_rollback).
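For example, to disable it globally:
  Sequel::Model.use_after_commit_rollback = false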
* Dataset#update_ignore has been added when connecting to MySQL,
enabling use of the UPDATE IGNORE syntax to skip updating a row
if the update would cause a unique constraint to be violated.
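For example (table and column names here are illustrative):
  DB[:albums].update_ignore.update(:name=>'RF')
  # UPDATE IGNORE `albums` SET `name` = 'RF'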
* Database#indexes is now supported when connecting to Microsoft
SQL Server.
* On Microsoft SQL Server, the :include option is now supported
when creating indexes, for storing column values in the index,
which can be used by the query optimizer.
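For example (table and column names here are illustrative):
  DB.add_index(:albums, :artist_id, :include=>[:name])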
= Other Improvements
* The filtering/excluding by associations code now uses qualified
identifiers instead of unqualified identifiers, which allows it
to avoid ambiguous column names if you are doing your own joins.
* Virtual row blocks that return arrays are now handled correctly
in Dataset#select_map/select_order_map.
* Dataset#select_map/select_order_map can now take both a block
argument as well as a regular argument.
* Dataset#select_order_map now handles virtual row blocks that
return ordered expressions.
* Database#table_exists? should no longer generate false negatives
if you only have permission to retrieve some column values but
not all. Note that if you lack permission to SELECT from the
table itself, table_exists? can still generate false negatives.
* The active_model plugin now supports ActiveModel 3.2, by adding
support for to_partial_path.
* The serialization_modification_detection plugin now handles
changed_columns correctly both for new objects and after saving
objects.
* The serialization plugin now clears the deserialized values when
it does the automatic refresh after saving a new object, mostly for
consistency. You can use the skip_create_refresh plugin to skip
refreshing when creating a new model object.
* Column default values are now wrapped in parentheses on SQLite,
which fixes some cases such as when the default is an SQL function
call.
* Alter table emulation now works correctly on SQLite when foreign
keys reference the table being altered. The emulation requires
a renaming/deleting the existing table and creating a new table,
which can break foreign key references. Sequel now disables the
foreign key PRAGMA when altering tables, so SQLite won't track
the table renames and break the foreign key relationships.
* The set_column_type table alteration method no longer modifies
default values and NULL/NOT NULL settings on Microsoft SQL
Server, H2, and SQLite.
* On MySQL, Time/DateTime columns now use the timestamp type if the
default value is Sequel::CURRENT_TIMESTAMP, since it is currently
impossible for MySQL to have a non-constant default for a
datetime column (without using a workaround like a trigger).
* Metadata methods such as tables, views, and view_exists? are now
handled correctly on Oracle if custom identifier input methods
are used.
* Sequel now ignores errors that occur when attempting to get
information on column defaults in Oracle (which can happen if you
lack permission to the appropriate table). Previously, such errors
would cause the schema parser to raise an error, now, the schema
information is just returned without default information.
* Database#indexes now skips the primary key index when connecting to
DB2, Derby, HSQLDB, and Oracle via the jdbc adapter.
* Database#indexes now works correctly on DB2.
* The progress adapter has been fixed, it had been broken since the
dataset literalization refactoring.
* Dataset#naked! now works correctly. Previously, it just returned
the receiver unmodified.
* Dataset#paginate! has been removed, as it was broken.
* The query extension no longer breaks Dataset#clone if an argument
is not given.
* Transaction related queries are no longer logged twice in the mock
adapter.
= Backwards Compatibility
* Sequel's default handling of NOT IN operators with an empty array
of values has changed, which can change which rows are returned for
such queries.
Previously, Sequel was inconsistent in that it tried to handle NULL
values correctly in the IN case, but not in the NOT IN case. Now,
it defaults to handling NULL values correctly in both cases:
# 3.31.0
DB[:a].where(:b=>[])
# SELECT * FROM a WHERE (b != b)
DB[:a].exclude(:b=>[])
# SELECT * FROM a WHERE (1 = 1)
# 3.32.0
DB[:a].where(:b=>[])
# SELECT * FROM a WHERE (b != b)
DB[:a].exclude(:b=>[])
# SELECT * FROM a WHERE (b = b)
The important change in behavior is that in the NOT IN case, if
the left hand argument is NULL, the filter returns NULL instead
of true. This has the potential to change query results.
"Correct" here is really an opinion and not a fact, as there are
valid arguments for the alternative behavior:
DB[:a].where(:b=>[])
# SELECT * FROM a WHERE (1 = 0)
DB[:a].exclude(:b=>[])
# SELECT * FROM a WHERE (1 = 1)
The difference is that the "correct" NULL behavior is more
consistent with the non-empty array cases. For example, if b is
NULL:
# "Correct" NULL handling
# Empty array: where(:b=>[])
WHERE (b != b) # NULL
WHERE (b = b) # NULL
# Non-empty array: where(:b=>[1, 2])
WHERE (b IN (1, 2)) # NULL
WHERE (b NOT IN (1, 2)) # NULL
# Static boolean handling
# Empty array: where(:b=>[])
WHERE (1 = 0) # false
WHERE (1 = 1) # true
# Non-empty array: where(:b=>[1, 2])
WHERE (b IN (1, 2)) # NULL
WHERE (b NOT IN (1, 2)) # NULL
Sequel chooses to default to behavior consistent with the non-empty
array cases (similar to SQLAlchemy). However, there are two
downsides to this handling. The first is that some databases with
poor optimizers (e.g. MySQL) might do a full table scan with the
default syntax. The second is that the static boolean handling may
be generally preferable, if you believe that IN/NOT IN with an
empty array should always be true or false and never NULL even if
the left hand argument is NULL.
As there really isn't a truly correct answer in this case, Sequel
defaults to the "correct" NULL handling, and allows you to switch
to the static boolean handling via:
Sequel.empty_array_handle_nulls = false
This is currently a global setting, it may be made Database or
Dataset specific later if requested. Also, it is possible the
default will switch in the future, so if you care about a specific
handling, you should set your own default.
* Database#table_exists? now only rescues Sequel::DatabaseErrors
instead of StandardErrors, so it's possible it will raise errors
instead of returning false on custom adapters that don't wrap
their errors correctly.
sequel-5.63.0/doc/release_notes/3.33.0.txt
= New Features
* A server_block extension has been added that makes Sequel's
sharding support easier to use by scoping database access inside
the block to a given server/shard:
Sequel.extension :server_block
DB.extend Sequel::ServerBlock
DB.with_server(:shard_1) do
# All of these will execute against shard_1
DB.tables
DB[:table].all
DB.run 'SOME SQL'
end
* An arbitrary_servers extension has been added that extends
Sequel's sharding support so that you can use arbitrary
connection options instead of referencing an existing, predefined
server/shard:
Sequel.extension :arbitrary_servers
DB.pool.extend Sequel::ArbitraryServers
DB[:table].server(:host=>'foo', :database=>'bar').all
You can use this extension in conjunction with the server_block
extension:
DB.with_server(:host=>'foo', :database=>'bar') do
DB.synchronize do
# All of these will execute on host foo, database bar
DB.tables
DB[:table].all
DB.run 'SOME SQL'
end
end
The combination of these two extensions makes it pretty easy to
write a thread-safe Rack middleware that scopes each request
to an arbitrary database.
* The sqlite adapter now supports an integer_booleans setting
for using 1/0 for true/false values, instead of the 't'/'f'
values used by default. As SQLite recommends using integers to
store booleans, converting your existing database and enabling
this setting is recommended, but for backwards compatibility it
is set to false. You can convert your existing database by doing
the following for each table/column that has booleans:
DB[:table].update(:boolean_column=>{'t'=>1}.
case(0, :boolean_column))
The integer_booleans default setting may change in a future
version of Sequel, so you should set it manually to false if you
prefer the current default.
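For example, when setting up a converted database:
  DB = Sequel.sqlite('my.db')
  DB.integer_booleans = true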
* You can now disable transaction use in migrations, in one of two
ways. You generally only need to do this if you are using an
SQL query inside a migration that is specifically not supported
inside a transaction, such as CREATE INDEX CONCURRENTLY on
PostgreSQL.
The first way to disable transactions is on a per-migration basis
by calling the no_transaction method inside the Sequel.migration
block:
Sequel.migration do
no_transaction
change do
# ...
end
end
That will make it so that a transaction is not used for that
particular migration. The second way is passing the
:use_transactions=>false option when calling Migrator.run (using
the API), which will completely disable transactions for all
migrations during the migrator run.
* The postgres adapter now respects an :sslmode option when using
pg as the underlying driver, you can set the value of this option to
disable, allow, prefer, or require.
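For example (the connection string here is illustrative):
  DB = Sequel.connect('postgres://host/database',
    :sslmode=>'require')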
* Database#create_schema and #drop_schema are now defined when
connecting to PostgreSQL.
* Database#supports_savepoints_in_prepared_transactions? has been
added for checking if savepoints are supported inside prepared
transactions. This is true if both savepoints and prepared
transactions are both supported, except on MySQL > 5.5.12 (due to
MySQL bug 64374).
= Other Improvements
* The mysql and mysql2 adapters now both provide an accurate number
of rows matched, so Sequel::Model usage on those adapters will now
raise a NoExistingObject exception by default if you attempt to
delete or update an instance that no longer exists in the database.
* Foreign key creation now works correctly without specifying the
:key option when using MySQL with the InnoDB table engine. InnoDB
requires that you list the column explicitly, even if you are
referencing the primary key of the table, so if the :key option is
not given, the database schema is introspected to find the primary
key for the table. If you are attempting to create a table with
a self-referential foreign key, it introspects the generator to
get the primary key for the table.
* The sqlite adapter will now return 1/0 stored in boolean columns as
true/false. It will convert dates stored as Integers/Floats to
Date objects by assuming they represent the julian date. It will
convert times stored as Integers/Floats to Sequel::SQLTime objects
by assuming they represent a number of seconds. It will convert
datetimes stored as Integers by assuming they represent a unix
epoch time integer, and datetimes stored as Floats by assuming they
represent the julian date (with fractional part representing the
time of day). These changes make Sequel handle SQLite's
recommendations for boolean/date/time storage.
* The instance_hooks plugin's (before|after)_*_hook methods now return
self so they can be used in a method chain.
* The list plugin now automatically adds new entries to the end of the
list when creating the entries, if the position field is not
specifically set.
* An identifier_output_method is now respected in the mysql2 adapter.
* NaN/Infinity Float values are now quoted correctly for input on
PostgreSQL, and the postgres adapter correctly handles them on
retrieval from the database.
* The :collate column option is now respected when creating tables or
altering columns on MySQL.
* You can now force use of the TimestampMigrator when the
IntegerMigrator would be used by default by calling
TimestampMigrator.apply or .run.
* Mock adapter usage with a specific SQL dialect now uses the
appropriate defaults for quoting identifiers.
* You can now disable the use of sudo in the rake install/uninstall
tasks using the SUDO='' environment variable.
* A very misleading error message has been fixed when attempting
to constantize an invalid string in the model inflector.
= Backwards Compatibility
* The sqlite adapter now typecasts columns that SQLite stores as
INTEGER/REAL. Previously, it only typecasted columns that
SQLite stored as TEXT/BLOB. For details about SQLite storage, see
http://www.sqlite.org/datatype3.html.
Any custom type conversion procs used with the sqlite adapter should
be modified to work with Integer/Float objects in addition to String
objects.
sequel-5.63.0/doc/release_notes/3.34.0.txt
= New PostgreSQL Extensions
* A pg_array extension has been added, supporting PostgreSQL's
numeric and string array types. Both single dimensional and
multi-dimensional array types are supported. Array values are
returned as instances of Sequel::Postgres::PGArray, which is a
delegate class of Array. You can turn an existing array into
a PGArray using Array#pg_array.
If you are using arrays in model objects, you need to load
support for that:
DB.extend Sequel::Postgres::PGArray::DatabaseMethods
This makes schema parsing and typecasting of array columns work
correctly.
This extension also allows you to use PGArray objects and arrays
in bound variables when using the postgres adapter with pg.
* A pg_hstore extension has been added, supporting PostgreSQL's hstore
type, which is a simple hash with string keys and string or NULL
values. hstore values are retrieved as instances of
Sequel::Postgres::HStore, which is a delegate class of Hash. You
can turn an existing hash into an hstore using Hash#hstore.
If you are using hstores in model objects, you need to load
support for that:
DB.extend Sequel::Postgres::HStore::DatabaseMethods
This makes schema parsing and typecasting of hstore columns work
correctly.
This extension also allows you to use HStore objects and hashes
in bound variables when using the postgres adapter with pg.
* A pg_array_ops extension has been added, making it easier to call
PostgreSQL array operators and functions using plain ruby code.
Examples:
a = :array_column.pg_array
a[1] # array_column[1]
a[1][2] # array_column[1][2]
a.push(1) # array_column || 1
a.unshift(1) # 1 || array_column
a.any # ANY(array_column)
a.join # array_to_string(array_column, '', NULL)
If you are also using the pg_array extension, you can turn
a PGArray object into a query object, which allows you to run
operations on array literals:
a = [1, 2].pg_array.op
a.push(3) # ARRAY[1,2] || 3
* A pg_hstore_ops extension has been added, making it easier to call
PostgreSQL hstore operators and functions using plain ruby code.
Examples:
h = :hstore_column.hstore
h['a'] # hstore_column -> 'a'
h.has_key?('a') # hstore_column ? 'a'
h.keys # akeys(hstore_column)
h.to_array # hstore_to_array(hstore_column)
If you are also using the pg_hstore extension, you can turn
an HStore object into a query object, which allows you to run
operations on hstore literals:
h = {'a' => 'b'}.hstore.op
h['a'] # '"a"=>"b"'::hstore -> 'a'
* A pg_auto_parameterize extension has been added for automatically
using bound variables for all queries. For example, it can take
code such as:
DB[:table].where(:column=>1)
and do:
SELECT * FROM table WHERE column = $1; -- [1]
Note that automatically parameterizing queries is not generally
faster unless the bound variables are large (i.e. long text/bytea
values). Also, there are multiple corner cases when automatically
parameterizing queries, though most can be worked around by
adding explicit casts.
* A pg_statement_cache extension has been added that works with the
pg_auto_parameterize extension for automatically caching prepared
statements and reusing them when using the postgres adapter with
pg. The combination of these two extensions makes it possible to
take an entire Sequel application and turn most or all of the
queries into prepared statements.
Note that these two extensions do not necessarily improve
performance. For simple queries, they actually hurt performance.
They do help for complex queries, but in all cases, it's faster
to use Sequel's prepared statements API manually.
= Other New Extensions
* A query_literals extension has been added that makes the select,
group, and order methods operate similar to the filter methods in
that if they are given a regular string as their first argument,
they treat it as a literal string, with additional arguments, if
any, used as placeholder values. This extension allows you to
write code such as:
DB[:table].select('a, b, ?', 2).group('a, b').order('c')
# Without query_literals:
# SELECT 'a, b, ?', 2 FROM table GROUP BY 'a, b' ORDER BY 'c'
# With query_literals:
# SELECT a, b, 2 FROM table GROUP BY a, b ORDER BY c
Sequel's default handling in this case is to use literal strings,
which is generally not desired and on some databases not even
valid syntax. In general, you'll probably want to use this
extension for all of a database's datasets, which you can do via:
Sequel.extension :query_literals
DB.extend_datasets(Sequel::QueryLiterals)
The next major version of Sequel (4.0.0) will probably integrate
this extension into the core library.
* A select_remove extension has been added that adds
Dataset#select_remove, for removing selected columns/expressions
from a dataset:
ds = DB[:table]
# Assume table has columns a, b, and c
ds.select_remove(:c)
# SELECT a, b FROM table
# Removal by column alias
ds.select(:a, :b___c, :c___b).select_remove(:c)
# SELECT a, c AS b FROM table
# Removal by expression
ds.select(:a, :b___c, :c___b).select_remove(:c___b)
# SELECT a, b AS c FROM table
This method makes it easier to select all columns except for the
columns given. This is common in cases where a table has a few
large columns that are expensive to retrieve. This method does
have some corner cases, so read the documentation before using it.
* A schema_caching extension has been added that makes it possible for
Database instances to dump the cached schema metadata to a
marshalled file, and load the cached schema metadata from the file.
This can be significantly faster than reparsing the schema from the
database, especially for databases with high latency.
bin/sequel -S has been added to dump the schema for the given
database to a file, and DB.load_schema_cache(filename) can be used
to populate the schema cache inside your application. This should
be done after creating the Database object but before loading your
model files.
Note that Sequel does no checking to ensure that the cached schema
currently reflects the state of the database. That is up to the
application.
* A null_dataset extension has been added, which adds
Dataset#nullify for creating a dataset that will not issue a
database query. It implements the null object pattern for
datasets, and is probably most useful in methods that must return
a dataset, but can determine that such a dataset will never return
a row.
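For example (loading the extension as was done in this version):
  Sequel.extension :null_dataset
  ds = DB[:table].nullify
  ds.all          # => [] (no query sent)
  ds.each{|r| p r} # no rows, no query sent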
= New Plugins
* A static_cache plugin has been added, allowing you to cache a model
statically. This plugin is useful for models whose tables do not
change while the application is running, such as lookup tables.
When using this plugin, the following methods will no longer require
queries:
* Primary key lookups (e.g. Model[1])
* Model.all calls
* Model.each calls
* Model.map calls without an argument
* Model.to_hash calls without an argument
The statically cached model instances are frozen so they are not
accidentally modified.
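For example (model and table names here are illustrative):
  class StatusType < Sequel::Model
    plugin :static_cache
  end
  StatusType[1]   # no query
  StatusType.all  # no query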
* A many_to_one_pk_lookup plugin has been added that changes the
many_to_one association retrieval code to do a simple primary
key lookup on the associated class in most cases. This results
in significantly better performance, especially if the
associated model is using a caching plugin (either caching
or static_cache).
= Core Extension Replacements
* Most of Sequel's core extensions now have equivalent methods defined
on the Sequel module:
:column.as(:alias) -> Sequel.as(:column, :alias)
:column.asc -> Sequel.asc(:column)
:column.desc -> Sequel.desc(:column)
:column.cast(Integer) -> Sequel.cast(:column, Integer)
:column.cast_numeric -> Sequel.cast_numeric(:column)
:column.cast_string -> Sequel.cast_string(:column)
:column.extract(:year) -> Sequel.extract(:year, :column)
:column.identifier -> Sequel.identifier(:column)
:column.ilike('A%') -> Sequel.ilike(:column, 'A%')
:column.like('A%') -> Sequel.like(:column, 'A%')
:column.qualify(:table) -> Sequel.qualify(:table, :column)
:column.sql_subscript(1) -> Sequel.subscript(:column, 1)
:function.sql_function(1) -> Sequel.function(:function, 1)
'some SQL'.lit -> Sequel.lit('some SQL')
'string'.to_sequel_blob -> Sequel.blob('string')
{:a=>1}.case(0) -> Sequel.case({:a=>1}, 0)
{:a=>1}.sql_negate -> Sequel.negate(:a=>1)
{:a=>1}.sql_or -> Sequel.or(:a=>1)
[[1, 2]].sql_value_list -> Sequel.value_list([[1, 2]])
[:a, :b].sql_string_join -> Sequel.join([:a, :b])
~{:a=>1} -> Sequel.~(:a=>1)
:a + 1 -> Sequel.+(:a, 1)
:a - 1 -> Sequel.-(:a, 1)
:a * 1 -> Sequel.*(:a, 1)
:a / 1 -> Sequel./(:a, 1)
:a & 1 -> Sequel.&(:a, 1)
:a | 1 -> Sequel.|(:a, 1)
* You can now wrap any object in a Sequel expression using
Sequel.expr. This is similar to the sql_expr extension, but
without defining the sql_expr method on all objects:
1.sql_expr -> Sequel.expr(1)
The sql_expr extension now just has Object#sql_expr call
Sequel.expr.
* Virtual Rows now have methods defined that handle the standard
mathematical operators:
select{|o| o.+(1, :a)} # SELECT (1 + a)
the standard inequality operators:
where{|o| o.>(2, :a)} # WHERE (2 > a)
and the standard boolean operators:
where{|o| o.&({:a=>1}, o.~(:b=>1))} # WHERE ((a = 1) AND (b != 1))
Additionally, there is now direct support for creating literal
strings in instance_evaled virtual row blocks using `:
where{a > `some crazy SQL`} # WHERE (a > some crazy SQL)
This doesn't override Kernel.`, since virtual rows use a BasicObject
subclass. Previously, using ` would result in calling the SQL
function named ` with the given string, which probably isn't valid
syntax on most databases.
* You can now require 'sequel/no_core_ext' to load Sequel without the
core extensions. The previous way of setting the
SEQUEL_NO_CORE_EXTENSIONS constant or environment variable before
loading Sequel still works.
* The core extensions have been moved from Sequel's core library into
an extension that is loadable with Sequel.extension. This extension
is still loaded by default for backwards compatibility. However,
the next major version of Sequel will no longer load this extension
by default (though it will still be available to load manually).
* You can now check if the core extensions have been loaded by using
Sequel.core_extensions?.
= Foreign Keys in the Schema Dumper
* Database#foreign_key_list has been added that gives an array of
foreign key constraints on the table. It is currently implemented
on MySQL, PostgreSQL, and SQLite, and may be implemented on other
database types in the future. Each entry in the return array is
a hash, with at least the following keys present:
:columns :: An array of columns in the given table
:table :: The table referenced by the columns
:key :: An array of columns referenced (in the table specified by
:table), but can be nil on certain adapters if the primary
key is referenced.
The hash may also contain entries for:
:deferrable :: Whether the constraint is deferrable
:name :: The name of the constraint
:on_delete :: The action to take ON DELETE
:on_update :: The action to take ON UPDATE
* The schema_dumper extension now dumps foreign key constraints on
databases that support Database#foreign_key_list. On such
databases, dumping a schema migration will dump the tables in
topological order, such that referenced tables always come before
referencing tables.
In case there is a circular dependency, Sequel breaks the
dependency and adds separate foreign key constraints at the end
of the migration. However, when a circular dependency is broken,
the migration can probably not be migrated down.
Foreign key constraints can also be dumped as a separate migration
using Database#dump_foreign_key_migration, similar to how
Database#dump_indexes_migration works.
* When using bin/sequel -C to copy databases, foreign key constraints
are now copied if the source database supports
Database#foreign_key_list.
= Other New Features
* Dataset#to_hash_groups and #select_hash_groups have been added.
These methods are similar to #to_hash and #select_hash in that they
return a hash, but hashes returned by *_hash_groups methods have
arrays of all matching values, unlike the *_hash methods which
just use the last matching value. Example:
DB[:table].all
# => [{:a=>1, :b=>2}, {:a=>1, :b=>3}, {:a=>2, :b=>4}]
DB[:table].to_hash(:a, :b)
# => {1=>3, 2=>4}
DB[:table].to_hash_groups(:a, :b)
# => {1=>[2, 3], 2=>[4]}
* Model#set_fields and #update_fields now accept :missing=>:skip and
:missing=>:raise options, allowing them to be used in more cases.
:missing=>:skip skips missing entries in the hash, instead of
setting the field to the default hash value. :missing=>:raise
raises an error for missing fields, similar to
strict_param_setting = true. It's recommended that these options
be used in new code in preference to #set_only and #update_only.
* Database#drop_table? has been added, for dropping tables if they
already exist. This uses DROP TABLE IF EXISTS on the databases that
support it. Database#supports_drop_table_if_exists? has been added
for checking whether the database supports that syntax.
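For example:
  DB.drop_table?(:albums)
  # DROP TABLE IF EXISTS albums (on supporting databases)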
* Database#create_join_table has been added that allows easy
creation of many_to_many join tables:
DB.create_join_table(:album_id=>:albums, :artist_id=>:artists)
This uses real foreign keys for both of the columns, uses a
composite primary key of both of the columns, and adds an
additional composite index of the columns in reverse order. The
primary key and additional index should ensure that almost all
operations on the join table can benefit from an index.
In terms of customization, the values in the hash can be hashes
themselves for column specific options, and an additional options
hash can also be given to override some of the default settings.
Database#drop_join_table also exists and takes the same options
as create_join_table. It mostly exists to make it easy to
reverse migrations that use create_join_table.
* Model#freeze has been added that freezes a model such that it
works correctly in a read-only state. Before, it used the standard
Object#freeze, which broke some things that should work, and
allowed changes that shouldn't be allowed (like modifying the
instance's values).
* ConnectionPool#all_connections has been added, which yields each
available connection in the pool to the block. For threaded pools,
it does not yield connections that are currently being used by
other threads. When using this method, it is important to only
operate on the yielded connection objects, and not make any
modifications to the pool itself. The pool is also locked until
the method returns.
* ConnectionPool#after_connect= has been added, allowing you to
change a connection pool's after_connect proc after instantiating
the pool.
* ConnectionPool#disconnection_proc= has been added, allowing you to
change a connection pool's disconnection_proc after instantiating the
pool.
* A Model.cache_anonymous_models accessor has been added, and can be
set to false to disable the caching of classes created by
Sequel::Model(). This caching is only useful if you want to reload
the model's file without getting a superclass mismatch. This
setting is true by default for backwards compatibility, but may be
changed to false in a later version, so you should manually set it to
true if you are using code reloading.
* Model.instance_dataset has been added for getting the dataset used
for model instances (a naked dataset restricted to a single row).
* Dataset#with_sql_delete has been added for running the given SQL
string as a delete and returning the number of rows modified. It's
designed as a replacement for with_sql(sql).delete, which is slower
as it requires cloning the dataset.
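For example (the SQL string here is illustrative):
  DB[:albums].with_sql_delete('DELETE FROM albums WHERE id > 100')
  # => number of deleted rows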
* The :on_update and :on_delete entries for foreign_key now accept
string arguments which are used literally.
* Prepared statement objects now have a log_sql accessor that can be
turned on to log the entire SQL statement instead of just the
prepared statement name.
* Dataset#multi_replace has been added on MySQL. This is similar to
multi_insert, but uses REPLACE instead of INSERT.
* Dataset#explain has been added to MySQL. You can use an
:extended=>true option to use EXPLAIN EXTENDED.
* A Database#type_supported? method has been added on PostgreSQL to
check if the database supports the given type:
DB.type_supported?(:hstore)
* Database#reset_conversion_procs has been added to the postgres
adapter, for use by extensions that modify the default conversion
procs and want to have the database use the updated defaults.
* A Database#convert_infinite_timestamps accessor has been added to
the postgres adapter, allowing you to return infinite timestamps as
nil, a string, or a float.
* SQL::PlaceholderLiteralString objects can now use a placeholder
array, where placeholder values are inserted between array elements.
This is about 2.5-3x faster than using a string with ? placeholders,
and allows usage of ? inside the array:
Sequel.lit(["(", " ? ", ")"], 1, 2) # (1 ? 2)
* SQL::Subscript#[] has been added for accessing members of a
multi-dimensional array:
Sequel.subscript(:column, 1)[2][3] # column[1][2][3]
* SQL::Wrapper has been added for wrapping arbitrary objects in a
Sequel expression object.
* SQL::QualifiedIdentifier objects can now contain arbitrary Sequel
expressions. Before, they could only contain a few expression
types. This makes it easier to add extensions to support
PostgreSQL row-valued types.
= Performance Improvements
* Model.[] when called with a primary key has been made about 110%
faster for most models by avoiding cloning datasets.
* Model.[] when called without arguments or with a single nil argument
is much faster as it now returns nil immediately instead of issuing
a database query.
* Model#delete and Model#destroy have been made about 75% faster for
most models by using a static SQL string.
* Model.new is now twice as fast when passed an empty hash.
* Model#set is now four times as fast when passed an empty hash.
* Model#this has been made about 85% faster by reducing the number of
dataset clones needed from 3 to 1.
* Some proc activations have been removed, giving minor speedups when
running on MRI.
= Other Improvements
* Database#uri and #url now return the connection string given
to Sequel.connect. Previously, they tried to reconstruct the
url using the database's options, but that didn't work well in
corner cases.
* Database#inspect now shows the URL and/or options given when
connecting to the database. Previously, it showed the URL, or
all of the databases options if constructing the URL raised an
error.
* Sequel no longer checks for prepared transactions support when
using transactions unless a prepared transaction is specifically
requested.
* The schema utility dataset cached in the Database object is now
reset if you use Database#extend_datasets, ensuring that the new
value will use the given extension.
* The prepared_statements* plugins now log the full SQL by default.
Since the user doesn't choose the name of the prepared statements,
it was often difficult to determine what SQL was actually run if
you were only looking at a subsection of the SQL log.
* The nested_attributes plugin's delete/remove support now works
correctly when a false value is given for _delete/_remove and
strict_param_setting is true.
* The hook_class_methods and validation_class_methods plugins
now work correctly when subclassing if the subclass attempts to
create instances inside Model.inherited.
* The caching plugin has been refactored. Model.cache_get_pk and
cache_delete_pk have been added for retrieving/deleting from the
cache by primary key. Model.cache_key is now a public method.
* The typecast_on_load plugin now works correctly when saving
new model objects when insert_select is supported.
* In the sql_expr extension, nil.sql_expr is no longer treated as
a boolean value. It is now treated as a value with generic type.
* The postgres adapter no longer issues a query to map type names to
type oids if no named conversion procs have been registered.
* The postgres adapter now works around issues in ruby-pg by
supporting fractional seconds for Time/DateTime values, and
supporting SQL::Blob (bytea) values with embedded "\0" characters.
* The postgres adapter now supports pre-defining the PG_NAMED_TYPES
and PG_TYPES constants. This is so extensions can define them,
so they don't have to load the postgres adapter file first. If
extensions need to use these constants, they should do:
PG_NAMED_TYPES = {} unless defined?(PG_NAMED_TYPES)
PG_TYPES = {} unless defined?(PG_TYPES)
That way they work whether they are loaded before or after the
postgres adapter.
* PostgreSQL 8.2-9.0 now correctly add the RETURNING clause when
building queries. Sequel 3.31.0 added support for returning values
from delete/update queries in PostgreSQL 8.2-9.0, but didn't change
the literalization code to use the RETURNING clause on those
versions.
* The jdbc/postgres adapter now converts Java arrays
(Java::OrgPostgresqlJdbc4::Jdbc4Array) to ruby arrays.
* Tables and schemas with embedded ' characters are now handled
correctly when parsing primary keys and sequences on PostgreSQL.
* Identifiers are now escaped on MySQL and SQLite. Previously they
were quoted, but internal ` characters were not doubled.
* Fractional seconds for the time type are now returned correctly on
jdbc (assuming they are returned as java.sql.Time values by JDBC).
* Multiple changes were made to ensure that Sequel works correctly
when the core extensions are not loaded.
* Composite foreign key constraints are now retained when emulating
alter_table operations on SQLite. Previously, only single
foreign key constraints were retained.
* An error is no longer raised when no indexes exist when calling
Database#indexes on jdbc/sqlite.
* A possible SystemStackError has been fixed in the SQLite adapter,
when trying to delete a dataset that uses a having clause and no
where clause.
* ROLLUP/CUBE support now works correctly on Microsoft SQL Server
2005.
* Unsigned tinyint types are now recognized in the schema dumper.
* Using primary_key :column, :type=>Bignum now works correctly on H2.
Previously, the column created was not autoincrementing.
* Using a bound variable for a limit is now supported in the ibmdb
adapter on ruby 1.9.
* Connecting to PostgreSQL via the swift adapter has been fixed when
using newer versions of swift.
* The mock adapter now handles calling the Database#execute methods
directly (instead of via a dataset).
* The mock adapter now has the ability to have per-shared adapter
specific initialization code executed. This has been used to fix
some bugs when using the shared postgres adapter.
* The pretty_table extension has been split into two extensions, one
that adds a method to Dataset and one that just adds the
PrettyTable class. Also, PrettyTable.string has been added to get
a string copy of the table.
* A spec_model_no_assoc task has been added for running model specs
without the association plugin loaded. This is to check that the
SEQUEL_NO_ASSOCIATIONS setting works correctly.
= Deprecated Features to be Removed in Sequel 3.35.0
* Ruby <1.8.7 support is now deprecated.
* PostgreSQL <8.2 support is now deprecated.
* Dataset#disable_insert_returning on PostgreSQL is now deprecated.
Starting in 3.35.0, RETURNING will now always be used to get the
primary key value when inserting.
* Array#all_two_pairs? is now deprecated. It was part of the core
extensions, but the core extensions have been refactored to no
longer require it. As it doesn't specifically relate to creating
Sequel expression objects, it is being removed. The private
Array#sql_expr_if_all_two_pairs method is deprecated as well.
= Other Backwards Compatibility Issues
* The generic Bignum type now uses bigint on SQLite, similar to
other databases. The integer type was previously used. The only
exception is for auto incrementing primary keys, which still use
integer for Bignum as SQLite doesn't support autoincrementing
columns other than integer.
* On SQLite, Dataset#explain now returns a string, similar to
PostgreSQL (and now MySQL).
* When using the JDBC adapter, Java::OrgPostgresqlUtil::PGobject
objects are converted to ruby strings if the dataset is set to
convert types (the default setting). This is to support the
hstore extension, but it could have unforeseen effects if custom
types were used.
* For PostgreSQL connection objects, #primary_key and #sequence now
require their arguments are provided as already literalized
strings. Note that these methods are being removed in the next
version because they will not be needed after PostgreSQL <8.2
support is dropped.
* Database#uri and #url now return a string or nil, but never raise
an exception. Previously, they would either return a string
or raise an exception.
* The Model @simple_pk and @simple_table instance variables should
no longer be modified directly. Instead, the setter methods should
be used.
* Model.primary_key_lookup should no longer be called with a nil
value.
* Logging of prepared statements on some adapters has been changed
slightly, so log parsers might need to be updated.
* Dataset#identifier_append and #table_ref_append no longer treat
literal strings and blobs specially. Previously, they were treated
as identifiers.
* Dataset#qualified_identifier_sql_append now takes 3 arguments, so
any extensions that override it should be modified accordingly.
* Some internally used constants and private methods have been
deleted:
Database::CASCADE
Database::NO_ACTION
Database::SET_DEFAULTS
Database::SET_NULL
Database::RESTRICT
Dataset::COLUMN_ALL
or moved:
MySQL::Dataset::AFFECTED_ROWS_RE -> MySQL::Database
MySQL::Dataset#affected_rows -> MySQL::Database
* The sql_expr extension no longer creates the
Sequel::SQL::GenericComplexExpression class.
sequel-5.63.0/doc/release_notes/3.35.0.txt
= New Features
* A dirty plugin has been added, which saves the initial value of
the column when the column is changed, similar to
ActiveModel::Dirty:
artist.name # => 'Foo'
artist.name = 'Bar'
artist.initial_value(:name) # 'Foo'
artist.column_change(:name) # ['Foo', 'Bar']
artist.column_changes # {:name => ['Foo', 'Bar']}
artist.column_changed?(:name) # true
artist.reset_column(:name)
artist.name # => 'Foo'
artist.column_changed?(:name) # false
artist.update(:name=>'Bar')
artist.column_changes # => {}
artist.previous_changes # => {:name=>['Foo', 'Bar']}
* Database#create_table now respects an :as option to create a
database based on the results of a query. The :as option value
should either be an SQL string or a dataset.
DB.create_table(:new_foos, :as=>DB[:foos].where(:new=>true))
* The json_serializer and xml_serializer plugins can now serialize
arbitrary arrays of model objects by passing an :array option
to the to_json class method. This works around an issue in
ruby's JSON library where Array#to_json does not pass arguments
given to it to the members of the array.
Artist.to_json(:array=>[Artist[1]], :include=>:albums)
* You can now use the % (modulus) operator in the same way you
can use the bitwise operators in Sequel:
:column.sql_number % 1 # (column % 1)
* On PostgreSQL, you can now provide :only, :cascade, and :restart
options to Dataset#truncate to use ONLY, CASCADE, and
RESTART IDENTITY. Additionally, you can now truncate multiple
tables at the same time:
DB.from(:table1, :table2).truncate(:cascade=>true)
* The :index option when creating columns in the schema generator
can now take a hash of index options:
DB.create_table(:foo){Integer :bar, :index=>{:unique=>true}}
* A Database#cache_schema accessor has been added, it can be set
to false to have the Database never cache schema results. This
can be useful in Rails development mode, so that you don't need to
restart a running server to have models pick up the new schema.
* Database#log_exception has been added for easier instrumentation.
It is called with the exception and SQL query string for all
queries that raise an exception.
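One possible instrumentation hook, as a sketch (Notifier is a
hypothetical stand-in for your own error tracker):
  def DB.log_exception(exception, sql)
    super
    Notifier.notify(exception, sql) # hypothetical error tracker
  end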
* The Sequel.migration DSL now has a transaction method that forces
transaction use for the given migration.
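For example:
  Sequel.migration do
    transaction
    change do
      # ...
    end
  end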
= Other Improvements
* Many theoretical thread-safety issues have been fixed for ruby
implementations that don't use a global interpreter lock.
Previously, Sequel relied on MRI's global interpreter lock for
part of its thread safety, now it does manually locking in more
places to avoid thread-safety issues on JRuby (and other ruby
implementations without a global interpreter lock).
No Sequel user ever reported a production error related to the
previous thread-safety issues, and most of the issues fixed
were so difficult to hit that even tests specifically designed
to raise errors were unable to do so.
* Sequel.single_threaded = true now disables the mutex
synchronization that enforces thread safety for additional
performance in single threaded mode.
* Sequel's migrators now only attempt to use transactions by
default if the underlying database supports transactional DDL.
SQLite does support transactional DDL, but Sequel will not
use transactions for SQLite migrations as it causes issues
when emulating alter_table operations for tables with foreign
keys.
* Errors that occur when rolling back database transactions are
now handled correctly. Previously, the underlying exception was
raised, it wasn't correctly wrapped in a Sequel::DatabaseError,
and if it was due to a database disconnection, the connection
wasn't removed from the pool.
* Sequel no longer sets ruby instance variables on java objects,
fixing warnings on JRuby 1.7 and attempting to be forward
compatible with JRuby 2.0.
* Sequel now uses date and timestamp formats on Microsoft SQL
Server that are language-neutral and not DATEFORMAT dependent.
* Sequel now correctly escapes backslash-carriage return-line feed
on Microsoft SQL Server.
* Parsing the column default values in the oracle adapter no longer
requires database superuser privileges.
* Sequel now correctly handles parsing schema for tables in other
databases on MySQL. Previously, it would always look in the
current database.
* Sequel no longer doubles backslashes in strings by default. It
now only does so on MySQL, since that is the only database that
appears to use backslashes for escaping. This fixes issues with
backslashes being doubled on some of the less commonly used
adapters.
* The pg_auto_parameterize extension now works correctly when
using cursors.
* Dataset#truncate now raises an Error if you attempt to truncate
a dataset that uses HAVING. Previously, it only checked for
WHERE.
* The schema dumper now recognizes the identity type.
= Backwards Compatibility
* Association reflections now store cached information in a
separate subhash due to the thread-safety changes. Any code
accessing an association reflection should always call the
related method to get the cached data instead of checking
for a specific location in the hash.
* Association reflection internals for many_through_many associations
changed significantly; any code that accesses the edge information
in the reflection will need to be changed to use the new methods
instead of accessing the old values directly.
* The features deprecated in 3.34.0 have now been removed:
* Ruby <1.8.7 support
* PostgreSQL <8.2 support
* Dataset#disable_insert_returning on PostgreSQL
* Array#all_two_pairs? and #sql_expr_if_all_two_pairs
sequel-5.63.0/doc/release_notes/3.36.0.txt

= New Features
* An eager_each plugin has been added, which automatically makes
eagerly loaded datasets do eager loading if you call #each (or
another Enumerable method) instead of #all. By default, if you
call #each on an eager dataset, it will not do eager loading,
and if you call #each on an eager_graph dataset, you will
get plain hashes with columns from all joined tables instead of
model objects. With this plugin, #each on both eager and
eager_graph datasets will do eager loading.
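A minimal sketch, assuming an Album model with an :artist
association:

    Album.plugin :eager_each
    # #each now triggers eager loading, just like #all:
    Album.eager(:artist).each{|album| album.artist}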
* The nested attributes plugin now supports composite primary keys
in associated records. Additionally, it now deals better with
natural primary keys in associated records. There is a new
:unmatched_pk option that can be set to :create if you want to
create new associated records when the input hash contains
primary key information that doesn't match one of the existing
associated objects.
The nested attributes plugin now also supports a :transform option.
If given, this option is called with the parent object and the
input hash given for each associated record passed into the
nested attributes setter. The callable should return the hash
of attributes to use.
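A brief sketch of both options (the artist_name attribute merged
in by the :transform proc is hypothetical):

    Artist.nested_attributes :albums, :unmatched_pk=>:create,
      :transform=>proc{|artist, hash| hash.merge(:artist_name=>artist.name)}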
* Model#from_json in the json_serializer plugin now takes an options
hash and recognizes the :fields option. If the :fields option is
given, it should be an array of field names, and set_fields is
called with the array instead of using set. This allows you to
easily filter which fields in the hash are set in the model
instance. The entire options hash is also passed to set_fields
if :fields is present, so you can additionally use the :missing =>
:raise or :missing => :skip options that set_fields supports.
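For example, setting only the name field from untrusted input
(the JSON string is hypothetical):

    album.from_json('{"name":"RF","id":1}', :fields=>%w'name')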
* The Dataset#to_json method in the json_serializer plugin now
respects :root=>:collection and :root=>:instance options. If
:root=>:collection is given, only the collection is wrapped in a
hash, and if :root=>:instance is given, only the instances are
wrapped in a hash. For backwards compatibility, :root=>true wraps
both the instances and the collection in a hash:
Model.to_json(:root=>true)
# {"models":[{"model":{"id":1}}]}
Model.to_json(:root=>:collection)
# {"models":[{"id":1}]}
Model.to_json(:root=>:instance)
# [{"model":{"id":1}}]
Wrapping both the collection and instance in a root by default
is probably not the desired behavior, so the default for :root=>true
may change in the next major version of Sequel. Users who want
the current behavior should switch to using :root=>:both.
* The schema_dumper extension now respects an :index_names option
when dumping. This option can be set to false to never dump the
index names. It can also be set to :namespace, in which case if
the database does not have a global index namespace, it will
automatically prefix the name of the index with the name of the
table.
Database#global_index_namespace? was added to check if the
database uses a global index namespace. If false, index names are
probably namespaced per table (MySQL, MSSQL, Oracle).
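A sketch, assuming the schema_dumper extension is already loaded:

    DB.global_index_namespace?  # e.g. false on MySQL
    DB.dump_schema_migration(:index_names=>:namespace)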
* :each is now a valid prepared statement type. This prepared
statement type requires a block when you call the statement, and
iterates over the records of the statement a row at a time.
Previously, there wasn't a way to iterate over the records of a
prepared statement a row at a time, since the :select and :all
types collect all rows into an array before iterating over them.
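For example (the dataset and statement name are hypothetical):

    ps = DB[:items].prepare(:each, :select_items)
    ps.call{|row| p row} # yields one row at a time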
* A :connection_handling=>:queue option is now respected for
database objects, and changes the threaded connection pools to use
a queue instead of a stack as the data structure for storing
available connections. A queue does not perform as well as a
stack, but reduces the likelihood of stale connections.
It is possible that Sequel will change in the future from using a
stack by default to using a queue by default, so any users who
specifically desire a stack to be used should specify the
:connection_handling=>:stack option.
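A connection sketch (the connection URL is hypothetical):

    DB = Sequel.connect('postgres://localhost/app',
      :connection_handling=>:queue)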
* Sequel::Migrator now supports an is_current? class method to check
if there are no outstanding migrations to apply. It also supports
a check_current class method, which raises an exception if there
are outstanding migrations to apply.
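For example (the migration directory is hypothetical):

    Sequel::Migrator.is_current?(DB, 'db/migrations')   # => true or false
    Sequel::Migrator.check_current(DB, 'db/migrations') # raises if behind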
* A pg_json extension has been added, supporting PostgreSQL's 9.2
json type, similarly to the pg_array and pg_hstore extensions.
Note that with the current PostgreSQL json code, the root object
can be a string or number, but ruby's json library requires the
root json value to be an object or array. So you will probably
get an exception if you attempt to retrieve a PostgreSQL json
value that ruby's JSON library won't parse.
* A pg_inet extension has been added, which automatically typecasts
PostgreSQL inet and cidr types to ruby IPAddr objects on retrieval.
* Database#transaction on PostgreSQL now recognizes :read_only and
:deferrable options, and can use them to set the READ ONLY and
DEFERRABLE transaction flags. A :synchronous option is also
recognized, which can be set to true, false, :local, or
:remote_write, and sets the value of synchronous_commit just for
that transaction.
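For example (the table name is hypothetical):

    DB.transaction(:read_only=>true, :synchronous=>:local) do
      DB[:table].count
    end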
* When adding and dropping indexes on PostgreSQL, a :concurrently
option can be used to create or drop the index CONCURRENTLY, which
doesn't require a full write table lock.
* When dropping indexes on PostgreSQL, :if_exists and :cascade options
are now recognized.
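For example (table and column names are hypothetical):

    DB.add_index(:items, :name, :concurrently=>true)
    DB.drop_index(:items, :name, :concurrently=>true, :if_exists=>true)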
* When using alter_table set_column_type on PostgreSQL, the :using
option is respected, and can be used to force a specific conversion
from the previous value to the new value with the USING syntax.
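A sketch converting a text column to an integer column (table and
column names are hypothetical):

    DB.alter_table(:items) do
      set_column_type :number, Integer,
        :using=>Sequel.cast(:number, Integer)
    end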
* On MySQL, you can now set an :sql_mode option when connecting. This
can be a string or symbol or an array of them, and each should match
one of MySQL's sql_modes. MySQL's default SQL mode is fairly loose,
and using one of the strict sql modes is recommended, but for
backwards compatibility, Sequel will not set a specific SQL mode by
default. However, that may change in the next major version of
Sequel, so to be forwards compatible you should set :sql_mode=>nil
if you do not desire a strict SQL mode to be set automatically.
* Partial indexes are now supported on Microsoft SQL Server 2008
(SQL Server refers to them as filtered indexes). Attempting to
use a partial index on an earlier version of SQL Server will
result in the database raising an exception.
* A jdbc/progress adapter has been added, supporting the Progress
database via the jdbc adapter.
= Other Improvements
* Dataset#get now works correctly if you pass it a nil or false
argument. Previously, it ignored the argument and used the block
instead. If you want to use the block argument, you should not
pass in a regular argument.
* Database#call now passes any blocks given to it to the underlying
prepared statement object. Before, a passed block was ignored.
* Sequel::Model.db is no longer set automatically when creating
an anonymous class with an associated database object. This fixes
cases where a library would create namespaced models, and the
database used by the library would be set as the default for the
user's application code.
* Model *_to_one association setters are now no-ops if you pass a
value that is the same as the cached value. This fixes issues with
reciprocal associations getting reordered, and is better
for performance.
For cases where the old behavior is desired, the
set_associated_object_if_same? method can be overridden to return
true. If you are manually setting objects in the associations
cache before calling the setter method, you may want to do that.
* The dirty plugin no longer affects the return value of refresh
and lock!. Internal changes should now help ensure that plugins
don't affect the return values of these methods.
* Sequel now supports JRuby 1.7's new exception handling, fixing
exception handling when connecting in the jdbc adapter.
* When dumping unsigned integer types in the schema dumper, if the
unsigned values could overflow a 32-bit signed integer type,
the generic Bignum class is used as the type. This should fix
issues when copying a database containing an unsigned 32-bit
integer column with values between 2^31 and 2^32-1.
* In the optimistic_locking plugin, attempting to refresh and
save after a failed save now works correctly. Before, the second
save would never modify a row.
* Time types on jdbc/postgres are now typecasted accurately on
retrieval; before, they could be off by up to a millisecond due to
floating point issues.
* Disconnect detection in the mysql2 adapter has been improved.
* The jdbc/mysql, do/mysql, and swift/mysql adapters all now support
the :timeout option to set the MySQL wait_timeout.
* Savepoints in prepared transactions are now supported on MySQL
5.5.23+, since the bug that caused them to be unsupported starting
in 5.5.13 has been fixed.
* Parsing foreign key metadata for tables with an explicit
schema now works correctly on PostgreSQL.
* bin/sequel -C now namespaces indexes automatically when copying
from a database without a global index namespace to a database
with a global index namespace.
* Indexes are now dropped in reverse order that they were added in
the schema_dumper.
* The Model typecasting code works around bugs in objects where
object.==('') would raise an exception instead of returning false.
* A better error message is used if an invalid JDBC URL is
provided and the JDBC driver's new.connect method returns NULL.
* A document describing Sequel's object model has been added,
describing the objects Sequel uses to represent SQL concepts.
* Most adapter specific options to Database methods are now mentioned
in the main Database method RDoc.
= Backwards Compatibility
* The nested_attributes plugin internals changed significantly. If
you were overriding one of the nested_attributes* private methods
and calling super to get the default behavior, you may have to
update your code.
* Database#case_sensitive_like has been removed on SQLite. This
method never worked correctly, it always returned false even if
the case_sensitive_like PRAGMA was set. That's because SQLite
doesn't offer a getter for this PRAGMA, only a setter. Note that
Database#case_sensitive_like= still exists and works correctly.
* Database#single_value has been removed from the native SQLite
adapter. This method was designed for internal use, and hasn't
been used for some time. Any current users of the method should
switch to Dataset#single_value.
* The private Database#defined_columns_for method in the SQLite
adapter no longer takes an options hash.
* A couple jdbc/postgres adapter methods are now private. Previously,
the jdbc/postgres adapter overrode some private superclass methods
but left the methods public.
* When using the optimistic_locking plugin, refreshing inside a
before_update method after calling super will now result in the
lock checking being skipped.
* The private Model#_refresh no longer returns self, so external
plugins should no longer rely on that behavior.
sequel-5.63.0/doc/release_notes/3.37.0.txt

= New Features
* Database#extension and Dataset#extension have been added and
make it much easier to use extensions that just define modules,
where you previously had to manually extend a Database or
Dataset object with the module to get the extension's behavior.
These methods operate similarly to model plugins, where you just
specify the extension symbol, except that you can specify multiple
extensions at once:
DB.extension(:pg_array, :pg_hstore)
For databases, these modify the Database itself (and
potentially all of its datasets). Dataset#extension operates
like other dataset methods, returning a modified clone of
the dataset with the extension added:
dataset = dataset.extension(:columns_introspection)
Dataset#extension! has also been added for modifying the
receiver instead of returning a clone.
Not all extensions are usable by Database#extension or
Dataset#extension; the extension has to have specific support
for it. The following extensions support both
Database#extension and Dataset#extension:
* columns_introspection
* query_literals
* split_array_nil
The following extensions support just Database#extension:
* arbitrary_servers
* looser_typecasting
* pg_array
* pg_auto_parameterize
* pg_hstore
* pg_inet
* pg_interval
* pg_json
* pg_range
* pg_statement_cache
* server_block
Any user that was loading these extensions with Sequel.extension
and then manually extending objects with the extension's module
is encouraged to switch to Database#extension and/or
Dataset#extension.
* Dataset join methods now respect a :qualify=>:deep option
to do deep qualification of expressions, allowing qualification
of subexpressions in the expression tree. This can allow you
to do things like:
DB[:a].join(:b, {:c.cast(Integer)=>:d.cast(Integer)},
:qualify=>:deep)
# SELECT * FROM a INNER JOIN b
# ON (CAST(b.c AS INTEGER) = CAST(a.d AS INTEGER))
For backwards compatibility, by default Sequel will only do
automatic qualification if the arguments are simple symbols.
This may change in a future version; if automatic qualification
of only symbols is desired, switch to using :qualify=>:symbol.
You can also choose to do no automatic qualification using the
:qualify=>false option.
* All of Sequel's model associations now work with key expressions
that are not simple column references, without creating a fully
custom association. So you can create associations where the
primary/foreign key values are stored in PostgreSQL array or
hstore columns, for example.
* The pg_array extension has now been made more generic, so that it
is easy to support array types for any scalar type that is
currently supported. All scalar types that Sequel's postgres
adapter supports now have corresponding array types supported in
the pg_array extension. So if you load the pg_array extension and
return a date array column, the returned values will be arrays of
ruby Date objects.
Other pg_* extensions that add support for PostgreSQL-specific
scalar types now support array versions of those types if the
pg_array extension is loaded first.
* A pg_range extension has been added, making it easy to deal
with PostgreSQL 9.2+'s range types. As ruby's Range class does
not support all PostgreSQL range type values (such as empty ranges,
unbounded ranges, or ranges with an exclusive beginning), range
types are returned as instances of Sequel::Postgres::PGRange, which
has an API similar to Range. You can turn a PGRange into a Range
using PGRange#to_range, assuming that the range type value does not
use features that are incompatible with ruby's Range class.
The pg_range extension supports all range types supported by
default in PostgreSQL 9.2, and makes it easy to support custom
range types.
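A retrieval sketch (the table and range column are hypothetical):

    r = DB[:events].get(:during) # a Sequel::Postgres::PGRange
    r.to_range                   # a ruby Range, when representable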
* A pg_range_ops extension has been added, which adds DSL support for
PostgreSQL range operators and functions, similar to the
pg_array_ops and pg_hstore_ops extensions.
* A pg_interval extension has been added, which makes Sequel return
PostgreSQL interval types as instances of ActiveSupport::Duration.
This is useful if you want to take the interval value and use it in
calculations in ruby (assuming you load the appropriate parts of
ActiveSupport).
* A split_array_nil extension has been added, which changes how Sequel
compiles IN/NOT IN expressions with arrays with nil values.
where(:col=>[1, nil])
# Default:
# WHERE (col IN (1, NULL))
# with split_array_nil extension:
# WHERE ((col IN (1)) OR (col IS NULL))
exclude(:col=>[1, nil])
# Default:
# WHERE (col NOT IN (1, NULL))
# with split_array_nil extension:
# WHERE ((col NOT IN (1)) AND (col IS NOT NULL))
* The nested_attributes plugin now allows the :fields option to
be a proc, which is called with the associated object and should
return an array of allowable fields.
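For example (the column choices are hypothetical):

    Artist.nested_attributes :albums,
      :fields=>proc{|album| album.new? ? [:name, :year] : [:name]}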
* You can now specify the graph alias base when using eager_graph on
a per-call basis. Previously, it could only be set on a per
association basis. This is helpful if you have multiple
associations to the same class, and are cascading the eager graph to
dependent associations of that class for both of the associations.
Previously, there was no way to manually give descriptive names to
the tables in the cascaded associations, but you can now do so
by passing the association as an Sequel::SQL::AliasedExpression
instance instead of a plain Symbol. Here's a usage example:
ds = Game.eager_graph(:winner=>:players.as(:winning_players),
:loser=>:players.as(:losing_players)).
where(:winning_players__name=>'A',
:losing_players__name=>'B')
* many_through_many associations now differentiate between column
references and method references, by supporting the
:left_primary_key_column and :right_primary_key_method options that
many_to_many associations support.
* Custom :eager_loader procs that accept a single hash argument now
have an additional entry passed in the hash, :id_map, which is
easier to use than the :key_hash entry (which is still present for
backwards compatibility). Anyone with custom :eager_loader procs is
encouraged to switch from using :key_hash to :id_map.
* You can now override the create_table/alter_table schema generators
per database/adapter. This allows for database specific generator
subclasses, which have methods for unique features for that
database.
* You can now setup exclusion constraints on PostgreSQL using the
create_table and alter_table schema generators:
DB.create_table(:t) do
...
exclude([[:col1, '&&'], [:col2, '=']])
# EXCLUDE USING gist (col1 WITH &&, col2 WITH =)
end
One common use for exclusion constraints is to make sure that no two
rows have overlapping values/ranges/circles.
* When adding foreign key constraints to an existing table on
PostgreSQL, you can use the :not_valid option to mark the constraint
as not yet valid. This will make it so that future changes to the
table need to respect the foreign key constraint, but existing rows
do not. After cleaning up the existing data, you can then use the
alter_table validate_constraint method to mark the constraint as
valid.
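A sketch (table, column, and constraint names are hypothetical):

    DB.alter_table(:foos) do
      add_foreign_key [:bar_id], :bars, :not_valid=>true,
        :name=>:foos_bar_fk
    end
    # After cleaning up the existing rows:
    DB.alter_table(:foos){validate_constraint :foos_bar_fk}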
* An eval_inspect extension has been added that attempts to do
the following for Sequel::SQL::Expression instances:
eval(obj.inspect) == obj # => true
There are a lot of cases that this extension does not handle, but
it does a decent job in most cases. This is currently only used
internally in a specific case in the schema_dumper extension.
= Other Improvements
* The filter by associations support now respects the method
reference vs column reference distinction that other parts of the
association code have respected since 3.32.0.
* In the nested_attributes plugin, new one_to_one associated
values are saved once instead of twice. Previously it attempted to
save them before they were associated to the current model object,
which can violate some validations/constraints.
* When saving an associated object in the one_to_one association
setter method, Sequel no longer adds an unnecessary filter
condition when nullifying the foreign key for existing rows
in the associated table.
* The list plugin's before_create method now calls super, which
fixes usage when other plugins that define before_create are loaded
before it.
* In the pg_array extension, when typecasting an Array to PGArray,
a recursive map is done on the input array to convert each value
in the input array to the expected type, using the typecasting
method that would be used for the scalar value. For example, for
model objects, where ids is an integer array column:
model.set(:ids=>['1', '2']).ids.to_a # => [1, 2]
* The pg_array extension now correctly handles bytea arrays used
in bound variables.
* The pg_array extension no longer uses the JSON-based parser for
floating point types, since it doesn't handle NaN and Infinity
values correctly.
* When typecasting in the pg_array extension, PGArray values are
only returned verbatim if they have a matching database type.
Otherwise, the underlying array is rewrapped in a new PGArray
value with the correct database type.
* H2 clob types are now recognized as strings instead of blobs.
Previously the code attempted to do this, but it didn't do so
correctly.
* The jdbc/postgres adapter now converts scalar values of
the array to the appropriate type. Previously, if you retrieved
a date array, you got back a ruby array of JavaSQL::SQL::Date
instances. Now, you get back a ruby array of ruby Date instances.
* The schema_dumper extension now dumps migrations as change
migrations, instead of separate up/down migrations, resulting in
simpler code.
* When dumping non-integer foreign keys in the schema dumper, an
explicit type is now used. Previously, the column would have been
dumped as an integer column.
* When dumping unsigned integer columns in the schema dumper, add a
column > 0 constraint in the dumped migration.
* On Microsoft SQL Server, when updating a dataset with a limit,
the limit is now respected.
* When emulating offset using the ROW_NUMBER window function,
do not require that the dataset be ordered. If an order is
not provided, default to ordering on all of the columns in
the dataset. If you want to override the default order used
in such a case, you need to override the default_offset_order
method for the dataset.
* On SQLite, casting to Date/Time/DateTime now calls an SQLite
date/datetime function instead of using a cast, as SQLite treats
such a cast as a cast to integer.
* When using JRuby 1.6 in ruby 1.9 mode and typecasting a time
column, Sequel works around a bug where Time#nsec is 0 even though
Time#usec is not.
* The odbc/mssql adapter now correctly handles the case where
SCOPE_IDENTITY returns NULL after an insert.
* bin/sequel now accepts multiple -l options for logging to multiple
output files.
* In addition to Sequel's rigorous pre-push testing, Sequel now
also uses TravisCI for continuous integration testing across
a wider range of ruby implementations.
= Backwards Compatibility
* The keys in the :key_hash entry passed to the :eager_loader proc
are now method references instead of column references. For most
associations, they are the same thing, but for associations using
the :key_column/:primary_key_column/:left_primary_key_column
options, the values could be different. If you were using one
of those options and had a custom eager_loader, you should switch
from indexing into the :key_hash option to just using the :id_map
option.
* The :key_hash entry passed to the :eager_loader proc is now no
longer guaranteed to contain key maps for associations other than
the one currently being eagerly loaded. Previously, it contained
key maps for all associations that were being eagerly loaded. If
you have a custom :eager_loader proc that accessed a key map for
a separate association that was being loaded concurrently, you'll
now have to build the key map manually if it doesn't exist.
* If you previously explicitly specified an :eager_loader_key option
when defining an association, you may need to change it so that it
is a method reference instead of a column reference, or possibly
just omit the option.
* If you have a custom :eager_loader proc for an association where
the default :eager_loader_key option references a method that
the model does not respond to (or raises an exception), you may
need to specify the :eager_loader_key=>nil option.
* In the pg_auto_parameterize extension, String values are no longer
automatically casted to text. This is because the default type of
a string literal in PostgreSQL is unknown, not text. This makes it
much less likely to require manual casts, but has the potential to
break existing code relying on the automatic cast to text. As a
workaround, any query that can no longer be automatically
parameterized after this change just needs to add manual casting
to text.
* Sequel now raises an exception if you attempt to clone associations
with different types, except if one type is one_to_many and the
other is one_to_one. Cloning from other types was usually a bug,
and raising an exception early will make it much easier to track
such bugs down.
* When running the plugin/extension and PostgreSQL adapter specs,
a json library is now required.
* The jdbc/postgres adapter array typecasting internals have been
modified, if you were relying on the internals, you may need to
update your code.
* The pg_array extension internals changed significantly. PGArray
no longer has any subclasses by default, as parsing is now done
in separate objects. Anyone relying on the pg_array internals
will need to update their code.
* The postgres adapter no longer sets up type conversion of int2vector
and money types, since in both cases the conversion was incorrect in
most cases. These types will now be returned as strings. If you are
relying on the conversion, you'll need to add your own custom type
procs.
sequel-5.63.0/doc/release_notes/3.38.0.txt

= New Features
* A pg_row extension has been added that supports PostgreSQL's
row-valued/composite types. You can register support for
specific row types:
DB.register_row_type(:address)
Then you can create values of that row type:
ad = DB.row_type(:address, ['555 Foo St.', 'Bar City', '98765'])
# or
ad = DB.row_type(:address, :street=>'555 Foo St.',
:city=>'Bar City', :zip=>'98765')
Which you can use in your datasets:
DB[:people].insert(:name=>'Me', :address=>ad)
If you are using the native postgres adapter, when retrieving
row type values, they will be returned as instances of the row
type, which are hash-like objects:
ad = DB[:people].get(:address)
ad[:street] # => '555 Foo St.'
ad[:city] # => 'Bar City'
ad[:zip] # => '98765'
If you are also using the pg_array extension, then arrays of
composite types are supported automatically. Composite
types can also include arrays of other types as well as other
composite types, though recursive composite types are not
allowed by PostgreSQL.
Using arrays and composite types brings one of the benefits
of document databases to PostgreSQL, allowing you to store
nested structures inside a single row.
* A pg_row_ops extension has been added that adds DSL support
for accessing members of row-valued/composite types. You
first create a row op:
r = Sequel.pg_row_op(:row_column)
Then you can get DSL support for accessing members of that
row_column via the #[] method:
r[:a] # (row_column).a
This works with composite types containing composite types:
r[:a][:b] # ((row_column).a).b
When used in conjunction with the pg_array_ops extension,
there is support for composite types that include arrays,
as well as arrays of composite types:
r[1][:a] # (row_column[1]).a
r[:a][1] # (row_column).a[1]
The extension offers additional support for referencing
a table's type when it contains a column with the same
name, see the RDoc for details.
* A pg_row plugin has been added, that works with the pg_row
extension, and allows you to represent row-valued types as
Sequel::Model objects (instead of the hash-like objects
they use by default). In your model class, you load the
plugin:
class Address < Sequel::Model(:address)
plugin :pg_row
end
Then you can use Address instances in your datasets:
ad = Address.new(:street=>'555 Foo St.',
:city=>'Bar City', :zip=>'98765')
DB[:people].insert(:name=>'Me', :address=>ad)
And if you are using the native postgres adapter, the dataset
will return the type as a model instance:
ad = DB[:people].get(:address)
ad.street # => '555 Foo St.'
ad.city # => 'Bar City'
ad.zip # => '98765'
* A pg_typecast_on_load plugin has been added. This plugin is
designed for use with the jdbc/postgres, do/postgres, and
swift/postgres adapters, and it is similar to the
typecast_on_load plugin. However, while the typecast_on_load
plugin uses setter methods, the pg_typecast_on_load plugin
uses the same code that the native postgres adapter uses for
typecasting.
* The tinytds adapter now supports a :textsize option to override
the default TEXTSIZE setting. The FreeTDS default is fairly
small (~64k), so if you want to use large blob or text columns,
you should probably set this to a value larger than the
largest text/blob you want to use.
* Sequel.expr when called with a symbol now splits the symbol and
returns an Identifier, QualifiedIdentifier, or AliasedExpression,
depending on the content of the symbol. Previously, it only
wrapped the symbol using a Wrapper.
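For example:

    Sequel.expr(:column)         # an Identifier
    Sequel.expr(:table__column)  # a QualifiedIdentifier: table.column
    Sequel.expr(:column___alias) # an AliasedExpression: column AS alias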
* Identifier#* and QualifiedIdentifier#* when called without any
argument now represent a selection of all columns from the
represented table:
Sequel.expr(:table).* # table.*
Sequel.expr(:schema__table).* # schema.table.*
This makes it easier to represent the selection of all columns
in a table without using the core extensions.
* Model#values now has a Model#to_hash alias.
* SQL::Blob values now have as, cast, and lit methods even if the
core extensions are not loaded.
= Other Improvements
* When loading multiple pg_* extensions into a Database instance,
the conversion procs are only reset once instead of once per
extension.
* All adapters that access PostgreSQL now store type conversion
procs, similar to the native postgres adapter. This has been
added to make it easier to write extensions that support
advanced PostgreSQL types.
* Database#schema output on PostgreSQL now includes the type oid
for each column.
* You can now register custom array types to specific Database
instances, using the :type_procs and :typecast_methods_module
options, so it is now possible to have custom array types
without affecting global state.
* Dropping of columns with defaults now works correctly on
Microsoft SQL Server. Before, it would fail as the related
constraint was not dropped first.
* The MySQL type "double(x,y)" is now recognized as a float type.
* The jdbc/jtds and jdbc/derby adapters now handle nil prepared
statement values in more cases.
* Blob prepared statement arguments are now handled correctly on
jdbc/db2 and jdbc/oracle.
* Sequel now works around a Time#nsec bug in JRuby 1.6 ruby 1.9 mode
when using Time values in prepared statements in the jdbc adapter.
* Java::JavaUtil::UUID types are now returned as ruby strings
when converting types in the jdbc adapter.
* Real boolean literals are now used on derby 10.7+. On derby <10.7
Sequel still uses (1 = 1) and (1 != 1) for true and false. This
allows you to use boolean columns with a true/false default on
derby 10.7+.
* Clobs are now treated as string types instead of blobs on derby,
since treating clob as blob doesn't work there.
* The swift adapter now supports an output identifier method.
* The swift adapter now returns blobs as SQL::Blob instances.
* The schema_dumper extension no longer produces code that requires
the core extensions.
* All of Sequel's specs now run without the core extensions loaded,
ensuring that none of the internals depend on the core extensions.
The only exception is the specs for the core extensions themselves.
= Backwards Compatibility
* The pg_* extensions no longer modify core classes if the
core_extensions extension is not loaded. All methods they added now
have equivalent methods on the main Sequel module:
Sequel.pg_array
Sequel.pg_array_op
Sequel.hstore
Sequel.hstore_op
Sequel.pg_json
Sequel.pg_range
Sequel.pg_range_op
* The Sequel::SQL::IdentifierMethods module has been removed. This
module was only included in Symbol if the core_extensions were
enabled. Since it only defined a single method, now the core
extensions just define that method directly on Symbol.
* The swift adapter now requires swift-db-{postgres,mysql,sqlite3}
gems instead of the swift gem. swift/postgres requires
swift-db-postgres 0.2.0+, swift/sqlite requires swift-db-sqlite
0.1.2+, and swift/mysql requires swift-db-mysql.
* Sequel will no longer typecast a string to a PostgreSQL array
or hstore column in a model column setter. This is because the
parsers that Sequel uses were designed to support only
PostgreSQL's output format. It's unlikely that a user would
provide that format for typecasting, and while there aren't known
security issues with the parsers, they were not designed to handle
arbitrary user input, so typecasting from string is no longer
allowed and will now raise an error.
The only reason such typecasting was allowed in the first place
was to work around issues in the jdbc/postgres, do/postgres, and
swift/postgres adapters when using the typecast_on_load plugin.
If you were previously using the typecast_on_load plugin for
hstore or array columns, you need to switch to using the new
pg_typecast_on_load plugin.
* The private get_conversion_procs method in the postgres adapter
no longer accepts an argument.
* The Sequel::Postgres::PGArray::DatabaseMethods singleton
define_array_typecast_method method has been removed. This
method was designed for internal use.
* The change to make Sequel.expr split symbols can cause the
following type of code to break:
Sequel.expr(:column___alias).desc
This is because expr now returns an AliasedExpression, which
doesn't support the desc method. However, as you can't
apply an order to an aliased expression, nobody should be
relying on this.
sequel-5.63.0/doc/release_notes/3.39.0.txt

= New Features
* A constraint_validations extension and plugin have been added,
which allow you to define validations when creating tables,
which are enforced by database constraints, and have those
validations be automatically discovered and used by your
Sequel::Model classes.
The extension is designed to be used in your migrations/schema
modification code:
DB.extension(:constraint_validations)
DB.create_constraint_validations_table
DB.create_table(:foos) do
primary_key :id
String :name
validate do
min_length 5, :name
end
end
This creates a database CHECK constraint that ensures that the
minimum length for the column is 5 characters. It also adds
metadata about the validation to the
sequel_constraint_validations table.
To have the model class automatically create validations, just
include the plugin in the model:
class Foo < Sequel::Model
plugin :constraint_validations
end
Note that MySQL does not enforce CHECK constraints (it parses
but ignores them), so using the extension on MySQL does not
actually enforce constraints at the database level, though it
still does support the automatic model validations if the plugin
is used.
* Dataset#count now takes an argument or a virtual row block,
allowing you to do:
DB[:table].count(:column_name)
DB[:table].count{function_name(column1, column2)}
When count is given an argument, instead of returning the total
number of rows, it returns the number of rows where the
argument has a non-NULL value.
* Database#copy_into has been added to the postgres adapter when
the pg driver is being used, and can be used for very fast
inserts into tables if you already have the input preformatted
in PostgreSQL text or CSV format.
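A sketch using preformatted CSV data (table, columns, and data are
hypothetical):

    DB.copy_into(:items, :format=>:csv, :columns=>[:id, :name],
      :data=>"1,abc\n2,def\n")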
* set_column_not_null has been added to the alter table generator,
for a nicer API:
alter_table(:t){set_column_not_null :col}
# instead of
alter_table(:t){set_column_allow_null :col, false}
Additionally, set_column_allow_null now defaults the second
argument to true for a nicer API:
alter_table(:t){set_column_allow_null :col}
# instead of
alter_table(:t){set_column_allow_null :col, true}
* Database#supports_regexp? has been added for checking if the
database supports Regexp in filters. Currently, only MySQL and
PostgreSQL support Regexps.
Attempting to use a Regexp on a database that doesn't support it
now raises an error when attempting to generate the SQL, instead
of sending invalid SQL to the database.
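For example (the table and column are hypothetical):

    if DB.supports_regexp?
      DB[:items].where(:name=>/^A/).all
    end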
* Sequel.char_length has been added for a cross platform
char_length function (emulated when char_length is not supported
natively by the database).
* Sequel.trim has been added for a cross platform trim function
(emulated when trim is not supported natively by the database).
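A quick sketch using both helpers (the table and column are
hypothetical):

    DB[:items].where{Sequel.char_length(:name) > 10}.
      select(Sequel.trim(:name))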
* ValidationFailed and HookFailed exceptions now have a model method
that returns the model instance related to the exception. This
makes it possible to use Model.create inside a begin/rescue block
and get access to the underlying instance if there is a validation
or before/around hook error.
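For example:

    begin
      Album.create(:name=>'')
    rescue Sequel::ValidationFailed => e
      e.model # the Album instance that failed validation
    end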
* The subclasses plugin now accepts a block, which is called with
each model class created. This is useful if you want to apply
changes to classes created in the future instead of just existing
classes.
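For example (the plugin applied inside the block is just
illustrative):

    Sequel::Model.plugin(:subclasses){|model| model.plugin :timestamps}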
* The validates_unique validation in the validation_helpers plugin
now accepts a :where option for a custom uniqueness filter. Among
other things this makes it easy to implement a case insensitive
uniqueness validation on a case sensitive column.
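A sketch scoping uniqueness to active rows (the active column is
hypothetical):

    def validate
      super
      validates_unique :name,
        :where=>proc{|ds, obj, cols| ds.where(:active=>true)}
    end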
* The threaded connection pools now support a
:connection_handling=>:disconnect option, which makes them disconnect
connections after use instead of returning them to the pool. This
makes it possible to completely control connection lifetime using
Database#synchronize.
* The pg_row_ops extension now has support for PGRowOp#*, for referencing
the members of the composite type as separate columns.
* MySQL's set type and default value are now recognized.
* bin/sequel now accepts a -c argument for running an arbitrary
code string instead of using an IRB prompt.
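For example (the connection URL is hypothetical):

    sequel -c "p DB.tables" postgres://localhost/app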
= Other Improvements
* Sequel now parses current date/timestamp column defaults when
parsing the schema for a table. The values will be returned
as Sequel::CURRENT_DATE for date columns and
Sequel::CURRENT_TIMESTAMP for timestamp columns.
The schema_dumper extension will work with these defaults, so
if you dump the schema for a table with a column that uses
a current timestamp default, the dumped schema will include
the default.
The defaults setter plugin also works with these changes, so
that when new model objects are instantiated, they get the
current Date/Time/DateTime values set.
* On MySQL and PostgreSQL, Sequel will now by default attempt
to combine multiple alter_table operations into a single
query where it believes it can do so correctly. This can
potentially improve performance ~N times, where N is the number
of alter table operations.
This can change the SQL used for old migrations (though it
shouldn't change the result), and is a potentially risky
change. This may be disabled by default in future versions
if it causes problems.
* The defaults_setter plugin now correctly sets false default
values.
* The schema_dumper plugin now preserves fractional seconds
in timestamp column defaults when dumping.
* Time->DateTime and DateTime->Time typecasts now retain
fractional seconds on ruby 1.8.
* Array arguments passed to most PGArrayOp methods are now
automatically wrapped in a PGArray. If you want to use this
support, you need to make sure to load both the pg_array
and pg_array_ops extensions.
* Sequel now does a better job of finding the sequence for a
given table on PostgreSQL, handling more corner cases. A small
side effect of this is that sequence names will sometimes be quoted.
* Some potential thread-safety issues when using Sequel with
PostgreSQL on a non-GVL ruby implementation have been fixed.
* Sequel now correctly caches the server version query on MySQL.
* Sets of alter_table operations on MySQL and Microsoft SQL Server
that require parsing the current database schema, where later
alter_table operations depend on earlier ones, should now work
correctly.
* You can now drop check constraints on tables on SQLite, though
doing so drops all check constraints on the table, not only the
specific check constraint given.
* The identity_map plugin no longer breaks if used with a model
without a primary key.
* Sequel::SQL::NegativeBooleanConstant now inherits from Constant
instead of BooleanConstant. This means that
Sequel::NULL == Sequel::NOTNULL
is now false instead of true.
* You can now override the convert_tinyint_to_bool settings on a
per-Dataset basis in the mysql and mysql2 adapters, though
the overriding is different depending on the adapter. Check the
commit log for details.
* timestamp(N) types are now recognized as datetime, which should
fix certain cases on Oracle.
* Dataset#insert now handles a single model instance argument
as a single value if the model uses the pg_row plugin.
* When joining a model dataset using a model class as the table
argument, a subselect is used unless the model is a simple select
from the underlying table.
* The specs now cleanup after themselves, dropping the tables that
they create for testing.
= Backwards Compatibility
* The defaults_setter plugin's behavior changed due to the
current date/timestamp support. Previously, it would not set
a value for the column, since the default wasn't recognized.
Therefore, the database would use the default value on insert,
which would be the database's current timestamp.
Now, the value is set to the current Date/Time/DateTime on
model object instantiation, so the database wouldn't use the
column default. Instead of the database's current timestamp
on insert, the column value will be the application's
current timestamp on model instantiation.
Users who don't want this behavior can remove the default values
in the model:
Model.default_values.delete(:column_name)
* Plain (non-model) datasets no longer allow insert to accept
a single model instance argument. Also, they no longer call
values on a single argument if the object responds to it.
* Plain (non-model) datasets no longer accept model classes as
tables in the join/graph methods. Also, they no longer call
table_name on the argument if the object responds to it.
* The schema_dumper extension now requires the eval_inspect
extension, which changes inspect output for
Sequel::SQL::Expression objects.
* Custom adapters that override Database#alter_table_sql_list now
need to make sure it returns an already flattened array.
* The identity_map_key method in the identity_map plugin now returns
nil instead of a random string if the given pk is nil.
sequel-5.63.0/doc/release_notes/3.4.0.txt

New Plugins
-----------
* A nested_attributes plugin was added allowing you to modify
associated objects directly through a model object, similar to
ActiveRecord's Nested Attributes.
Artist.plugin :nested_attributes
Artist.one_to_many :albums
Artist.nested_attributes :albums
a = Artist.new(:name=>'YJM',
:albums_attributes=>[{:name=>'RF'}, {:name=>'MO'}])
# No database activity yet
a.save # Saves artist and both albums
a.albums.map{|x| x.name} # ['RF', 'MO']
It takes most of the same options as ActiveRecord, as well as a
few additional options:
* :destroy - Allow destruction of nested records.
* :limit - For *_to_many associations, a limit on the number of
records that will be processed, to prevent denial of service
attacks.
* :remove - Allow disassociation of nested records (can remove the
associated object from the parent object, but not destroy the
associated object).
* :strict - Set to false to not raise an error message if a primary
key is provided in a record, but it doesn't match an existing
associated object.
If a block is provided, it is passed each nested attribute hash.
If the hash should be ignored, the block should return anything
except false or nil.
* A timestamps plugin was added for automatically adding
before_create and before_update hooks for setting values on
timestamp columns. There are a couple of existing external
plugins that handle timestamps, but the implementations are
suboptimal. The new built-in plugin supports the following
options (with the default in parentheses):
* :create - The field to hold the create timestamp (:created_at)
* :force - Whether to overwrite an existing create timestamp
(false)
* :update - The field to hold the update timestamp (:updated_at)
* :update_on_create - Whether to set the update timestamp to the
create timestamp when creating (false)
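For example:

    Album.plugin :timestamps, :update_on_create=>true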
* An instance_hooks plugin was added for adding hooks to specific
model instances:
obj = Model.new
obj.after_save_hook{do_something}
obj.save # calls do_something after the obj has been saved
All of the standard hooks are supported, except for
after_initialize. Instance level before hooks are executed in
reverse order of addition before calling super. Instance level
after hooks are executed in order of addition after calling super.
If any of the instance level before hook blocks return false, no
more instance level before hooks are called and false is returned.
Instance level hooks are cleared when the object is saved
successfully.
* A boolean_readers plugin was added for creating attribute? methods
for boolean columns. This can provide a nicer API:
obj = Model[1]
obj.active # Sequel default column reader
obj.active? # Using the boolean_readers plugin
You can provide a block when loading the plugin to change the
criteria used to determine if the column is boolean:
Sequel::Model.plugin(:boolean_readers) do |c|
db_schema[c][:db_type] =~ /\Atinyint/
end
This may be useful if you are using MySQL and have some tinyint
columns that represent booleans and others that represent integers.
You can turn the convert_tinyint_to_bool setting off and use the
attribute methods for the integer value and the attribute? methods
for the boolean value.
Other New Features
------------------
* Sequel now has support for converting Time/DateTime to local or UTC
time upon storage, retrieval, or typecasting.
There are three different timezone settings:
* Sequel.database_timezone - The timezone that timestamps use in
the database. If the database returns a time without an offset,
it is assumed to be in this timezone.
* Sequel.typecast_timezone - Similar to database_timezone, but used
for typecasting data from a source other than the database. This
is currently only used by the model typecasting code.
* Sequel.application_timezone - The timezone that the application
wants to deal with. All Time/DateTime objects are converted into
this timezone upon retrieval from the database.
Unlike most things in Sequel, these are only global settings; you
cannot change them per database. There are only three valid
timezone settings:
* nil (the default) - Don't do any timezone conversion. This is
the historical behavior.
* :local - Convert to local time/Consider time to be in local time.
* :utc - Convert to UTC/Consider time to be in UTC.
So if you want to store times in the database as UTC, but deal with
them in local time in the application:
Sequel.application_timezone = :local
Sequel.database_timezone = :utc
If you want to set all three timezones to the same value:
Sequel.default_timezone = :utc
There are three conversion methods that are called:
* Sequel.database_to_application_timestamp - Called on time objects
coming out of the database. If the object coming out of the
database (usually a string) does not have an offset, assume it is
already in the database_timezone. Return a Time/DateTime object
(depending on Sequel.datetime_class), in the application_timezone.
* Sequel.application_to_database_timestamp - Used when literalizing
Time/DateTime objects into an SQL string. Converts the object to
the database_timezone before literalizing them.
* Sequel.typecast_to_application_timestamp - Called when
typecasting objects for model datetime columns. If the object
being typecasted does not already have an offset, assume it is
already in the typecast_timezone. Return a Time/DateTime object
(depending on Sequel.datetime_class), in the
application_timezone.
Sequel does not yet support named timezones or per thread
modification of the timezone (for showing all timestamps in the
current user's timezone). Extensions to support both features are
planned for a future version.
* Dataset#truncate was added for truncating tables. Truncate allows
for fast removal of all rows in a table.
* Sequel now supports typecasting a hash to date, time, and datetime
types. This allows easy usage of Sequel with forms that split
the entry of these database types into separate form fields.
With this code, you can just have field names like:
date[year]
date[month]
date[day]
Rack will parse that into:
{'date'=>{'year'=>?, 'month'=>?, 'day'=>?}}
So then you can do:
obj.date = params['date']
# or
obj.set(params)
* validates_unique now takes a block that can be used to scope the
uniqueness constraint. This allows you to easily set up uniqueness
validations that are only necessary in a given scope. For example,
a validation on username, but only for active users (as inactive
users are soft deleted but remain in the table). You just pass a
block to validates_unique:
validates_unique(:name){|ds| ds.filter(:active)}
* The serialization plugin now supports json.
* Sequel now supports generic concepts of
CURRENT_{DATE,TIME,TIMESTAMP}. Most databases support these SQL
concepts, but not all, and some implementations act differently.
The Sequel::SQL::Constants module holds the three constants,
which are instances of SQL::Constant, an SQL::GenericExpression
subclass. This module is included in Sequel, so you can reference
the constants more easily (e.g. Sequel::CURRENT_TIMESTAMP).
It's separated out into a separate module so that you can just
include that module in the top level scope, allowing you to
reference the constants directly (e.g. CURRENT_TIMESTAMP).
DB[:events].filter{date < ::Sequel::CURRENT_DATE}
# or:
include Sequel::SQL::Constants
DB[:events].filter{date < ::CURRENT_DATE}
* Database#run was added for executing arbitrary SQL on a database.
It's an alias for Database#<<, but it allows for a nicer API inside
migrations, since you can now do:
run 'SQL'
instead of:
self << 'SQL'
You can also provide a :server option to run the SQL on the
given server/shard:
run 'SQL', :server=>:shard1
* Sequel::Model() can now take a database argument in addition to
a symbol or dataset argument. If a database is given, it'll create
an anonymous subclass attached to the given database. Other changes
were made to allow the following code to work:
class Item < Sequel::Model(DB2)
end
That will work correctly assuming a table named items in DB2.
* Dataset#ungrouped was added for removing a grouping from an
existing dataset. Also, Dataset#group when called with no arguments
or with a nil argument also removes any existing grouping instead
of resulting in invalid SQL.
* Model#modified? was added, letting you know if the model has been
modified. If the model hasn't been modified, calling
Model#save_changes will do nothing.
* SQL::OrderedExpression now supports #asc, #desc, and #invert.
Other Improvements
------------------
* The serialization and lazy_attribute plugins now add accessor
methods to a module included in the class, instead of to the
model class itself. This allows the methods to be overridden
in the class and work well with super, as well for the plugins
to work together on the same column. Make sure the
lazy_attributes accessor is setup before the serialization
accessor if you want to have a lazy serialized column.
* Calling the add_* method for a many_to_many association now saves the
record if the record is new. This makes it operate more similarly
to one_to_many associations. Previously, it raised an Error.
* Dataset#import now works correctly when called with a dataset.
Previously, it generated incorrect SQL.
* The JDBC adapter now converts byte arrays to/from SQL::Blob.
* The JDBC adapter now attempts to bind unknown types using
setObject instead of raising, so it can work with native Java
objects. It also binds boolean parameters correctly.
* Using multiple emulated ALTER TABLE statements (such as
drop_column) in a single alter_table block now works correctly
on SQLite.
* Database#indexes now works on JDBC for tables in a non-default
schema. It also now properly detects unique indexes on MSSQL.
* Database#schema on JDBC now accepts a :schema option. Also,
returned schema hashes now include a :column_size entry specifying
the maximum length/precision for the column, since the
:db_type entry doesn't contain the information on JDBC.
* Datasets without tables now work correctly on Oracle, so things
like DB.get(...) now work.
* A descriptive error message is given if you attempt to use
Sequel with the mysql.rb driver (which Sequel doesn't support).
* The postgres adapter now works correctly with a modified
postgres-pr that raises PGErrors instead of RuntimeErrors
(e.g. http://github.com/jeremyevans/postgres-pr).
* You now get a Sequel::InvalidOperation instead of a NoMethodError
if you attempt to update a dataset without a table.
* The inflection support has been modified to reduce code
duplication.
Backwards Compatibility
-----------------------
* Sequel now includes fractional seconds in timestamps for all
adapters except MySQL. It's possible that this may break
timestamp columns for databases that are not regularly tested.
* Sequel now includes timezone values in timestamps on Microsoft
SQL Server, Oracle, PostgreSQL and SQLite. The modification for
SQLite is probably the biggest cause for concern, since SQLite
stores times as text. If you have an SQLite database that uses
timestamps and is accessed by something other than Sequel, you
should make sure that it works with the timestamp format that
Sequel now uses.
* The default timestamp format used by Sequel now uses a space
instead of 'T' between the date and time parts, which could
possibly affect some databases that are not regularly tested.
* Attempting to insert into a grouped dataset or a dataset that
selects from multiple tables will now raise an Error. Previously,
it would ignore any GROUP or JOIN settings and generate bad SQL if
there were multiple FROM tables.
* Database#<< now always returns nil. Before, the return value was
adapter dependent.
* ODBC::Time and ODBC::DateTime values are now converted to the
Sequel.datetime_class. Before, ODBC::Time used Time and
ODBC::DateTime used DateTime regardless of the
Sequel.datetime_class setting.
* The default inflections were modified, fixing some obvious errors
and possibly changing some existing inflections. Further changes
to the default inflections are unlikely.
sequel-5.63.0/doc/release_notes/3.40.0.txt

= New Features
* Sequel now has vastly improved support for Microsoft Access.
* Sequel now supports the CUBRID database, with a cubrid adapter
that uses the cubrid gem, and a jdbc/cubrid adapter for accessing
CUBRID via JDBC on JRuby.
* The association_pks plugin now supports composite keys.
* Database#transaction now accepts a :disconnect=>:retry option,
in which case it will automatically retry the block if it
detects a disconnection. This is potentially dangerous, and
should only be used if the entire block is idempotent. There
is also no checking against an infinite retry loop.
* SQL::CaseExpression#with_merged_expression has been added, for
converting a CaseExpression with an associated expression to
one without an associated expression, by merging the expression
into each condition.
= Other Improvements
* Sequel now quotes arguments/columns in common table expressions.
* Sequel now handles nil values correctly in the pg_row extension.
* Sequel::Postgres::HStore instances can now be marshalled.
* Sequel now uses clob for String :text=>true types on databases that
don't support a text type.
* On PostgreSQL, Sequel now quotes channel identifier names when using
LISTEN/NOTIFY.
* On PostgreSQL, Sequel now correctly handles the case where named
type conversion procs have been added before the Database object is
instantiated.
* On DB2, Sequel now explicitly sets NOT NULL for unique constraint
columns instead of foreign key columns. DB2 does not allow columns
in unique constraints to be NULL, but does allow foreign key columns
to be NULL.
* In the oracle adapter, clob values are now returned as ruby strings
upon retrieval.
* Sequel now detects more types of disconnections in the postgres,
mysql, and mysql2 adapters.
* If a database provides a default column value that isn't a ruby
string, it is used directly as the ruby default, instead of causing
the schema parsing to fail.
= Backwards Compatibility
* Code using Sequel's oracle adapter that expected clob values to be
returned as OCI8::CLOB instances needs to be modified to work with
ruby strings.
* Because Sequel now quotes column names in common table expressions,
those names are now case sensitive, which could break certain poorly
coded queries. Similar issues exist with the quoting of channel
identifier names in LISTEN/NOTIFY on PostgreSQL.
* The private Database#requires_return_generated_keys? method
has been removed from the jdbc adapter. Custom jdbc subadapters
relying on this method should override the private
Database#execute_statement_insert method instead to ensure that
RETURN_GENERATED_KEYS is used for insert statements.
* The private Dataset#argument_list and #argument_list_append methods
have been removed.
sequel-5.63.0/doc/release_notes/3.41.0.txt
= New Features
* A connection_validator extension has been added, which
automatically determines if connections checked out from the pool
are still valid. If they are not valid, the connection is
disconnected and another connection is used automatically,
transparent to user code.
Checking if connections are valid requires a query, so this
extension causes a performance hit. For that reason, connections
are only checked by default if they have been inactive for more than
a configured amount of time (1 hour by default). You can choose to
validate connections on every checkout via:
    DB.pool.connection_validation_timeout = -1
However, this can cause a substantial performance hit unless you are
purposely using coarse connection checkouts via manual calls to
Database#synchronize (for example, in a Rack middleware). Using
coarse checkouts can greatly reduce the amount of concurrency that
Sequel supports (for example, limiting the number of concurrent
requests to the number of database connections), so this method is
not without its tradeoffs.
* Sequel.delay has been added for a generic form of delayed
evaluation. This method takes a block and delays evaluating it
until query literalization. By default, Sequel evaluates most
arguments immediately:
    foo = 1
    ds = DB[:bar].where(:baz=>foo)
    # SELECT * FROM bar WHERE (baz = 1)
    foo = 2
    ds
    # SELECT * FROM bar WHERE (baz = 1)
Using Sequel.delay, you can delay the evaluation:
    foo = 1
    ds = DB[:bar].where(:baz=>Sequel.delay{foo})
    # SELECT * FROM bar WHERE (baz = 1)
    foo = 2
    ds
    # SELECT * FROM bar WHERE (baz = 2)
* Sequel now supports the :unlogged option when creating tables on
PostgreSQL, to create an UNLOGGED table.
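For example (a minimal sketch; the table and columns are
illustrative):
    # UNLOGGED tables skip PostgreSQL's write-ahead log, trading
    # crash safety for write speed, which suits scratch data.
    DB.create_table(:page_cache, :unlogged=>true) do
      String :path
      String :body, :text=>true
    end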
* On SQLite, Database#transaction now supports a :mode option for
setting up IMMEDIATE/EXCLUSIVE SQLite transactions. Sequel also
supports a Database#transaction_mode accessor for setting the
default transaction mode on SQLite.
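For example (a minimal sketch; the table is illustrative):
    # Run this transaction as BEGIN IMMEDIATE TRANSACTION
    DB.transaction(:mode=>:immediate){DB[:t].insert(:a=>1)}
    # Use EXCLUSIVE as the default mode for future transactions
    DB.transaction_mode = :exclusive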
* Most pg_* extension objects (e.g. PGArray) now support the #as
method for creating an SQL::AliasedExpression object.
* The single_table_inheritance plugin now supports non-bijective
mappings. In lay terms, this means that a one-to-one mapping
of column values to classes is no longer required. You can now
have multiple column values that map to a single class in the
:model_map option, and specify a :key_chooser option to choose
which column value to use for the given model class.
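For example (a hedged sketch; the class, column, and mapped values
are illustrative):
    class Employee < Sequel::Model
      # Both 'manager' and 'exec' rows load as Manager; :key_chooser
      # picks which value to store when saving new instances.
      plugin :single_table_inheritance, :kind,
        :model_map=>{'staff'=>:Staff, 'manager'=>:Manager,
                     'exec'=>:Manager},
        :key_chooser=>lambda{|instance| instance.model.name.downcase}
    end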
* The touch plugin now handles the touching of many_to_many
associations, and other associations that use joined datasets.
* ConnectionPool#pool_type has been added. It returns a symbol
representing the type of connection pool in use (similar to
Database#database_type).
* Database#valid_connection? has been added for checking if a given
connection is still valid.
* Database#disconnect_connection is now part of the public API, and
can be used to disconnect a given connection.
= Other Improvements
* Uniqueness validation now correctly handles nil values.
Previously, it checked the underlying table for other rows where
the column IS NULL, but that is incorrect behavior. Sequel's new
(correct) behavior is to skip the uniqueness check if the column
is nil.
* Foreign key parsing is now supported on Microsoft SQL Server.
* Dataset#reverse and #reverse_order now accept virtual row blocks.
* Changing the name of the primary key column, and possibly other
schema changes on the primary key column, are now supported on
MySQL.
* Primary key columns are now specifically marked as NOT NULL on
SQLite, as non-integer primary keys on SQLite are not considered
NOT NULL by default.
* Failure to create a native prepared statement is now handled
better in the postgres, mysql, and mysql2 adapters.
* Firebird now emulates selecting data without an underlying table
(e.g. DB.get(1)).
* Finding the name of the constraint that sets column defaults on
Microsoft SQL Server now works correctly on JRuby 1.7.
* An additional type of disconnect error is now recognized in the
jdbc/sqlserver adapter.
* Many adapters have been fixed so that they don't raise an exception
if trying to disconnect an already disconnected connection.
* Many adapters have been fixed so that
Database#log_connection_execute logs and executes the given SQL
on the connection.
* Many adapters have been fixed so that
Database#database_error_classes returns an array of database
exception classes for that adapter.
* Database#log_exception now handles a nil exception message.
* Dataset#limit(nil, nil) now resets offset in addition to limit, but
you should still use Dataset#unlimited instead.
* A bin/sequel usage guide has been added to the documentation.
= Backwards Compatibility
* Sequel now treats clob columns as strings instead of blobs
(except on DB2 when use_clob_as_blob = true). This can make it
so the values are returned as strings instead of SQL::Blob values.
Since SQL::Blob is a String subclass, this generally will
not affect user code unless you are passing the values as input
to a separate blob column.
* The Database <-> ConnectionPool interface was completely changed.
Sequel no longer supports custom connection procs or disconnection
procs in the connection pools. The :disconnection_proc Database
option is no longer respected, and blocks passed to Database.new
are now ignored.
This change should not be user-visible, but if you had any code
that was monkeying with the connection pool internals, you may
need to modify it.
* Code that was using the uniqueness check to also check for presence
should add a separate check for presence. Such code was broken,
as it only worked if there was already a NULL column value in the
table. If you were relying on this broken behavior, you should
clean up the NULL data in the column and then mark the database
column as NOT NULL.
* If you have code that specifically abuses the fact that non-integer
primary keys on SQLite allow NULL values by default, it will no
longer work.
sequel-5.63.0/doc/release_notes/3.42.0.txt
= New Features
* Dataset#avg, #interval, #min, #max, #range, and #sum now
accept virtual row blocks, allowing you to more easily get
aggregate values of expressions based on the table:
    DB[:table].sum{some_function(column1, column2)} # => 134
    # SELECT sum(some_function(column1, column2)) FROM table
* Database#do has been added on PostgreSQL for using the DO
anonymous code block execution statement.
* Model.dataset_module now uses a Module subclass, which allows
you to call subset inside a dataset_module block, making
it easier to consolidate dataset method code:
    class Album < Sequel::Model
      dataset_module do
        subset(:gold){copies_sold > 500000}
      end
    end
* Database#copy_table and #copy_into are now supported on
jdbc/postgres.
* Sequel now supports deferred constraints on constraint types other
than foreign keys. The only databases that appear to implement
this are Oracle and PostgreSQL.
* Sequel now supports INITIALLY IMMEDIATE deferred constraints via
the :deferrable=>:immediate constraint/column option.
* Sequel now supports setting the default size of string columns,
via the default_string_column_size option or accessor. In some
cases, Sequel's default string column size of 255 is too large
(e.g. MySQL with utf8mb4 character set), and this allows you to
change it.
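For example (a minimal sketch; the size and table are illustrative):
    # All String columns created from now on default to varchar(191)
    DB.default_string_column_size = 191
    DB.create_table(:users){String :name}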
= Other Improvements
* Dataset#count and other methods now use a subselect in the case
where the dataset has an offset but no limit.
* If an error occurs while attempting to commit a transaction, Sequel
now attempts to rollback the transaction. Some databases do this
automatically, but not all. Among other things, this fixes issues
with deferred foreign key constraint violations on SQLite.
* When extending a model's dataset, the model's instance_dataset is
reset, ensuring that it will also be extended with the module.
* When passing an invalid argument to Dataset#filter, the exception
message now includes the argument.
* The force_encoding plugin now works with frozen string values.
* Public methods added to a model dataset_module now have model
class methods created for them even if the method was added outside
of a dataset_module block.
* On PostgreSQL, Database#indexes now includes a :deferrable entry
for each index hash, which will be true for unique indexes where
the underlying constraint is deferrable.
* On Microsoft SQL Server 2000, Dataset#update no longer includes a
limit (TOP), allowing it to work correctly.
= Backwards Compatibility
* Model.dataset_methods has been removed. This was used to store
blocks for methods created via def_dataset_method and subset.
The internals have been changed so that a dataset_module is
always used in these cases, therefore there was no longer a reason
for this method.
sequel-5.63.0/doc/release_notes/3.43.0.txt
= New Features
* A core_refinements extension has been added, which offers
refinement versions of Sequel's core extensions. This requires
the new experimental refinement support added in ruby 2.0, and
allows you to use the Sequel DSL methods in a file without
actually modifying the Symbol, String, Array, and Hash classes.
* A date_arithmetic extension has been added for performing
database-independent date calculations (adding/subtracting an
interval to/from a date):
    Sequel.extension :date_arithmetic
    e = Sequel.date_add(:date_column, :years=>1, :months=>2, :days=>3)
    DB[:table].where(e > Sequel::CURRENT_DATE)
In addition to providing the interval as a hash, you can also
provide it as an ActiveSupport::Duration object. This extension
is supported on 11 database types.
* Dataset#get can now take an array of multiple expressions to get
an array of values, similar to map/select_map:
    value1, value2 = DB[:table].get([:column1, :column2])
* Sequel can now handle [host.]database.schema.table qualified
tables on Microsoft SQL Server. To implement this support,
the split_qualifiers method has been added to Database and
Dataset for taking a possibly qualified identifier and splitting
it into an array of identifier strings.
* The string_stripper plugin now offers the ability to manually
specify which columns to skip stripping for via
Model.skip_string_stripping.
= Other Improvements
* The jdbc adapter now works with the new jdbc-* gems, which require
a manual load_driver step that the older jdbc-* gems did not
require.
* The string_stripper plugin no longer strips blob columns or values.
* Database#copy_into in both the postgres and jdbc/postgres adapters
has been fixed to better handle exceptions.
* Dataset#hash and Model#hash are now significantly faster.
* Lambda procs with 0 arity can now be used as virtual row blocks
on ruby 1.9. Previously, attempting to use a lambda proc with
0 arity as a virtual row block on ruby 1.9 would raise an exception.
* Schema-qualified composite types are now handled correctly in
the pg_row extension.
* Database#reset_primary_key_sequence on PostgreSQL now works
correctly when a default_schema is set.
* tinyint(1) unsigned columns on MySQL are now parsed as booleans
instead of integers if converting tinyint to boolean.
* The jdbc adapter now supports the jdbc-hsqldb gem, so you can
now install that instead of having to require the .jar manually.
* Blobs are now casted correctly on DB2 when the use_clob_as_blob
setting is false.
* Oracle timestamptz types are now handled correctly in the
jdbc/oracle adapter.
* Sequel now defaults to :prefetch_rows=>100 in the oracle
adapter, which can significantly improve performance.
* Sequel now defines respond_to_missing? where method_missing is
defined and the object also responds to respond_to?.
* Sequel::BasicObject now responds to instance_exec on ruby 1.8.
= Backwards Compatibility
* The meta_def method that was defined on Database, Dataset, and
Model classes and instances has been moved to an extension named
meta_def, and is no longer loaded by default. This method was
previously used internally, and it wasn't designed for external
use. If you have code that uses meta_def, you should now load the
extension manually:
Sequel.extension :meta_def
* The private _*_dataset_helper model association methods are no
longer defined. The AssociationReflection#dataset_helper_method
public method is also no longer defined.
* Dataset#schema_and_table now always returns strings (or nil).
Before, in some cases it would return symbols.
* Using a conditions specifier array with Dataset#get no longer
works due to the new multiple values support in Dataset#get.
So code such as:
    DB[:table].get([[:a, 1], [:b, 2]])
should be changed to:
    DB[:table].get(Sequel.expr([[:a, 1], [:b, 2]]))
sequel-5.63.0/doc/release_notes/3.44.0.txt
= New Features
* Dataset#paged_each has been added, for processing entire datasets
without keeping all rows in memory, even if the underlying driver
keeps all query results in memory. This is implemented using
limits and offsets, and requires an order (model datasets use a
default order by primary key). It defaults to fetching 1000
rows at a time, but that can be changed via the :rows_per_fetch
option.
This method is drop-in compatible with each. Previously, the
pagination extension's each_page method could be used for a
similar purpose, but users of each_page are now encouraged to
switch to paged_each.
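For example (a hedged sketch; the table name, fetch size, and the
handle_row helper are illustrative):
    DB[:big_table].order(:id).paged_each(:rows_per_fetch=>500) do |row|
      # At most 500 rows are held in memory at any time
      handle_row(row)
    end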
* Sequel now recognizes constraint violation exceptions on most
databases, and will raise specific exceptions for different
types of constraint violations, instead of the generic
Sequel::DatabaseError:
* Sequel::ConstraintViolation (generic superclass)
* Sequel::CheckConstraintViolation
* Sequel::NotNullConstraintViolation
* Sequel::ForeignKeyConstraintViolation
* Sequel::UniqueConstraintViolation
* Sequel::Postgres::ExclusionConstraintViolation
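This allows code to rescue specific violation classes instead of
parsing error messages, e.g. (a minimal sketch; the table and
columns are illustrative):
    begin
      DB[:users].insert(:email=>'a@example.com')
    rescue Sequel::UniqueConstraintViolation
      # Treat a duplicate row as an update instead
      DB[:users].where(:email=>'a@example.com').update(:active=>true)
    end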
* The :dataset association option can now accept an optional
association reflection argument. Instead of doing:
    Album.one_to_many :artists,
      :dataset=>{Artist...}
you can now do:
    Album.one_to_many :artists,
      :dataset=>{|r| r.associated_dataset...}
This second form will perform better.
* Temporary views are now supported on PostgreSQL and SQLite using
the :temp option to create_view.
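For example (a minimal sketch; the view name and dataset are
illustrative):
    # The view only lasts for the duration of the session
    DB.create_view(:recent_items,
      DB[:items].where{created_at > Date.today - 7}, :temp=>true)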
= Other Improvements
* Row fetching speed in the tinytds adapter has been increased by
up to 60%.
* Row fetching speed in the mysql2 adapter when using an identifier
output method has been increased by up to 50%.
* On databases where offsets are emulated via the ROW_NUMBER window
function (Oracle, DB2, Microsoft SQL Server), using an offset in
a subselect is now supported. For example, the following code
previously didn't work correctly with emulated offsets:
    # Second 5 rows ordered by column2 of the second 10 rows ordered
    # by column 1.
    DB[:table].order(:column1).limit(10, 10).
      from_self.order(:column2).limit(5, 5)
Row processing speed has been increased slightly for all adapters
that supported databases where offsets are emulated.
* Association method performance has improved by caching an
intermediate dataset. This can close to triple the performance
of the association_dataset method, and increase the performance
of the association method by close to 30%.
* Virtual Row performance has increased about 30% in the typical
case by using a shared VirtualRow instance.
* Database#create_or_replace_view is now emulated on databases that
don't support it directly by dropping the view before attempting
to create it.
* The columns_introspection extension can now introspect for simple
select * queries from subselects, and it can now use the cached
schema information in the database for simple select * queries
from tables.
* The identity_map plugin now works correctly with many-to-many
right-side composite keys.
* Dataset#last for Model datasets now works even if you don't specify
an order explicitly, giving the last entry by primary key. Note
that Dataset#first for model datasets still does not order by
default.
* The eager_each plugin no longer uses Object#extend at runtime.
* Database#remove_cached_schema is now thread-safe on non-GVL ruby
implementations.
* Connection errors in the jdbc adapter now provide slightly more
helpful messages.
* Sequel now uses the standard offset emulation code in the
jdbc/as400 adapter, instead of custom offset emulation code
specific to that adapter.
* Database#create_view with a dataset now works correctly when using
the pg_auto_parameterize extension.
* Database#columns no longer calls the row_proc.
* Dataset#schema_and_table no longer turns a literal string into a
non-literal string.
* The oracle adapter now works with a :prefetch_rows=>nil option,
which explicitly disables prefetching.
* The mock mssql adapter now sets a server_version so that more
parts of it work.
= Backwards Compatibility
* Offset emulation via ROW_NUMBER works by moving the query to a
subselect that also selects from the ROW_NUMBER window function,
and filtering on the ROW_NUMBER in the main query. Previously, the
ROW_NUMBER was also present in the output columns, and some
adapter code was needed to hide that fact. Now, the outer select
selects all of the inner columns in the subselect except for the
ROW_NUMBER, reducing the adapter code needed. This has the side
effect of potentially requiring a query (or multiple queries for
multiple subselects) to determine the columns to use. The
columns_introspection extension may reduce the number of queries
needed.
* The correlated_subquery eager limit strategy is no longer supported
on Microsoft SQL Server for many_*_many associations. As the
window_function eager limit strategy is supported there, there is
no reason to use the correlated_subquery strategy.
* The public AssociationReflection#_dataset_method method has been
removed.
* The private _*_dataset methods for associations (e.g.
_albums_dataset) have been removed.
* The private Dataset#offset_returns_row_number_column? method has
been removed.
* :conditions options for associations are now added to the
association dataset before the foreign key filters, instead of
after. This should have no effect unless you were introspecting
the dataset's opts or sql and acting on it.
* The added abilities in the columns_introspection plugin to use
cached schema for introspection can now cause it to return
incorrect results if the table's schema has changed since it was
cached by Sequel.
sequel-5.63.0/doc/release_notes/3.45.0.txt
= New Features
* Database#transaction now recognizes a :retry_on option, which
should contain an exception class or array of exception classes.
If the transaction raises one of the given exceptions, Sequel
will automatically retry the transaction block. It's a bad idea to
use this option if the transaction block is not idempotent.
By default, Sequel only retries the block 5 times,
to protect against infinite looping. You can change the number
of retries with the :num_retries option.
Users of the :disconnect=>:retry option are encouraged to switch
to :retry_on=>Sequel::DatabaseDisconnectError.
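For example (a minimal sketch; the table is illustrative, and the
block must be idempotent):
    DB.transaction(:retry_on=>Sequel::SerializationFailure,
                   :num_retries=>10) do
      DB[:accounts].where(:id=>1).update(:balance=>100)
    end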
* Dataset#escape_like has been added for escaping LIKE
metacharacters. This is designed for the case where part of
the LIKE pattern is based on user input that should not treat the
metacharacters specially.
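For example (a hedged sketch; the table and input are illustrative):
    ds = DB[:users]
    # A user-supplied "50%" now matches a literal percent sign
    # instead of acting as a wildcard.
    ds.where(Sequel.like(:name, "%#{ds.escape_like('50%')}%")).all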
* Serialization failures/deadlocks are now raised as
Sequel::SerializationFailure exception instances. This exception
class is a good candidate for the transaction :retry_on option.
* On PostgreSQL, you can now provide the :force_standard_strings
and :client_min_messages Database options to override the defaults
on a per-instance basis.
* On PostgreSQL, Database#tables and #views now recognizes a
:qualify option, which if true will return qualified identifiers
instead of plain symbols.
* Transaction isolation levels are now supported on Oracle, DB2,
and all jdbc subadapters using the JDBC transaction support.
* Dataset.def_mutation_method now accepts a :module option for
the module in which to define the methods (defaulting to self).
* An unlimited_update plugin has been added. Its sole purpose is to
eliminate a MySQL warning in replicated environments, since by
default Sequel::Model uses a LIMIT clause when updating on MySQL.
* The named_timezones extension now adds a
Sequel.tzinfo_disambiguator accessor to automatically handle
TZInfo::AmbiguousTime exceptions. This should be a callable object
that accepts two arguments, a DateTime instance and an array of
timezone periods, and returns the timezone period to use.
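For example (a minimal sketch):
    Sequel.extension :named_timezones
    # When a local time is ambiguous (e.g. at the end of DST),
    # pick the first matching period instead of raising.
    Sequel.tzinfo_disambiguator = proc{|datetime, periods| periods.first}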
= Other Improvements
* Sequel now handles JSON securely, specifying the
:create_additions=>false option when using JSON.parse. If you
really want to get the old vulnerable behavior back, override
Sequel.parse_json.
* The json_serializer and xml_serializer plugins are now secure
by default. Before, the default behavior of these plugins
allowed for round tripping, such that:
    Album.from_xml(album.to_xml) == album
Unfortunately, that requires that the deserialization allow
the setting of any column. Since the plugins also handle
associations, you could also set any column in any associated
object, even cascading to associated objects of those objects.
The new default behavior only allows deserialization to set
the same columns that mass-assignment would set, and not to
handle associated objects at all by default. The following
additional options are supported:
:fields :: The specific fields to set (this was already supported
by the json_serializer plugin).
:associations :: The specific associations to handle.
:all_columns :: The previous behavior of setting all columns.
:all_associations :: The previous behavior of setting all
associations.
Since JSON parsing no longer deserializes into arbitrary ruby
instances, from_json and array_from_json class methods have been
added to the json_serializer plugin, for deserializing into model
instances. These mirror the from_xml and array_from_xml class
methods in the xml_serializer plugin.
Note that the :all_columns and :all_associations options were
only added to make backwards compatibility easier. It is
likely they will be removed in Sequel 4, along with the
json_create class method.
* Sequel now attempts to use database specific error codes or
SQLState codes instead of regexp parsing to determine if a more
specific DatabaseError subclass should be used. This should make
error handling faster and more robust.
* Sequel now uses ESCAPE '\' when using LIKE, for similar behavior
across databases. Previously, no ESCAPE clause was used, so
behavior differed across databases, with most not using escaping,
and PostgreSQL, MySQL, and H2 defaulting to backslash as the escape
character.
* The query extension has been reimplemented and now uses a proxy
object instead of Object#extend.
* The :pool_timeout Database option now supports fractional seconds.
* Database#quote_identifier is now a public method.
* Metadata parsing (schema, indexes, foreign_key_list) on PostgreSQL
now correctly handles the case where an unqualified table name is
used and tables with that name exist in multiple schemas. It now
picks the first matching table in the schema_search_path, instead of
failing or returning results from all tables.
* Sequel::Model instances no longer attempt to typecast the money
type on PostgreSQL, since the previous typecast didn't work
correctly, and correct typecasting is locale-dependent.
* Sequel no longer picks up foreign keys for tables in other
databases when using Database#foreign_key_list on MySQL.
* A warning when using the mysql2 3.12 beta has been eliminated.
* A warning has been eliminated when using the jdbc/oracle adapter
on JRuby 1.7.
* Sequel's ilike emulation should now work by default on databases
without specific syntax support.
* Dataset#from_self! no longer creates a self referential dataset.
* Coverage testing now uses simplecov instead of rcov on ruby 1.9+.
= Backwards Compatibility
* The switch to using JSON.parse :create_additions=>false means
that if your app expected JSON to deserialize into arbitrary
ruby objects, it is probably broken. You should update your
application code to manually convert the deserialized hashes
into the ruby objects you want.
Note that it's not just this new version of Sequel that will
cause that, older versions of Sequel will break in the same
way if you update your JSON library to a version that is not
vulnerable by default.
This potentially affects the pg_json extension and serialization
plugin if you were expecting the JSON stored in the database
to be deserialized into arbitrary ruby objects.
See the json_serializer/xml_serializer changes mentioned in
the Other Improvements section.
* The reimplemented query extension is not completely backwards
compatible. For example, inside a query block, self refers to the
proxy object instead of a dataset, and calling methods that return
rows no longer raises an exception.
* The metadata parsing methods on PostgreSQL no longer work with
unqualified tables where the table is not in the schema search
path. This makes metadata parsing consistent with how datasets
operate. For tables outside the schema search path, you must
qualify it before use now.
Additionally, using a nonexistent table name will raise an
exception instead of returning empty results in some cases.
* The Dataset#def_mutation_method instance method has been removed.
This method added mutation methods directly on the dataset instance,
which is generally not desired. Using the def_mutation_method class
method with the :module option is now the recommended approach.
* The switch to using ESCAPE for LIKE characters is backwards
incompatible on databases that don't use escaping by default,
when backslash is used in a LIKE pattern as a regular character.
Now you have to double the backslash in the pattern.
* Database#database_error_regexps private method now can return any
enumerable yielding regexp/exception class pairs, it is no longer
specified to return a hash.
sequel-5.63.0/doc/release_notes/3.46.0.txt
= New Features
* Dataset#first! has been added. This is identical to #first,
except where #first would return nil due to no row matching,
#first! raises a Sequel::NoMatchingRow exception. The main
benefit here is that a standard exception class is now used,
so external libraries can deal with these exceptions appropriately
(such as web applications returning a 404 error).
* Dataset#with_pk! has been added to model datasets. Similar to
#first!, this raises a Sequel::NoMatchingRow exception instead of
returning nil if there is no matching row.
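For example (a hedged sketch; the model and error handling are
illustrative):
    begin
      album = Album.dataset.first!(:name=>'RF')
    rescue Sequel::NoMatchingRow
      # e.g. have a web application return a 404 error here
    end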
* A drop_foreign_key method has been added to the alter_table
generator:
    alter_table(:tab){drop_foreign_key :col}
This relies on foreign_key_list working and including the name
of the foreign key. Previously, you'd have to drop the foreign key
constraint before dropping the column in some cases.
* Column constraints can now be named using :*_constraint_name
options:
    create_table(:tab) do
      primary_key :id, :primary_key_constraint_name=>:pk_name
      foreign_key :t_id, :t, :foreign_key_constraint_name=>:fk_name,
        :unique=>true, :unique_constraint_name=>:uk_name
    end
This makes it easier to name constraints, which has always been
recommended as it makes it easier to drop such constraints in the
future.
* On Microsoft SQL Server, Dataset#cross_apply and #outer_apply have
been added to use CROSS/OUTER APPLY. These are useful if you
want to join a table to the output of a function that takes the
table as an argument.
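For example (a hedged sketch; the table-valued function is
illustrative):
    # SELECT * FROM t CROSS APPLY get_tags(t.id)
    DB[:t].cross_apply(Sequel.function(:get_tags, :t__id))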
= Other Improvements
* The connection pools are now faster when using the
:connection_handling=>:queue option.
* External connection pool classes can now be loaded automatically by
the :pool_class option.
* Database#each_server now raises if not given a block. Previously,
it just leaked Database references.
* On Microsoft SQL Server, ] characters are now escaped correctly in
identifiers.
* On PostgreSQL, infinite dates are also handled when using
Database#convert_infinite_timestamps. Previously, infinite dates
were incorrectly converted to 0000-01-01.
* The associations, composition, serialization, and dirty plugins
now clear caches stored in the instance in some additional cases,
such as when saving model instances when the dataset supports
insert_select.
* Model#validates_type in the validation_helpers plugin now handles
false values correctly.
* The string_stripper plugin has been fixed to not change the result
of Model.set_dataset.
* You can now drop primary key constraints on H2, using:
    alter_table(:tab){drop_constraint :foo, :type=>:primary_key}
* The jdbc/as400 adapter has been fixed, it was broken starting in
Sequel 3.44.0.
* A Security guide has been added explaining various security issues
to think about when using Sequel.
= Backwards Compatibility
* The change to make the associations, composition, serialization,
and dirty plugins clear caches after saving when the dataset supports
insert_select can break code that expected the previous behavior.
For example:
    artist = Artist[1]
    artist.has_albums # => false
    album = Album.new(:artist=>artist)
    def album.after_create
      super
      artist.update(:has_albums=>true)
    end
    album.save
    artist.has_albums # => false
Such code should either refresh the artist after saving the album,
or use album.artist.has_albums. You already had to do that if
the dataset did not support insert_select; the impetus for this
change was to make the behavior consistent.
* Decimal/numeric columns are now strictly typecast by default,
similar to integer and real/double precision columns. If you want
the previous loose typecasting for decimal/numeric columns,
use the looser_typecasting extension.
* External adapters that called Database.set_adapter_scheme with a
string should change to using a symbol.
* Dataset#select_map, #select_order_map, and #get now raise an
exception if they are passed a plain string inside an array.
If you do want to use a plain string, you now need to alias it:
    dataset.get([Sequel.as('string', :some_alias)])
= Sequel 4 Implementation Planning
* Sequel 4 implementation planning has begun. If you want to view
and/or provide feedback on the implementation plan, see
https://github.com/jeremyevans/sequel-4-plans
sequel-5.63.0/doc/release_notes/3.47.0.txt
= New Plugins
* An auto_validations plugin has been added, which automatically
adds not null, type, and unique validations based on information
obtained from parsing the database schema. If you don't
require customization of the validation error message per
column, this can significantly DRY up validation code. Currently
this plugin requires the database support index parsing; that
restriction will be removed in Sequel 4.
* An input_transformer plugin has been added, for automatically
running a transformation proc on all model column setter
input before use. This is a generalization of the
string_stripper plugin, allowing arbitrary modifications
to the input.
* An error_splitter plugin has been added, for splitting validation
errors applying to multiple columns into a separate validation
error per column. This is useful if you want to to include such
errors when using Errors#on to get all errors on the column. In
general, only uniqueness errors apply to multiple columns, so
those are the only errors likely to be affected.
= Other New Features
* Database.extension has been added, allowing you to load an
extension into all future databases. This is similar to loading a
plugin into Sequel::Model itself. For example, if you want all
Database instances to use the query_literals extension, run the
following before creating your Database instances:
Sequel::Database.extension :query_literals
* Database.after_initialize has been added for running a hook
on all new databases created.
* Model.default_set_fields_options has been added, allowing you
to set the default options for the #set_fields and #update_fields
methods. This is useful if you want to make :missing=>:raise
or :missing=>:skip the default behavior.
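For example (a minimal sketch):
    # Make set_fields/update_fields raise if the input hash is
    # missing any of the named fields
    Model.default_set_fields_options = {:missing=>:raise}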
* The :setter, :adder, :remover, and :clearer association options
have been added. These allow you to override the default
implementation used to modify the association. :setter affects
the *_to_one setter method, :adder the *_to_many add_* method,
:remover the *_to_many remove_* method, and :clearer the
*_to_many remove_all_* method.
Previously, you had to override a private method to get the same
behavior, this just offers a nicer API for that.
* A :keep_reference Database option has been added. When set to
false, a reference to the Database instance is not kept in
Sequel::DATABASES. This is designed for Database instances
created by libraries, so they don't accidentally get chosen as
the default Sequel::Model database.
* Model#modified! now accepts a column and marks that column
as changed. This is useful if you plan on mutating the column
value as opposed to reassigning it.
* Model#modified? now accepts a column and returns whether the
column has been changed.
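For example (a minimal sketch; the column is illustrative):
    album.modified?(:name) # => false
    # Mark the column as changed before mutating it in place,
    # so the change is picked up when saving.
    album.modified!(:name)
    album.name << ' (Remastered)'
    album.save_changes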
* The migrators now support an :allow_missing_migration_files
option, which makes them silently ignore errors related to
missing migration files.
* validates_schema_types has been added to validation_helpers,
which validates that the column values are instances of the
expected ruby type for the given database schema type. This
is a more robust version of the validates_not_string
extension, and users of validates_not_string are encouraged
to switch soon, as validates_not_string is going away in
Sequel 4.
validates_schema_type has been added to validation_class_methods,
which performs the same validation, but it requires the columns
be listed explicitly.
validates_type in validation_helpers has been expanded to
accept an array of allowable classes.
Related to this is the addition of Database#schema_type_class for
returning the type class(es) for the given schema type symbol.
* validates_not_null has been added to the validation_helpers
plugin. This is similar to the validates_presence validation,
but only checks for nil values, allowing empty/blank strings.
* In the caching plugin, when the :ignore_exceptions option is true,
exceptions raised when deleting an object from the cache are now
ignored correctly.
* On PostgreSQL, Sequel now supports a :search_path Database
option to automatically set the client connection search_path.
This allows you to control which schemas do not require
qualification, and in which order to check schemas when
referencing unqualified objects. If you were using the
default_schema setting, it is recommended that you switch
to using :search_path instead.
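For example (a hedged sketch; the connection URL and schema names
are illustrative):
    DB = Sequel.connect('postgres:///myapp',
      :search_path=>['app', 'public'])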
* The pg_array extension can now register array types on a
per-Database basis via Database#register_array_type. Previously,
only global registration of array types was allowed. Additionally,
when registering array types on a per-Database basis, the oids can
be looked up automatically, making it possible to register array
types with just a type name:
    DB.register_array_type(:interval)
* The pg_array extension now automatically creates conversion
procs for array types of all named types used by the
database. This means that if you use the pg_array and
pg_hstore extensions, the hstore[] type is now handled
correctly.
* The postgres adapter now supports :use_iso_date_format and
:convert_infinite_timestamps Database options. Previously,
use_iso_date_format was only a global setting, and
convert_infinite_timestamps could only be set after
initialization.
* Database#supports_schema_parsing? has been added to check
if schema parsing via the Database#schema method is
supported.
= Other Improvements
* A race condition related to prepared_sql for newly prepared
statements has been fixed.
* Dataset#get now works correctly if given an array with multiple
columns if there were no returned rows.
* The plugins that ship with Sequel now handle frozen model instances
correctly.
* Freezing of model instances now works correctly for models without
primary keys.
* Database constraints added with the constraint_validations
plugin now handle NULL values correctly if the :allow_nil=>true
setting is used.
* The pagination, pretty_table, query, schema_caching,
schema_dumper, and select_remove extensions can now be
loaded by Database#extension. If you are loading them
globally via Sequel.extension, switch to using
Database#extension, since that will be required starting
in Sequel 4.
* The lazy_attributes plugin no longer uses the identity_map plugin
internally, and eager loading lazy attributes now works correctly
without an active identity map.
* The many_to_one_pk_lookup plugin now handles many more corner
cases, and should be safe to enable by default.
* The static_cache plugin now has optimized implementations of
Model.map, .to_hash, and .to_hash_groups which work without a
database query. Model.count without arguments has also been
optimized to not require a database query.
* Fetching new records has been made faster when using the
update_primary_key plugin, since it was changed to cache the primary
key values lazily.
* When using the update_primary_key plugin, if the primary key
changes, clear the associations cache of all non-many_to_one
associations (since those will likely be based on the primary
key).
* The pg_typecast_on_load plugin no longer errors if given a
column that doesn't have a matching oid conversion proc.
* Handling of domain types on PostgreSQL has been significantly
improved. Domain type columns now have correct model
typecasting, and the pg_row extension correctly sets up
conversion procs for domain types inside composite types.
* Postgres::HStoreOp#- now automatically casts string input to
text, so that PostgreSQL doesn't assume the string is an
hstore.
* Postgres::PGRangeOp#starts_before and #ends_after have been
renamed to #ends_before and #starts_after. The previous
names were misleading. The old names are still available
for backwards compatibility, but they will be removed in
Sequel 4.
* The pg_row plugin now handles aliased tables correctly.
* Model#validate in the validation_class_methods plugin no
longer skips validate methods in superclasses or previously
loaded plugins.
* Loading the touch plugin into a model subclass after it has
been loaded into a model superclass no longer ignores
inherited touched associations.
* Sequel no longer resets the conversion procs for the
Database instance when using Database#extension to load a
pg_* extension that adds global conversion procs. Instead,
the global conversion procs are added to the instance-specific
conversion procs. The result of this is that manually added
conversion procs will not be lost if an extension is loaded
afterward.
* The jdbc adapter now references the driver class before loading
subadapter specific code, which can fix issues if the database
tries to connect on initialization (such as the jdbc/postgres
adapter if the pg_hstore extension is loaded previously).
* A guide describing Sequel's support for advanced PostgreSQL
features has been added.
= Backwards Compatibility
* If you have already used the constraint_validations plugin to
create validations with the :allow_nil=>true option, you should
drop and regenerate those constraints to ensure they handle NULL
values correctly.
* The change to make PostgreSQL automatically handle domain
types can break previous code that set up special conversions
and typecasts per domain type. In the schema parsing, if you
want to get the domain type information, it will be contained
in the :db_domain_type and :domain_oid schema entries.
* Sequel::Postgres.use_iso_date_format is now only defined if
you are using the postgres adapter. Previously, it could
be defined when using other adapters with a pg_* extension,
even though the setting had no effect in that case.
* The validation_class_methods plugin now copies validations into
the subclass upon inheritance, instead of recursing into the
superclass on validation. This makes it more similar to how
all the other Sequel plugins work. However, it also means that
if you add validations to a superclass after creating a
subclass, the subclass won't have those validations. Additionally
if you skip superclass validations in a child class after creating
a grandchild class, the grandchild class could still have the
parent class's validations.
* The validates_unique validation in validation_helpers no longer
attempts to do the uniqueness query if the underlying columns
have validation errors. The reasoning behind this is that if the
underlying columns are not valid, the uniqueness query can cause
a DatabaseError.
* If you were passing strings in hstore format to
Postgres::HStoreOp#-, you should manually cast them to hstore:
    hstore_op - Sequel.cast('a=>b', :hstore)
* The default validation error message for validates_type has been
modified.
* Database#schema_column_type was made public accidentally by an
adapter and a few extensions. That has been fixed, but if you
were calling it with an explicit receiver and it happened to
work by accident before, you'll need to update your code.
= Sequel 4 Implementation Planning
* Sequel 4 implementation work will begin shortly. All Sequel users
are encouraged to read about the proposed changes and provide
feedback on the implementation plan. For details, see
https://github.com/jeremyevans/sequel-4-plans.
sequel-5.63.0/doc/release_notes/3.48.0.txt
= Deprecation Warnings
The main change in Sequel 3.48.0 is the deprecation of Sequel
features that will be modified, moved, or removed in Sequel 4.
For the reasoning behind these changes, please review the
commits logs at
https://github.com/jeremyevans/sequel-4-plans/commits/master
== Deprecation Logging
If you use a deprecated method or feature, Sequel will by default
print a deprecation message and 10 lines of backtrace to stderr
to easily allow you to figure out which code needs to be
updated. You can change where the deprecation messages go and how
many lines of backtrace are given using the following:
    # Log deprecation information to a file
    Sequel::Deprecation.output = File.open('deprecated.txt', 'wb')
    # Turn off all deprecation logging
    Sequel::Deprecation.output = nil
    # Use 5 lines of backtrace when logging deprecation messages
    Sequel::Deprecation.backtrace_filter = 5
    # Use all backtrace lines when logging deprecation messages
    Sequel::Deprecation.backtrace_filter = true
    # Don't include backtraces in the deprecation logging
    Sequel::Deprecation.backtrace_filter = false
    # Select which backtrace lines to output
    Sequel::Deprecation.backtrace_filter = \
      lambda{|line, line_no| line_no < 3 || line =~ /my_app/}
== Major Change
* The core extensions will no longer be loaded by default. You will
have to use `Sequel.extension :core_extensions` to load the core
extensions.
* The Symbol#[] and Symbol#{<,>,<=,>=} methods will no longer be
provided by the core extensions on ruby 1.8. You will have to
use `Sequel.extension :ruby18_symbol_extensions` to use them.
== Core Behavior Changes
* Dataset#filter becomes an alias for #where, and #exclude
becomes an alias for #exclude_where. You will have to
use `DB.extension :filter_having` to get the previous behavior.
Dataset#and and #or will also only affect the WHERE clause.
* Dataset#and, #or, and #invert will not raise errors for no existing
filter.
* Dataset#select_more becomes an alias for #select_append.
* Dataset#select and #from will no longer consider a hash argument as
an alias specification. You will have to use
`DB.extension :hash_aliases` to get the previous behavior.
* Database#dataset and Dataset.new will not take an options hash.
* Database#transaction :disconnect=>:retry option will be removed.
* Calling Dataset#add_graph_aliases before #graph or #set_graph_aliases
will raise an Error.
* Datasets will have a frozen options hash by default.
* Dataset#set_overrides and #set_defaults will move to the
set_overrides extension.
* Sequel.empty_array_handle_nulls will be removed. To get the
empty_array_handle_nulls = false behavior, you will have to
use `DB.extension :empty_array_ignore_nulls`.
* The second argument to Dataset #union, #intersect, and #except must
be an options hash if it is given.
* The fourth argument to Dataset #join_table must be an options
hash if it is given.
* Using a mismatched number of placeholders and arguments in a
placeholder literal string will raise an error.
* Dataset#graph_each will move to the graph_each extension.
* Database#default_schema will be removed.
* Dataset#[]= will be moved to the sequel_3_dataset_methods
extension.
* Dataset#insert_multiple will be moved to the
sequel_3_dataset_methods extension.
* Dataset#set will be moved to the sequel_3_dataset_methods
extension.
* Dataset#to_csv will be moved to the sequel_3_dataset_methods
extension.
* Dataset#db= and #opts= setters will be moved to the
sequel_3_dataset_methods extension.
* Dataset#qualify_to and #qualify_to_first_source will be moved to
the sequel_3_dataset_methods extension.
* Remove default methods that raise Sequel::NotImplemented:
Database#connect, #execute, #foreign_key_list, #indexes, #tables,
and #views, and Dataset#fetch_rows.
* Sequel::SQL::Expression#to_s will be removed.
* All Dataset methods in Dataset::PUBLIC_APPEND_METHODS except for
#literal, #quote_identifier, and #quote_schema_table will be
removed.
* All Dataset methods in Dataset::PRIVATE_APPEND_METHODS will
be removed.
* Sequel k_require, ts_require, tsk_require, and
check_requiring_thread will be removed.
* Dataset.def_append_methods will be removed.
* Dataset#table_ref_append will be removed.
* Sequel.virtual_row_instance_eval accessor will be removed.
* Database#reset_schema_utility_dataset will be removed.
== Adapter Behavior Changes
* The Database#do method will be removed from the ado, db2, dbi,
informix, odbc, openbase, and oracle adapters.
* The jdbc adapter will raise an error when parsing the schema
for a table if it detects results for the same table name in
multiple schemas.
* The Database#query method will be removed from the informix
adapter.
* Dataset#lock on PostgreSQL will check the given lock mode.
* Sequel will check the client_min_messages setting before
use on PostgreSQL.
* Prepared statement placeholders on PostgreSQL will no longer
support implicit casting via :$x__type.
== Extension Behavior Changes
* The following extensions will no longer make global changes to
the Database and Dataset classes: null_dataset, pagination,
pretty_table, query, schema_caching, schema_dumper,
select_remove, and to_dot. These will be changed to
Database/Dataset specific extensions.
* The pg_auto_parameterize and pg_statement_cache extensions will
be removed.
* Sequel::Dataset.introspect_all_columns will be removed from the
columns_introspection extension.
* PGRangeOp#starts_before and #ends_after will be removed from the
pg_range_ops extension.
== Model Behavior Changes
* Model#initialize will accept only one argument.
* The after_initialize hook will be moved to a plugin.
* Move blacklist-based security methods (#set_except, #update_except,
.set_restricted_columns) to a plugin.
* The :eager_loader and :eager_grapher association option procs will
always be passed a hash.
* Model string column setters will consider array and hash input to
be invalid.
* Remove save taking multiple arguments for the columns to save.
Add Model#save :columns option for saving specific columns.
* Don't automatically choose a reciprocal association with a condition
or block.
* Don't automatically set up reciprocal associations if multiple ones
match.
* Model::Errors#[] will no longer modify the receiver. If you want
autovivification, use the active_model plugin.
* Model.set_primary_key will no longer accept composite keys as
multiple arguments.
* The correlated_subquery eager limit strategy will be removed.
* The following Model class dataset methods will be removed: print,
each_page, paginate, set, add_graph_aliases, insert_multiple, query,
set_overrides, set_defaults, to_csv.
* The Model.{destroy,delete,update} class dataset methods will be
moved to the scissors plugin.
* Model#pk_or_nil will be removed.
* Model#set_values will no longer be called directly by any Sequel
code, and overriding it is deprecated. It will be removed in Sequel
4.1.
* Model.cache_anonymous_models accessor will move to Sequel module.
* Model::InstanceMethods.class_attr_overridable and
.class_attr_reader will be removed.
* The :one_to_one option check for one_to_many associations will
be removed.
== Plugin Behavior Changes
* Public dataset methods will no longer have class methods
automatically added.
* The validates_not_string validation will be removed from the
validation_class_methods and validation_helpers plugin.
* In the json_serializer plugin, the to_json :root=>true option
means :root=>:collection instead of :root=>:both.
* In the json_serializer plugin, the to_json :naked option will
default to true, and there will not be a way to add the JSON.create_id
automatically.
* In the json_serializer plugin, from_json will no longer automatically
delete the JSON.create_id key from the input hash.
* The #to_json and #to_xml :all_columns and :all_associations options
in the json_serializer and xml_serializer plugins will be removed.
* The Model.json_create method will be removed from the
json_serializer plugin.
* The validates_type validation will raise validation errors for nil
if :allow_nil=>true is not used.
* auto_validate_presence_columns will be removed from the
auto_validations plugin.
* The identity_map plugin will be removed.
== Internal Changes
* The sequel_core.rb and sequel_model.rb files will be removed.
* Dataset#{quote_identifiers,identifier_output_method,
identifier_input_method} will assume Database implements the
methods.
= Forwards Compatibility
Not all changes planned in Sequel 4 have deprecation warnings.
The following changes will be made in Sequel 4 but do not have
deprecation warnings in 3.48.0:
* The threaded connection pools will default to
:connection_handling=>:queue. You can manually set
:connection_handling=>:stack to get the current behavior.
* Dataset#join_table will default to :qualify=>:deep. You can
manually set :qualify=>:symbol to get the current behavior. This
can be set at a global level by overriding
Dataset#default_join_table_qualification.
* Model.raise_on_typecast_failure will default to false. Set this to
true to get the current behavior of raising typecast errors in the
setter methods.
* Model#save will no longer call Model#_refresh or Model#set_values
internally after an insert. Manually refreshes will be treated
differently than after creation refreshes in Sequel 4.
* On SQLite, integer_booleans will be true by default. Set this to
false to get the current behavior of 't' for true and 'f' for false.
* On SQLite, use_timestamp_timezones will be false by default. Set
this to true to get the current behavior with timezone information
in timestamps.
* The default value for most option hash arguments will be an empty
frozen hash. If you are overriding methods and modifying option
hashes, fix your code.
* The defaults_setter plugin will work in a lazy manner instead of
an eager manner. If you must have the values hash contain defaults
for new objects (instead of just getting defaults from getter
methods), you'll need to fork the current plugin.
* Model#set_all will allow setting the primary key columns.
* The many_to_one_pk_lookup plugin will be integrated into the
default associations support.
* The association_autoreloading plugin will be integrated into the
default associations support.
* Plugins will extend the class with ClassMethods before including
InstanceMethods in the class.
* Dataset#get, #select_map, and #select_order_map will automatically
add aliases for unaliased expressions if given a single expression.
* Database#tables and #views on PostgreSQL will check against
the current schemas in the search path.
* Sequel::SQL::SQLArray alias for ValueList will be removed.
* Sequel::SQL::NoBooleanInputMethods will be removed.
* Sequel::NotImplemented will be removed.
* Sequel::Model::EMPTY_INSTANCE_VARIABLES will be removed.
* Sequel will no longer provide a default database for the adapter or
integration specs.
= New Features
* You can now choose which Errors class to use on a per model basis
by overriding Model#errors_class.
* The following Database methods have been added to check for support:
supports_index_parsing?, supports_foreign_key_parsing?,
supports_table_listing?, supports_view_listing?.
* The pg_hstore_ops extension now integrates with the pg_array,
pg_hstore, and pg_array_ops extensions, allowing you to pass in
arrays and hashes to be treated as PGArrays and HStores, and
returning ArrayOps for PostgreSQL functions/operators that
return arrays.
* Sequel.object_to_json and Sequel.json_parser_error_class
have been added and all internal json usage uses them, so you
can now override these methods if you want to use an alternative
json library with Sequel.
* The association_proxies plugin now accepts a block allowing the
user control over which methods are proxied to the dataset or
the cached array of instances. You can base the decision on
where to send the method using a variety of factors including
the method name, the method arguments, the state of the current
instance, or the related association. Here's an example of a
simple case just depending on the name of the method:
    Model.plugin :association_proxies do |opts|
      [:find, :where, :create].include?(opts[:method])
    end
If the block returns true, the method is sent to the dataset,
otherwise it is sent to the array of associated objects.
* The auto_validations plugin now accepts a :not_null=>:presence
option, for doing a presence validation instead of a not_null
validation. This is useful for databases with NOT NULL
constraints where you also want to disallow empty strings.
* The auto_validations plugin now validates against explicit nil
values in NOT NULL columns that have defaults.
* The constraint_validations plugin now reflects validations, using
Model.constraint_validation_reflections.
    Model.constraint_validation_reflections[:column]
    # => [[:presence, {}],
    #     [:max_length, {:argument=>255, :message=>'just too long'}]]
* The constraint_validations plugin can now be set to pass specific
validations options to the validation_helpers plugin. This can be
useful if using the auto_validations plugin with this plugin to
avoid duplicate error messages for nil values:
    Model.plugin :constraint_validations,
      :validates_options=>{:presence=>{:allow_nil=>true}}
* The named_timezones extension can now be loaded as a database
extension, which allows for automatic conversions of string
timezones:
    DB.extension :named_timezones
    DB.timezone = 'America/Los_Angeles'
* Offsets are now emulated by Microsoft Access using a combination
of reverse orders and total counts. This is slow, especially on
large datasets, but probably better than having no support at all.
It is also possible to use the same code to support Microsoft
SQL Server 2000, but as Sequel does not support that (minimum
supported version is 2005), you have to do it manually:
Sequel.require 'adapters/utils/emulate_offset_with_reverse_and_count'
DB.extend_datasets Sequel::EmulateOffsetWithReverseAndCount
= Other Improvements
* Dataset#clone is now faster.
* Database methods that create datasets (fetch, from, select, get)
are now faster.
* Model.with_pk and .with_pk! are now faster.
* Dataset#or now just clones if given an empty argument, similar
to Dataset#where.
* Sequel now correctly frees statements after using them in the
ibmdb adapter. Previously, they weren't freed until GC, which
could result in errors if all available handles are in use.
* Dataset creation is now faster on Microsoft SQL Server.
* The mediumint and mediumtext types are now recognized on MySQL.
* The ado adapter now handles disconnecting an already disconnected
connection.
* The auto_validations plugin now works on databases that don't
support index parsing. However, it will not set up automatic
uniqueness validations on such databases.
* The validation_helpers plugin is now stricter in some cases when
checking for nil values, using a specific nil check instead of a
general falsy check.
* The table inheritance plugins now correctly handle usage of
set_dataset in a subclass.
* The bin/sequel command line tool now has specs.
= Backwards Compatibility
* Sequel now uses aliases for many internal Dataset#get calls, such
as those used by table_exists? and max.
* Sequel no longer uses class variables internally. Instead,
instance variables of the Sequel::Database class are used.
* Sequel now sets up the identifier mangling methods on Database
initialization instead of on first use.
* The private Database#adapter_initialize method has been added for
per adapter configuration. All internal adapters have been switched
to use this method instead of overriding initialize, and all
external adapters should as well. This makes sure that Database
instances are not added to Sequel::DATABASES until they have been
completely initialized.
* Virtual row blocks no longer convert their return values to an array.
Among other things, this means that having a virtual row block return
a hash works as expected.
* The private Dataset#hash_key_symbol method now only takes a single
argument.
* Database#constraint_validations in the constraint_validations plugin
now returns raw hash rows, instead of arrays of validation method
call arguments.
* Dataset#count now uses a lowercase count function in the SQL.
* Passing a non-String or Hash as the first argument to an adapter
method (e.g. Sequel.postgres(1)) now raises an error. Before, this used
to work on some adapters that implicitly converted the database
name to a string.
* The stats and dcov rake tasks were removed.
sequel-5.63.0/doc/release_notes/3.5.0.txt

New Plugins
-----------
* A class_table_inheritance plugin has been added, supporting model
inheritance in the database using a table-per-model-class approach.
Each table stores only attributes unique to that model or subclass
hierarchy.
For example, with this hierarchy:
Employee
/ \
Staff Manager
|
Executive
the following database schema may be used (table - columns):
* employees - id, name, kind
* staff - id, manager_id
* managers - id, num_staff
* executives - id, num_managers
The class_table_inheritance plugin assumes that the main table
(e.g. employees) has a primary key field (usually
autoincrementing), and all other tables have a foreign key of the
same name that points to the same key in their superclass's table.
For example:
* employees.id - primary key, autoincrementing
* staff.id - foreign key referencing employees(id)
* managers.id - foreign key referencing employees(id)
* executives.id - foreign key referencing managers(id)
When using the class_table_inheritance plugin, subclasses use joined
datasets:
Employee.dataset.sql # SELECT * FROM employees
Manager.dataset.sql # SELECT * FROM employees
# INNER JOIN managers USING (id)
Executive.dataset.sql # SELECT * FROM employees
# INNER JOIN managers USING (id)
# INNER JOIN executives USING (id)
This allows Executive.all to return instances with all attributes
loaded. The plugin overrides deleting, inserting, and updating
in the model to work with multiple tables, by handling each table
individually.
This plugin allows and encourages the use of a :key option to mark
a column holding the class name. This allows methods on the
superclass to return instances of specific subclasses.
a = Employee.all # [#<Staff>, #<Manager>, #<Executive>]
This plugin requires the lazy_attributes plugin and uses it to
handle subclass specific attributes that would not be loaded
when calling superclass methods (since those wouldn't join
to the subclass tables). For example:
a.first.values # {:id=>1, :name=>'S', :kind=>'Staff'}
a.first.manager_id # Loads the manager_id attribute from the
# database
The class_table_inheritance plugin requires JOIN USING and
therefore is not supported on H2 or Microsoft SQL Server, which do
not support that SQL-92 feature.
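  A minimal sketch of enabling the plugin for the hierarchy
  above (the :kind key column matches the example schema):

    class Employee < Sequel::Model
      plugin :class_table_inheritance, :key=>:kind
    end
    class Staff < Employee; end
    class Manager < Employee; end
    class Executive < Manager; end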
* An association_dependencies plugin was added for deleting,
destroying, or nullifying associated objects when destroying a
model object. This just gives an easy way to add the necessary
before and after destroy hooks. The following association types
support the following dependency actions:
* :many_to_many - :nullify (removes all related entries in join
table)
* :many_to_one - :delete, :destroy
* :one_to_many - :delete, :destroy, :nullify (sets foreign key to
NULL for all associated objects)
This plugin works directly with the association datasets and does
not use any cached association values. The :delete action will
delete all associated objects from the database in a single SQL
call. The :destroy action will load each associated object from the
database and call the destroy method on it.
The plugin call takes a hash of association symbol keys and
dependency action symbol values. Alternatively, you can specify
additional dependencies later using add_association_dependencies:
Business.plugin :association_dependencies, :address=>:delete
# or:
Artist.plugin :association_dependencies
Artist.add_association_dependencies :albums=>:destroy,
:reviews=>:delete, :tags=>:nullify
* A force_encoding plugin was added that forces the encoding of
strings used in model instances. When model instances are loaded
from the database, all values in the hash that are strings are
forced to the given encoding. Whenever you update a model column
attribute, the resulting value is forced to a given encoding if the
value is a string. There are two ways to specify the encoding.
You can either do so in the plugin call itself, or via the
forced_encoding class accessor:
class Album < Sequel::Model
plugin :force_encoding, 'UTF-8'
# or
plugin :force_encoding
self.forced_encoding = 'UTF-8'
end
This plugin only works on ruby 1.9, since strings don't have
encodings in 1.8.
* A typecast_on_load plugin was added, for fixing bad database
typecasting when loading model objects. Most of Sequel's database
adapters don't have complete control over typecasting, and may
return columns that aren't typecast correctly (with correct being
defined as how the model object would typecast the same column
values).
This plugin modifies Model.load to call the setter methods (which
typecast by default) for all columns given. You can either specify
the columns to typecast on load in the plugin call itself, or
afterwards using add_typecast_on_load_columns:
Album.plugin :typecast_on_load, :release_date, :record_date
# or:
Album.plugin :typecast_on_load
Album.add_typecast_on_load_columns :release_date, :record_date
If the database returns release_date and record_date columns as
strings instead of dates, this will ensure that if you access those
columns through the model object, you'll get Date objects instead of
strings.
* A touch plugin was added, which adds Model#touch for updating an
instance's timestamp, as well as touching associations when an
instance is updated or destroyed.
The Model#touch instance method saves the object with a modified
timestamp. By default, it uses the :updated_at column, but you can
set which column to use. It also supports touching of associations,
so that when the current model object is updated or destroyed, the
associated rows in the database can have their modified timestamp
updated to the current timestamp. Example:
class Album < Sequel::Model
plugin :touch, :column=>:modified_on, :associations=>:artist
end
* A subclasses plugin was added, for recording all of a model's
subclasses and descendent classes. Direct subclasses are available
via the subclasses method, and all descendent classes are available
via the descendents method:
c = Class.new(Sequel::Model)
c.plugin :subclasses
sc1 = Class.new(c)
sc2 = Class.new(c)
ssc1 = Class.new(sc1)
c.subclasses # [sc1, sc2]
sc1.subclasses # [ssc1]
sc2.subclasses # []
ssc1.subclasses # []
c.descendents # [sc1, ssc1, sc2]
The main use case for this is if you want to modify all models
after the model subclasses have been created. Since mutable
options are copied when subclassing, modifying parent classes
does not affect current subclasses, only future ones. The
subclasses plugin allows you get all subclasses so that you can
easily modify them. The plugin only records subclasses
created after the plugin call, though.
* An active_model plugin was added, giving Sequel::Model an
ActiveModel compliant API, insofar as it passes the
ActiveModel::Lint tests.
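  A brief usage sketch (persisted? is part of the lint API):

    Album.plugin :active_model
    Album.new.persisted? # => false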
New Extensions
--------------
* A named_timezones extension was added, allowing you to use named
timezones such as "America/Los_Angeles" (the default Sequel
timezone support only supports UTC or local time). This extension
requires TZInfo. It also sets the Sequel.datetime_class to
DateTime, so database timestamps will be returned as DateTime
instances instead of Time instances. This is because ruby's
Time class doesn't support timezones other than UTC and local time.
This extension allows you to pass either strings or TZInfo::Timezone
instances to Sequel.database_timezone=, application_timezone=, and
typecast_timezone=. If a string is passed, it is converted to a
TZInfo::Timezone using TZInfo::Timezone.get.
Let's say you have the database server in New York and the
application server in Los Angeles. For historical reasons, data
is stored in local New York time, but the application server only
services clients in Los Angeles, so you want to use New York
time in the database and Los Angeles time in the application. This
is easily done via:
Sequel.database_timezone = 'America/New_York'
Sequel.application_timezone = 'America/Los_Angeles'
Then, before timestamps are stored in the database, they are
converted to New York time. When timestamps are retrieved from the
database, they are converted to Los Angeles time.
* A thread_local_timezones extension was added. This allows you to
set a per-thread timezone that will override the default global
timezone while the thread is executing. The main use case is for
web applications that execute each request in its own thread, and
want to set the timezones based on the request. The most common
example is having the database always store time in UTC, but have
the application deal with the timezone of the current user. That
can be done with:
Sequel.database_timezone = :utc
# In each thread:
Sequel.thread_application_timezone = current_user.timezone
This extension is designed to work with the named_timezones
extension.
* An sql_expr extension was added that adds .sql_expr methods to
all objects, giving them easy access to Sequel's DSL:
1.sql_expr < :a # 1 < a
false.sql_expr & :a # FALSE AND a
true.sql_expr | :a # TRUE OR a
~nil.sql_expr # NOT NULL
"a".sql_expr + "b" # 'a' || 'b'
Proc#sql_expr uses a virtual row:
proc{[[a, b], [a, c]]}.sql_expr | :x
# (((a = b) AND (a = c)) OR x)
* A looser_typecasting extension was added, for using to_f and to_i
instead of the more strict Kernel.Float and Kernel.Integer when
typecasting floats and integers. To use it, you should extend the
database with the Sequel::LooserTypecasting module after loading
the extension:
Sequel.extension :looser_typecasting
DB.extend(Sequel::LooserTypecasting)
This makes the behavior more like ActiveRecord:
a = Artist.new(:num_albums=>'a')
a.num_albums # => 0
Other New Features
------------------
* Associations now support composite keys. All of the :*key
options now accept arrays of symbols instead of plain symbols.
Example:
Artist.primary_key # [:name, :city]
Album.many_to_one :artist, :key=>[:artist_name, :artist_city]
Artist.one_to_many :albums, :key=>[:artist_name, :artist_city]
All association types are supported, including the built-in
many_to_many association and the many_through_many plugin. Both
methods of eager loading work with composite keys for all
association types. Setter and add/remove/remove_all methods
also now work with composite keys.
* Associations now respect a :validate option, which can be set to
false to not validate when implicitly saving associated objects.
There isn't a lot of implicit saving in Sequel's association
methods, but this gives the user the control over validation when
the association methods implicitly save an object.
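  For example:

    Artist.one_to_many :albums, :validate=>false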
* In addition to the regular association methods, the
nested_attributes plugin was also updated to respect the
:validate_association option. It was also modified to not validate
associated objects twice, once when the parent object was validated
and again when the associated object was saved. Additionally, if
you pass :validate=>false to the save method when saving the parent
object, it will no longer attempt to validate associated objects
when saving them.
* Dataset#insert and #insert_sql were refactored and now support the
following API:
* No arguments - Treat as a single empty hash argument
* Single argument:
* Hash - Use keys as columns and values as values
* Array - Use as values, without specifying columns
* Dataset - Use a subselect, without specifying columns
* LiteralString - Use as the values
* 2 arguments:
* Array, Array - Use first array as keys, second as values
* Array, Dataset - Use a subselect, with the array as columns
* Array, LiteralString - Use LiteralString as the values, with
the array as the columns
* Anything else: Treat all given values as an array of values
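  A few illustrative calls (table and column names are
  hypothetical):

    DB[:items].insert                     # empty hash
    DB[:items].insert(:a=>1, :b=>2)       # hash
    DB[:items].insert([1, 2])             # array of values
    DB[:items].insert([:a, :b], [1, 2])   # columns, values
    DB[:items].insert([:a, :b], DB[:old]) # columns, subselect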
* Graphing now works with previously joined datasets. The main use
case of this is when eagerly loading (via eager_graph) model
associations for models backed by joined datasets, such as those
created by the class_table_inheritance plugin.
* Sequel.virtual_row was added allowing you to easily use the
VirtualRow support outside of select, order, and filter calls:
net_benefit = Sequel.virtual_row{revenue > cost}
good_employee = Sequel.virtual_row{num_commendations > 0}
fire = ~net_benefit & ~good_employee
demote = ~net_benefit & good_employee
promote = net_benefit & good_employee
DB[:employees].filter(fire).update(:employed=>false)
DB[:employees].filter(demote).update(:rank=>:rank-1)
DB[:employees].filter(promote).update(:rank=>:rank+1)
* When Sequel wraps exception in its own classes (to provide database
independence), it now keeps the wrapped exception available in
a wrapped_exception accessor. This allows you to more easily
determine the wrapped exception class, without resorting to parsing
the exception message.
begin
DB.run('...')
rescue Sequel::DatabaseError => e
case e.wrapped_exception
when Mysql::Error
...
when PGError
...
end
end
* The MySQL adapter now supports a Dataset#split_multiple_result_sets
method that yields arrays of rows (one per result set), instead of
rows. This allows you to submit multiple statements at the same
time (or call a stored procedure that returns multiple result
sets), and know which rows are related to which result sets.
This violates a lot of Sequel's internal assumptions and should be
used with care. Existing row_procs are modified to work correctly,
but graphing will not work on these datasets.
* The ADO adapter now accepts a :conn_string option and uses that
as the full ADO connection string. This can be used to connect to
any datasource ADO supports, such as Microsoft Excel.
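  A hypothetical example connecting to an Excel file (the
  provider and path are illustrative):

    DB = Sequel.ado(:conn_string=>'Provider=Microsoft.Jet.OLEDB.4.0;' \
      'Data Source=C:\docs\sheet.xls;Extended Properties=Excel 8.0;')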
* The Microsoft SQL Server shared adapter now supports a
Database#server_version method.
* The Microsoft SQL Server shared adapter now supports updating and
deleting from joined datasets.
* The Microsoft SQL Server shared adapter now supports a
Dataset#output method that uses the OUTPUT clause.
* Model#_save now calls either Model#_insert or Model#_update for
inserting/updating the row in the database. This allows for easier
overriding when you want to allow creating and updating model
objects backed by a joined dataset.
* Dataset#graph now takes a :from_self_alias option specifying the
alias to use for the subselect created if the receiver is a joined
but not yet graphed dataset. It defaults to the first source table
in the receiver.
Other Improvements
------------------
* Typecasting model attributes is now done before checking existing
values, instead of after. Before, the code for the model attribute
setters would compare the given value to the existing entry. If it
didn't match, the value was typecasted and then assigned. That led
to the following situation:
a = Album[1]
a.num_tracks # => 10
params # => {'num_tracks'=>'10'}
a.set(params)
a.changed_columns # => [:num_tracks]
The new behavior typecasts the value first, and only sets it and
records the column as changed if it doesn't match the typecasted
value.
* Model#modified? is now always true if the record is new. modified?
indicates the instance's status relative to the database, and since
a new object is not yet in the database, and saving the object
would add it, the object is considered modified. A consequence of
this is that Model#save_changes now always saves if the object is
new.
If you want to check if there were changes to columns since the
object was first initialized, you should use
!changed_columns.empty?, which was the historical way to handle
the situation.
* The DataObjects (do) adapter now supports DataObjects 0.10.
* Dataset#select_more and Dataset#order_more no longer affect the
receiver. They are supposed to just return a modified copy of the
receiver instead of modifying the receiver itself. For a few
versions they have been broken in that they modified the receiver
in addition to returning a modified copy.
* Performance was increased for execution of prepared statements
with multiple bound variables on MySQL.
* On MySQL, database errors raised when preparing statements or
setting bound variable values are now caught and raised as
Sequel::DatabaseErrors.
* On MySQL, more types of disconnection errors are detected.
* When altering columns in MySQL, options such as :unsigned,
:elements, and :size that are given in the call are now respected.
* MySQL enum defaults are now handled correctly in the schema dumper.
* The schema dumper no longer attempts to use unparseable defaults
as literals on MySQL, since MySQL does not provide defaults as
valid literals.
* The emulated offset support in the shared Microsoft SQL Server
adapter now works better with model classes (or any datasets with
row_procs).
* Microsoft SQL Server now supports using the WITH clause in delete,
update, and insert calls.
* Parsed indexes when connecting to Microsoft SQL Server via JDBC no
longer include primary key indexes.
* Dataset#insert_select now returns nil if disable_insert_returning
is used in the shared PostgreSQL adapter. This makes it work as
expected with model object creation.
* Calling Model.set_primary_key with an array of symbols to set
a composite primary key is now supported. You can also provide
multiple symbol arguments to do the same thing. Before, specifying
an array of symbols broke the Model.[] optimization.
* Literalization of timezones in timestamps now works correctly on
Oracle.
* __FILE__ and __LINE__ are now used everywhere that eval is called
with a string, which makes for better backtraces.
* The native MySQL adapter now correctly handles returning before
yielding all result sets. Previously, this caused a commands out
of sync error.
* Table names in common table expressions are now quoted.
* The Oracle adapter's Dataset#except now accepts a hash, giving it
the same API as the default Dataset#except.
* When connecting to Microsoft SQL Server via ADO, allow
Dataset#insert to take multiple arguments.
* Fractional timestamps are no longer used on ODBC.
* Schema parsing now works on MSSQL when the database is set to not
quote identifiers.
* Timezone offsets are no longer used on Microsoft SQL Server, since
they only work for the datetimeoffset type.
* Only 3 fractional digits in timestamps are used in Microsoft SQL
Server, since an error is raised if you use the datetime type
with more than that.
* The integration test suite now has guards for expected failures
when run on known databases. Expected failures are marked as
pending.
Backwards Compatibility
-----------------------
* Graphing to a previously joined (but not graphed) dataset now
causes the receiver to be wrapped in a subselect, so if you
graph a dataset to a previously joined dataset, and then filter
the dataset referring to tables that were in the joined dataset
(other than the first table), the SQL produced will probably no
longer be valid. You should either filter the dataset before
graphing or use the name of the first source of the joined
dataset (which is what the subselect is aliased to) if filtering
afterward.
In certain cases, this change can cause tables to be aliased
differently, so if you were graphing previously joined datasets
and then filtering using the automatically generated aliases, you
might need to modify your code.
* The DataObjects (do) adapter no longer supports DataObjects 0.9.x.
* The Dataset#virtual_row_block_call private instance method has
been removed.
* Sequel's timezone support was significantly refactored, so if you
had any custom modifications to the timezone support, they might
need to be refactored as well.
* The SQL generation code was significantly refactored, so if you
had any custom modifications in that area, you might need to
refactor as well.
sequel-5.63.0/doc/release_notes/3.6.0.txt

New Features
------------
* Dataset#filter and related methods now accept a string with named
placeholders, and a hash with placeholder values:
ds.filter('copies_sold > :sales', :sales=>500000)
Sequel's general support for this syntax is nicer:
ds.filter{copies_sold > 500000}
But named placeholder support can make it easier to port code
from other database libraries. Also, it works much better than
the ? placeholder support if you have a long SQL statement:
DB['SELECT :n FROM t WHERE p > :q AND p < :r', :n=>1,:q=>2,:r=>3]
Sequel doesn't substitute values that don't appear in the hash:
ds.where('price < :p AND id in :ids', :p=>100)
# WHERE (price < 100 AND id in :ids)
This makes it easier to spot missed placeholders, and avoids issues
with PostgreSQL's :: casting syntax or : inside string literals.
* The Model add_ association method now accepts a hash and creates
a new associated model object associated to the receiver:
Artist[:name=>'YJM'].add_album(:name=>'RF')
* The Model remove_ association method now accepts a primary key
and removes the associated model object from the association. For
models using composite primary keys, an array of primary key values
can be used. Example:
Artist[:name=>'YJM'].remove_album(1) # regular pk
Artist[:name=>'YJM'].remove_album([2, 3]) # composite pk
* Dataset#bind was added, allowing you to bind values before calling
Dataset#call. This is more consistent with Sequel's general
approach where queries can be built in any order.
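  A brief sketch using a named bound variable:

    ds = DB[:items].filter(:id=>:$i)
    ds.bind(:i=>1).call(:first)
    # equivalent to: ds.call(:first, :i=>1)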
* The native postgres adapter now has Dataset#use_cursor, which
allows you to process huge datasets without keeping all records in
memory. The default number of rows per cursor fetch is 1000, but
that can be modified:
DB[:huge_table].use_cursor.each{|r| p r}
DB[:huge_table].use_cursor(:rows_per_fetch=>10000).each{|r| p r}
This probably won't work with prepared statements or
bound variables.
* The nested_attributes plugin now adds newly created objects to the
cached association array immediately, even though the changes
are not persisted to the database until after the object is saved.
The reasoning for this is that otherwise there is no way to access
the newly created associated objects before the save, and no way
to access them at all if validation fails.
This makes the nested_attributes plugin much easier to use, since
now you can just iterate over the cached association array when
building the form. If validation fails, it will have the newly
created failed objects in the array, so you can easily display the
form as the user entered it for them to make changes.
This change doesn't affect many_to_one associations, since those
don't have a cached association array. This also does not affect
updating existing records, since those are already in the cached
array.
* You can now easily override the default options used in the
validation_helpers plugin (the recommended validation plugin).
Options can be overridden at a global level:
Sequel::Plugins::ValidationHelpers::DEFAULT_OPTIONS[:format].
merge!(:message=>"incorrect format", :allow_missing=>true)
Options can also be overridden on a per-class level:
class Album < Sequel::Model
plugin :validation_helpers
DEFAULT_VALIDATION_OPTIONS = {
:format=>{:message=>"incorrect format", :allow_missing=>true}}
private
def default_validation_helpers_options(type)
super.merge(DEFAULT_VALIDATION_OPTIONS[type] || {})
end
end
* You can now use a proc instead of a string for the
validation_helpers :message option. This should allow much
easier internationalization support. If a proc is given, Sequel
calls it to get the format string to use. Whether the proc should
take an argument depends on whether the associated validation
method takes an argument before the array of columns to validate,
and the argument provided is what is passed to the proc. The
exception to this is the validates_not_string method, which doesn't
take an argument, but does pass one to the proc (a symbol with the
schema type of the column).
Combined with the above default option support, full
internationalization support for the validation_helpers plugin
should be fairly easy.
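  For example, with validates_max_length the length argument
  is passed to the proc (the column name is illustrative):

    def validate
      validates_max_length 255, :name,
        :message=>proc{|max| "is longer than #{max} characters"}
    end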
* The nested_attributes plugin now accepts a :fields option that
specifies the fields that are allowed. If specified, the
plugin will use set_only instead of set when mass assigning
attributes. Without this, the only way to control which fields
are allowed is to set allowed/restricted attributes at a class
level in the associated class.
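  For example, to only allow setting the name field:

    Artist.nested_attributes :albums, :fields=>[:name]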
* Associations now accept a :distinct option which uses the SQL
DISTINCT clause. This can be used instead of :uniq for
many_to_many and many_through_many associations to handle
the uniqueness in the database instead of in ruby. It can
also be useful for one_to_many associations to models that
don't have primary keys.
* The caching plugin now accepts an :ignore_exceptions option that
allows it to work with memcached (which raises exceptions instead
of returning nil for missing records).
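  For example (memcache_client here stands in for your
  memcached client object):

    Album.plugin :caching, memcache_client,
      :ignore_exceptions=>true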
* Sequel now emulates JOIN USING via JOIN ON for databases
that don't support JOIN USING (MSSQL and H2). This isn't
guaranteed to work for all queries, since USING and ON have
different semantics, but should work in most cases.
* The MSSQL shared adapter now supports insert_select, for faster
model object creation. If for some reason you need to disable it,
you can use disable_insert_output.
* Model#modified! has been added which explicitly marks the object
as modified. So even if no column values have been modified,
calling save_changes/update will still run through the regular
save process and call all before and after save/update hooks.
* Model#marshallable! has been added which removes unmarshallable
attributes from the object. Previously, you couldn't marshal
a saved model object because it contained a dataset with a
singleton class. Custom _dump and _load methods could be used
instead, but this approach is easier to implement.
* Dataset#literal_other now calls sql_literal on the object with
the current dataset instance, if the object responds to it.
This makes it easier to support the literalization of arbitrary
objects.
Note that if the object is a subclass of a class handled by
an existing dataset literalization method, you cannot use this
method. You have to override the specific Dataset#literal_* method
in that case.
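  A hypothetical sketch for a custom Point class:

    class Point < Struct.new(:x, :y)
      def sql_literal(ds)
        "point(#{ds.literal(x)}, #{ds.literal(y)})"
      end
    end
    DB[:locations].where(:p=>Point.new(1, 2))
    # WHERE (p = point(1, 2))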
* Model#save_changes now accepts an option hash that is passed to
save:
album.save_changes(:validate=>false)
* A bunch of Dataset#*_join methods have been added, for specific
join types:
* cross_join
* natural_join
* full_join
* left_join
* right_join
* natural_full_join
* natural_left_join
* natural_right_join
Previously, you had to use join_table(:cross, ...) to use a CROSS
JOIN.
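  For example:

    DB[:a].cross_join(:b)
    # SELECT * FROM a CROSS JOIN b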
* You can now create clustered indexes on Microsoft SQL Server using
the :clustered option.
* AssociationReflection#associated_object_keys has been added,
specifying the keys in the associated model object that are related
to this association.
* Sequel::SQL::SQLArray#to_a was added.
Other Improvements
------------------
* Constant lookup in virtual row blocks now works correctly in ruby
1.9. Virtual row blocks are based on BasicObject on ruby 1.9,
which doesn't allow referencing objects in the top level scope. So
the following code would cause an error on 1.9:
DB[:bonds].filter{maturity_date > Time.now}
Sequel now uses a Sequel::BasicObject class on 1.9 with a
const_missing that looks up constants in Object, which allows the
above code to work.
* Sequel no longer attempts to load associated objects when
one of the key fields in the current table is NULL. This fixes
the behavior when the :primary_key option for the association
is used to point to a non-primary key.
A consequence of this change is that attempting to load a
*_to_many association for a new model object now returns
an empty array instead of raising an exception. This has its
own advantage of allowing the same association viewing code
to work on both new and existing objects. Previously, you had
to actively avoid calling the association method on new objects,
or Sequel would raise an exception.
* Dataset aggregate methods (sum/avg/min/max/range/interval) now
work correctly with limited, grouped, or compound datasets.
Previously, count worked with them, but other aggregate methods
did not. These methods now use a subquery if called on a
limited, grouped or compound dataset.
* It is no longer required to have an existing GROUP BY clause
to use a HAVING clause (except on SQLite, which doesn't permit
it). Sequel has always had this limitation, but it's not required
by the SQL standard, and there are valid reasons to use HAVING
without GROUP BY.
* Sequel will now emulate support for databases that don't support
multiple column IN/NOT IN syntax, such as MSSQL and SQLite:
ds.filter([:col1, :col2]=>[[1, 2], [3, 4]].sql_array)
# default: WHERE (col1, col2) IN ((1, 2), (3, 4))
# emulated: WHERE (((col1 = 1) AND (col2 = 2)) OR
# ((col1 = 3) AND (col2 = 4)))
This is necessary for eager loading associated objects for models
with composite primary keys.
* Sequel now emulates :column.ilike('blah%') for case insensitive
searches on MSSQL and H2. MSSQL is case insensitive by default,
so it is the same as like. H2 is case sensitive, so Sequel
uses a case insensitive cast there.
* The nested_attributes plugin no longer allows modification of
keys related to the association. This fixes a possible security
issue with the plugin, where a user could associate the nested
record to a different record. For example:
Artist.one_to_many :albums
Artist.plugin :nested_attributes
Artist.nested_attributes :albums
artist = Artist.create
artist2 = Artist.create
album = Album.create
artist.add_album(album)
artist.albums_attributes = [{:id=>album.id,
:artist_id=>artist2.id}]
artist.save
* The one_to_many remove_* association method now makes sure that the
object to be removed is currently associated to this object.
Before, the method could be abused to disassociate the object from
whatever object it was associated to.
* Model add_ and remove_ association methods now check that the passed
object is of the correct class.
* Calling the add_* association method no longer adds the record
to the cached association array if the object is already in the
array. Previously, Sequel did this for reciprocal associations,
but not for regular associations.
This makes the most sense for one_to_many associations, since
those can only be associated to the object once. For many_to_many
associations, if you want an option to disable the behavior, please
bring it up on the Sequel mailing list.
* An array with a string and placeholders that is passed to
Dataset#filter is no longer modified. Previously:
options = ["name like ?", "%dog%"]
DB[:players].where(options)
options # => ["%dog%"]
* Getting the most recently inserted autoincremented primary key
is now optimized when connecting to MySQL via JDBC.
* Model.inherited now calls Class.inherited.
* The MSSQL shared adapter once again works on ruby 1.9. It was
broken in 3.5.0 due to minor syntax issues.
* The force_encoding plugin now handles refreshing an existing
object, either explicitly or implicitly when new objects are
created.
To use the force_encoding plugin with the identity_map plugin, the
identity_map plugin should be loaded first.
* Using nil as a bound variable now works on PostgreSQL. Before,
Sequel would incorrectly use "" instead of NULL, since it
transformed all objects to strings before binding them. Sequel
now binds the objects directly.
* The Amalgalite adapter is now significantly faster, especially for
code that modifies the schema or submits arbitrary SQL statements
using Database <<, run, or execute_ddl.
* Model#save_changes is now used when updating existing associated
objects in the nested_attributes plugin. This should be
significantly faster for the common case of submitting a complex
form with nested objects without making modifications.
* You can now prepare insert statements that take multiple arguments,
such as insert(1, 2, 3) and insert(columns, values).
* Dataset#group_and_count now supports aliased columns.
* Adding indexes to tables outside the default schema now works.
* Eager graphing now works better with models that use aliased tables.
* Sequel now correctly parses the column schema information for tables
in a non-default schema on Microsoft SQL Server.
* changed_columns is now cleared when saving new model objects
for adapters that support insert_select, such as PostgreSQL.
* Dataset#replace on MySQL now works correctly when default values
are used.
* Dataset#lock on PostgreSQL now works correctly.
* Dataset#explain now works correctly on SQLite, and works using
any adapter. It also works correctly on Amalgalite.
* The JDBC adapter now handles binding Time arguments correctly when
using prepared statements.
* Model add_ and remove_ association methods now have more
descriptive exception messages.
* Dataset#simple_select_all? now ignores options that don't affect
the SQL, such as :server.
* Dataset#window in the PostgreSQL adapter now respects existing
named windows.
* Sequel now better handles a failure to begin a new transaction.
* The dataset code was split into some additional files for improved
readability.
* Many documentation improvements were made.
Backwards Compatibility
-----------------------
* Model::Errors no longer uses a default proc, but emulates one in the
[] method. This is unlikely to have a negative effect unless you
are calling a method on it that doesn't call [] (maybe using it in
a C extension?).
* Model#table_name now only provides the alias if an aliased table is
used.
* The Sequel::Dataset::STOCK_COUNT_OPTS constant has been removed.
* Dataset#lock on PostgreSQL now returns nil instead of a dataset.
sequel-5.63.0/doc/release_notes/3.7.0.txt

New Features
------------
* Sequel now has support for deleting and updating joined datasets
on MySQL and PostgreSQL. Previously, Sequel only supported this to
a limited extent on Microsoft SQL Server, and support there has been
improved as well.
This allows you to do:
DB.create_table!(:a){Integer :a; Integer :d}
DB.create_table!(:b){Integer :b; Integer :e}
DB.create_table!(:c){Integer :c; Integer :f}
# Insert some rows
ds = DB.from(:a, :b).
join(:c, :c=>:e.identifier).
where(:d=>:b)
ds.where(:f=>6).update(:a => 10)
ds.where(:f=>5).delete
Which will set the a column to 10 for all rows in table a, where
an associated row in table c (through table b) has a value of 6 for
column f. It will delete rows from table a where an associated row
in table c (through table b) has a value of 5 for column f.
* Sequel assumes that the first FROM table is the table being
updated/deleted. MySQL and Microsoft SQL Server do not require
multiple FROM tables, but PostgreSQL does.
* Dataset #select_map, #select_order_map, and #select_hash
convenience methods were added for quickly creating arrays and
hashes from a dataset.
select_map and select_order_map both return arrays of values for the
column specified. The column can be specified either via an argument
or a block, similar to Dataset#get. Both accept any valid objects as
arguments.
select_hash returns a hash. It requires two symbol arguments, but
can handle implicit qualifiers or aliases in the symbols.
Neither of these methods offer any new functionality, they just cut
down on the number of required keystrokes:
select_map(:column) # select(:column).map(:column)
select_order_map(:column) # select(:column).order(:column).
# map(:column)
select_hash(:key_column, :value_column)
# select(:key_column, :value_column).
# to_hash(:key_column, :value_column)
* The NULL, NOTNULL, TRUE, SQLTRUE, FALSE, and SQLFALSE constants
were added to Sequel::SQL::Constants. This allows you to do:
include Sequel::SQL::Constants
DB[:table].where(:a=>'1', :b=>NOTNULL)
Previously, the shortest way to do this was:
DB[:table].where(:a=>'1').exclude(:b=>nil)
It may make the code more descriptive:
DB[:table].where(:b=>NULL)
# compared to
DB[:table].where(:b=>nil)
This gives the option to use SQL terminology instead of ruby
terminology.
The other advantage of using the constants it that they handle
operators and methods like other Sequel::SQL objects:
NULL & SQLFALSE # BooleanExpression => "(NULL AND FALSE)"
nil & false # false
NULL + :a # NumericExpression => "(NULL + a)"
nil + :a # raises NoMethodError
NULL.sql_string + :a # StringExpression => "(NULL || a)"
NULL.as(:b) # AliasedExpression => "NULL AS b"
For complex systems that want to represent SQL boolean objects
in ruby (where you don't know exactly how they'll be used), using
the constants is recommended.
In order not to be too verbose, including Sequel::SQL::Constants
is recommended. It's not done by default, but you can still
reference the constants under the main Sequel module by default
(e.g. Sequel::NULL).
* The validates_unique method in the validation_helpers plugin now
supports an :only_if_modified option, which should speed up the
common case where the unique attribute is not modified for an
existing record. It's not on by default, since it's possible the
database could be changed between retrieving the model object and
updating it.
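  For example (:email is an illustrative column):

    def validate
      validates_unique :email, :only_if_modified=>true
    end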
* The Dataset #union, #intersect, and #except methods now accept an
:alias option which is used as the alias for the returned dataset.
DB[:table].union(DB[:old_table], :alias=>:table)
* Model#destroy now supports a :transaction option, similar to
Model#save.
* The shared Oracle adapter now supports Dataset#sequence for
returning autogenerated primary key values on insert from a
related sequence.
This makes Oracle work correctly when using models, with
something like the following:
class Album < Sequel::Model
set_dataset dataset.sequence(:seq_albums_id)
end
You currently need to call Dataset#sequence in every model
class where the underlying table uses a sequence to generate
primary key values.
Other Improvements
------------------
* In Model #save and #destroy when using transactions and when
raise_on_save_failure is false, ensure that transactions are rolled
back if a before hook returns false.
* Dataset#group_and_count now handles arguments other than Symbols.
A previous change to the method raised an exception if a Symbol was
not provided. It also handles AliasedExpressions natively, so the
following works correctly:
DB[:table].group_and_count(:column.as(:alias))
* Sequel no longer uses native autoreconnection in the mysql adapter.
Native autoreconnection has problems with prepared statements,
where a new native connection is used behind Sequel's back, so
Sequel thinks the prepared statement has already been defined on
the connection, when in fact it hasn't. Any other changes that
affect the state of the connection will be lost when native
autoreconnection is used as well.
Sequel's connection pool already handles reconnection if it detects
a disconnection. This commit also adds an additional exception
message to recognize as a disconnect. If there are other exception
messages related to disconnects, please post them on the Sequel
mailing list.
* The schema_dumper plugin now specifies the :type option for primary
key if it isn't Integer.
* On PostgreSQL, the bigserial type is used if :type=>Bignum is
given as an option to primary key. This makes it operate more
similarly to other adapters that support autoincrementing 64-bit
integer primary keys.
* The native mysql adapter will now attempt to load options in the
[client] section of the my.cnf file.
* The rake spec tasks for the project now work correctly with RSpec
1.2.9.
Backwards Compatibility
-----------------------
* Dataset::GET_ERROR_MSG and Dataset::MAP_ERROR_MSG constants were
removed. Both were replaced with Dataset::ARG_BLOCK_ERROR_MSG.
* The behavior of the Model#save_failure private instance method was
modified. It now always raises an exception, and validation
failures no longer call it.
* The internals of how autogenerated primary key metadata is stored
when creating tables on PostgreSQL has been modified.
* The native MySQL adapter no longer sets the OPT_LOCAL_INFILE option
to "client" on the native connection.
sequel-5.63.0/doc/release_notes/3.8.0.txt

New Features
------------
* Dataset#each_server was added, allowing you to run the same query
(most likely insert/update/delete) on all shards. This is useful
if you have a sharded database but have lookup tables that should
be identical on all shards. It works by yielding copies of the
current dataset that are tied to each server/shard:
DB[:table].filter(:id=>1).each_server do |ds|
ds.update(:name=>'foo')
end
* Database#each_server was added, allowing you to run schema
modification methods on all shards. It works by yielding a
new Sequel::Database object for each shard, that will connect to
only that shard:
DB.each_server do |db|
db.create_table(:t){Integer :num}
end
* You can now add and remove servers/shards from the connection
pool while Sequel is running:
DB.add_servers(:shard1=>{:host=>'s1'}, :shard2=>{:host=>'s2'})
DB.remove_servers(:shard1, :shard2)
* When you attempt to disconnect from a server that has connections
currently in use, Sequel will now schedule those connections to
be disconnected when they are returned to the pool. Previously,
Sequel disconnected available connections, but ignored connections
currently in use, so it wasn't possible to guarantee complete
disconnection from the server. Even with this new feature, you can
only guarantee eventual disconnection, since disconnection of
connections in use happens asynchronously.
* Database#disconnect now accepts a :servers option specifying the
server(s) from which to disconnect. This should be a symbol or
array of symbols representing servers/shards. Only those specified
will be disconnected:
DB.disconnect(:servers=>[:shard1, :shard2])
* A validates_type validation was added to the validation_helpers
plugin. It allows you to check that a given column contains
the correct type. It can be helpful if you are also using the
serialization plugin to store serialized ruby objects, by making
sure that the objects are of the correct type (e.g. Hash):
def validate
validates_type(Hash, :options)
end
* Sequel::SQL::Expression#== is now supported for all expressions:
:column.qualify(:table).cast(:type) == \
:column.qualify(:table).cast(:type)
# => true
:column.qualify(:table).cast(:type) == \
:other_column.qualify(:table).cast(:type)
# => false
* When using the generic File type to create blob columns on
MySQL, you can specify the specific database type by using the
:size option (with :tiny, :medium, and :long values recognized):
DB.create_table(:docs){File :body, :size=>:long} # longblob
* The mysql adapter will now default to using mysqlplus, falling
back to use mysql. mysqlplus is significantly better for threaded
code because queries do not block the entire interpreter.
* The JDBC adapter is now able to detect certain types of disconnect
errors.
* ConnectionPool.servers and Database.servers were added, which
return an array of symbols specifying the servers/shards in use.
Other Improvements
------------------
* The single-threaded connection pool now raises
DatabaseConnectionErrors if unable to connect, so it now operates
more similarly to the default connection pool.
* The single-threaded connection pool now operates more similarly
to the default connection pool when given a nonexistent server.
* PGErrors are now correctly converted to DatabaseErrors in the
postgres adapter when preparing statements or executing prepared
statements.
* DatabaseDisconnectErrors are now raised correctly in the postgres
adapter if the connection status is not OK after a query raises an
error.
* In the mysql adapter, multiple statements in a single query should
now be handled correctly in all cases, not just when using
Dataset#each. So you can now submit multiple queries in a single
string to Database#run.
* Model object creation on Microsoft SQL Server 2000 once again
works correctly. Previously, an optimization was used that was
only supported on 2005+.
* Backslashes are no longer doubled inside string literals when
connecting to Microsoft SQL Server.
* The ORDER clause now correctly comes after the HAVING clause on
Microsoft SQL Server.
* Sequel now checks that there is an active transaction before
rolling back transactions on Microsoft SQL Server, since
there are cases where Microsoft SQL Server will roll back
transactions implicitly.
* Blobs are now handled correctly when connecting to H2.
* 64-bit integers are now handled correctly in JDBC prepared
statements.
* In the boolean_readers plugin, correctly handle columns not in
the db_schema, and don't raise an error if the model's columns
can't be determined.
* In the identity_map plugin, remove instances from the cache if they
are deleted or destroyed.
Backwards Compatibility
-----------------------
* Dataset::FROM_SELF_KEEP_OPTS was merged into
Dataset::NON_SQL_OPTIONS. While used in different places, they
were used for the same purpose, and entries missing from one should
have been included in the other.
* The connection pool internals changed substantially. Now,
ConnectionPool #allocated and #available_connections will return
nil instead of an array or hash if they are called with a
nonexistent server. These are generally only used internally,
though they are part of the public API. #created_count and #size
still return the size of the :default server when called with a
nonexistent server, though.
* The meta_eval and metaclass private methods were removed from
Sequel::MetaProgramming (only the meta_def public method remains).
If you want these methods, use the metaid gem.
* The irregular ox->oxen pluralization rule was removed from the
default inflections, as it screws up the more common box->boxes.
sequel-5.63.0/doc/release_notes/3.9.0.txt

New Features
------------
* The ConnectionPool classes were refactored from 2 separate
classes to a 5 class hierarchy, with one main class and 4
subclasses, one for each combination of sharding and threading.
The primary reason for this refactoring is to make it so that
the user doesn't have to pay a performance penalty for sharding
if they aren't using it. A connection pool that supports sharding
is automatically used if the :servers option is used when setting
up the database connection.
In addition, the default connection pool no longer contains
the code to schedule future disconnections of currently allocated
connections. The sharded connection pool must be used if that
feature is desired.
The unsharded connection pools are about 25-30% faster than the
sharded versions.
* An optimistic_locking plugin was added to Sequel::Model. This
plugin implements a simple database-independent locking mechanism
to ensure that concurrent updates do not override changes:
class Person < Sequel::Model
plugin :optimistic_locking
end
p1 = Person[1]
p2 = Person[1]
# works
p1.update(:name=>'Jim')
# raises Sequel::Plugins::OptimisticLocking::Error
p2.update(:name=>'Bob')
In order for this plugin to work, you need to make sure that the
database table has a lock_version column (or other column you name
via the lock_column class level accessor) that defaults to 0.
The optimistic_locking plugin does not work with the
class_table_inheritance plugin.
* Dataset#unused_table_alias was added, which takes a symbol and
returns either that symbol or a new symbol which can be used as
a table alias when joining a table to the dataset. The symbol
returned is guaranteed to not already be used by the dataset:
DB[:test].unused_table_alias(:blah) # => :blah
DB[:test].unused_table_alias(:test) # => :test_0
The use case is when you need to join a table to a dataset, where
the table may already be used inside the dataset, and you want
to generate a unique alias:
ds.join(:table.as(ds.unused_table_alias(:table)), ...)
* The Sequel::ValidationFailed exception now has an errors accessor
which returns the Sequel::Model::Errors instance with the
validation errors. This can be helpful in situations where a
generalized rescue is done where the model object reference is
not available.
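  A brief sketch:

    begin
      album.save
    rescue Sequel::ValidationFailed => e
      e.errors.full_messages
    end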
* bin/sequel now works without an argument, which is useful for
testing SQL generation (and not much else).
* Support SELECT ... INTO in the MSSQL adapter, using Dataset#into,
which takes a table argument.
* You can now provide your own connection pool class via the
:pool_class option when instantiating the database.
Other Improvements
------------------
* IN/NOT IN constructs with an empty array are now handled properly.
DB[:table].filter(:id=>[]) # IN
DB[:table].exclude(:id=>[]) # NOT IN
Before, the IN construct would mostly work, other than some minor
differences in NULL semantics. However, the NOT IN construct
would not work. Sequel now handles the NOT IN case using an
expression that evaluates to true.
* If using an IN/NOT IN construct with multiple columns and a dataset
argument, where multiple column IN/NOT IN support is emulated, a
separate query is done to get the records, which is then handled
like an array of values. This means that the following type of
query now works on all tested databases:
DB[:table1].filter([:id1, :id2]=>DB[:table2].select(:id1, :id2))
* Schemas and aliases are now handled correctly when eager graphing.
* Implicitly qualified symbols are now handled correctly in update
statements, useful if you are updating a joined dataset and need
to reference a column that appears in multiple tables.
* The active_model plugin has been brought up to date with
activemodel 3.0 beta (though it doesn't work on edge).
Additionally, the active_model plugin now requires active_model
in order to use ActiveModel::Naming.
* In the schema_dumper extension, always include the varchar limit,
even if it is 255 (the default). This makes it so that
PostgreSQL will use a varchar(255) column instead of a text column
when restoring a schema dump of a varchar(255) column from another
database.
* You can now load adapters from outside the Sequel lib directory,
now they just need to be in a sequel/adapters directory somewhere
in the LOAD_PATH.
* You can now load extensions from outside the Sequel lib directory
using Sequel.extension. External extensions need to be in a
sequel/extensions directory somewhere in the LOAD_PATH.
* Using bound variables for limit and offset in prepared statements
now works correctly.
* Performance of prepared statements was improved in the native
SQLite adapter.
* The schema_dumper extension now passes the options hash from
dump_*_migration to Database#tables.
* In the single_table_inheritance plugin, qualify the sti_key column
with the table name, so that subclass datasets can safely be joined
to other tables having the same column name.
* In the single_table_inheritance plugin, handle the case where the
sti_key value is nil or '' specially, so that those cases
always return an instance of the main model class. This fixes
issues if constantize(nil) returns Object instead of raising
an exception.
* No longer use Date#to_s for literalization, always use ISO8601
format for dates.
* A couple lambdas which were instance_evaled were changed to procs
for ruby 1.9.2 compatibility.
* MSSQL emulated offset support was simplified to only use one
subquery, and made to work correctly on ruby 1.9.
* Emulate multiple column IN/NOT IN on H2, since it doesn't handle
all cases correctly.
* ODBC timestamps are now handled correctly if the database_timezone
is nil.
* ArgumentErrors raised when running queries in the ODBC adapter are
now raised as DatabaseErrors.
* Attempting to use DISTINCT ON on SQLite now raises an error before
sending the query to the database.
* The options hash passed to the database connection method is no
longer modified. However, there may be additional options
present in Database#opts that weren't specified by the options
hash passed to the database connection method.
* Make Dataset#add_graph_aliases handle the case where the dataset
has not yet been graphed.
* You can now provide an SQL::Identifier as a 4th argument to
Dataset#join_table, and unsupported arguments are caught and an
exception is raised.
* The gem specification has been moved out of the Rakefile, so
that the gem can now be built without rake, and works well with
gem build and bundler.
* The Rakefile no longer assumes the current directory is in the
$LOAD_PATH, so it should work correctly on ruby 1.9.2.
* All internal uses of require are now thread safe.
* Empty query parameter keys in connection strings are now ignored
instead of raising an exception.
* The specs were changed so that you can run them in parallel.
Previously there was a race condition in the migration extension
specs.
Backwards Compatibility
-----------------------
* If you plan on using sharding at any point, you now must pass
a :servers option when connecting to the database, even if it is
an empty hash. You can no longer just call Database#add_servers
later.
* The connection_proc and disconnection_proc accessors were removed
from the connection pools, so you can no longer modify the procs
after the connection pool has been instantiated. You must now
provide the connection_proc as the block argument when
instantiating the pool, and the disconnection_proc via the
:disconnection_proc option.
* In the hash passed to Dataset#update, symbol keys with a double
embedded underscore are now considered implicit qualifiers,
instead of being used verbatim. If you have a column that includes
a double underscore, you now need to wrap it in an SQL::Identifier
or use a String instead.
* The connection pools no longer convert non-StandardError based
exceptions to RuntimeErrors. Previously, all of the common adapters
turned this feature off, so there is no change for most users.
* Sequel::ConnectionPool is now considered an abstract class and
should not be instantiated directly. Use ConnectionPool.get_pool
to return an instance of the appropriate subclass.
* The Sequel::SingleThreadedPool constant is no longer defined.
* The private Dataset#eager_unique_table_alias method was removed,
use the new public Dataset#unused_table_alias method instead, which
has a slightly different API.
* The private Dataset#eager_graph_qualify_order method was removed,
use Dataset#qualified_expression instead.
* The private Sequel::Model class methods plugin_gem_location and
plugin_gem_location_old have been removed.
* Gems built with the rake tasks now show up in the root directory
instead of the pkg subdirectory, and no tarball package is created.
Other News
----------
* Sequel now has an official blog at http://sequel.jeremyevans.net/blog.html.
sequel-5.63.0/doc/release_notes/4.0.0.txt
= Backwards Compatibility
* All behavior resulting in deprecation messages in 3.48.0 has been
removed or modified. If you plan on upgrading to Sequel 4.0.0 and
have not yet upgraded to 3.48.0, upgrade to 3.48.0 first, fix code
that results in deprecation warnings, and then upgrade to 4.0.0.
* The threaded connection pools now default to
:connection_handling=>:queue. You can manually set
:connection_handling=>:stack to get the previous behavior.
* Model.raise_on_typecast_failure now defaults to false. Set this to
true to get the previous behavior of raising typecast errors in the
setter methods.
* Model#save no longer calls Model#_refresh or Model#set_values
internally after an insert. Manual refreshes are now treated
differently than after creation refreshes.
* On SQLite, integer_booleans now defaults to true. Set this to
false to get the previous behavior of 't' for true and 'f' for
false. Sequel will not automatically upgrade your data, users
are responsible for doing that if they want to switch the
integer_booleans setting. Note that regardless of the setting,
Sequel will return the correct ruby values when retrieving the
rows.
Example Code to Migrate Existing Data:
DB[:table].where(:column=>'t').update(:column=>1)
DB[:table].where(:column=>'f').update(:column=>0)
* On SQLite, use_timestamp_timezones is now false by default. Set
this to true to get the previous behavior with timezone information
in timestamps. Sequel will not automatically upgrade your data,
users are responsible for doing that if they want to switch the
use_timestamp_timezones setting. Note that regardless of the
setting, Sequel will return the correct ruby values when
retrieving the rows.
* Using window functions when eagerly loading associations with
limits or offsets is now done automatically if the database
supports it. Previously, this had to be enabled manually. If
you would like to disable this optimization and just do the
slicing in ruby, set default_eager_limit_strategy = nil.
* The default value for most option hash arguments is now a shared
empty frozen hash. If you are overriding methods and modifying
option hashes, fix your code.
* The defaults_setter plugin now works in a lazy manner instead of
an eager manner. So calling the related method returns the
default value if there is no value stored, but Sequel does not
add the default values to the internal values hash, and will not
attempt to insert what it thinks is the default value when
saving the new object.
* Model#set_all and #update_all now allow setting the primary key
columns.
* The many_to_one_pk_lookup and association_autoreloading plugins
are now integrated into the default associations support.
* Plugins now extend the class with ClassMethods before including
InstanceMethods in the class.
* Dataset#get, #select_map, and #select_order_map now automatically
add aliases for unaliased expressions if given a single expression.
* Database#tables and #views on PostgreSQL now check against
the current schemas in the search path.
* Calling ungraphed on an eager_graph dataset will restore the
row_proc for that dataset. This is not backwards compatible if
your method chain does:
dataset.eager_graph.naked.ungraphed
Switch such code to:
dataset.eager_graph.ungraphed.naked
* The Model#set_restricted and #update_restricted private methods
have a slightly different API now.
* Sequel::SQL::SQLArray alias for ValueList has been removed.
* Sequel::SQL::NoBooleanInputMethods has been removed.
* Sequel::NotImplemented has been removed. Default implementations
of methods that used to raise this exception have been removed.
* Sequel::Model::EMPTY_INSTANCE_VARIABLES has been removed.
* The Sequel::Postgres::DatabaseMethods::EXCLUDE_SCHEMAS and
SYSTEM_TABLE_REGEXP constants have been removed.
* Dataset#columns_without_introspection has been removed from the
columns_introspection extension.
* Sequel no longer provides a default database for the adapter or
integration specs. Additionally, if you are using spec_config.rb
to configure a database to use when adapter/integration testing,
you may need to modify it, as Sequel now uses the DB constant for
the database being tested.
* The SEQUEL_MSSQL_SPEC_REQUIRE and SEQUEL_DB2_SPEC_REQUIRE
environment variables are no longer respected when
adapter/integration testing those databases. Use RUBYOPT with the
-r flag.
* In the 3.48.0 release notes, it was announced that
Dataset#join_table would default to :qualify=>:deep in 4.0.0. This
change was made but reverted before the release of 4.0.0 as it was
determined too likely to break existing code, there was no
deprecation warning (since it just changed a setting), and the
benefit was minimal. You can make deep qualification the default by
overriding Dataset#default_join_table_qualification.
= New Features
* A pg_array_associations plugin has been added, for creating
an association based on a PostgreSQL array column containing
foreign keys. Example:
# Database schema:
# tags albums
# :id (int4) <--\ :id
# :name \-- :tag_ids (int4[])
# :name
class Album
plugin :pg_array_associations
pg_array_to_many :tags
end
class Tag
plugin :pg_array_associations
many_to_pg_array :albums
end
This operates similarly to a many_to_many association, but does not
require a join table. All of the usual Sequel association features
are supported, such as adding, removing, and clearing associations,
eager loading via eager and eager_graph, filtering by associations,
and dataset associations.
Note that until PostgreSQL gains the ability to enforce foreign key
constraints in array columns, this plugin is not recommended for
production use unless you plan on emulating referential integrity
constraints via triggers.
* Dataset#from now accepts virtual_row blocks, making it easy to use
with table returning functions:
DB.from{table_returning_function(arg)}
* Sequel.deep_qualify has been added, for easily doing a deep
qualification of objects:
Sequel.deep_qualify(:table, Sequel.+(:column, 1))
# ("table"."column" + 1)
Sequel.deep_qualify(:table, Sequel.like(:a, 'b'))
# ("table"."a" LIKE 'b' ESCAPE '\')
* The prepared_statements_associations plugin now handles one_to_one
associations.
* SQL::Subscript objects now handle ruby range arguments, operating as
an SQL array slice:
Sequel.subscript(:a, 1..2) # a[1:2]
* Database#create_view now accepts a :columns option to provide
explicit column names for the view.
* Postgres::ArrayOp#[] now returns an ArrayOp if given a range, since
a PostgreSQL array slice can be treated as an array.
* Postgres::ArrayOp#hstore has been added for creating hstores from
PostgreSQL arrays.
* When creating full text indexes on PostgreSQL, the :index_type=>:gist
option can be used to use a gist index instead of the default gin
index. This can be useful if insert/update speed is more important
than lookup speed.
* You can now provide the :owner option to Database#create_schema on
PostgreSQL to specify the owner of the schema.
* You can now provide the :if_exists option to Database#drop_view
on PostgreSQL to not raise an error if the view doesn't exist.
* The pg_json extension now handles non-JSON plain strings, integers
and floats in PostgreSQL JSON columns.
= Support for New Features in PostgreSQL 9.3
* A pg_json_ops extension has been added to support the new json
operators and functions.
* Postgres::ArrayOp#replace and #remove have been added for using the
array_replace and array_remove functions.
* You can now provide the :if_not_exists option when using
Database#create_schema on PostgreSQL to not raise an error if the
schema already exists.
* Database#create_view now supports a :recursive option on PostgreSQL
for creating recursive views.
* Database#create_view and #drop_view now support a :materialized option
on PostgreSQL for creating/dropping materialized views.
* Database#refresh_view has been added on PostgreSQL for refreshing
materialized views.
= Other Improvements
* Check constraints are now always surrounded by parentheses, since that
is required by the SQL standard. This fixes issues in the cases where
parentheses were not used automatically, such as when a function call
was used.
* Using an offset without a limit when eager loading now works
correctly.
* The prepared_statements_associations plugin now works correctly when
the associated class uses a filtered dataset.
* The prepared_statements_associations plugin can now use a prepared
statement for cases where the association uses :conditions.
* Boolean prepared statement arguments now work correctly in the sqlite
adapter when the integer_booleans setting is true.
* Dataset#inspect on prepared statements now handles anonymous dataset
classes correctly.
* When dataset string/blob literalization depends on having a database
connection and the dataset has an assigned server, a connection to
the assigned server is used.
* More disconnect errors are now handled when using the postgres
adapter with the postgres-pr driver, and in the jdbc/oracle adapter.
* Composite primary keys are now parsed correctly on SQLite 3.7.16+.
* Blobs are now hex escaped on MySQL, which can solve some encoding
issues when blobs are used as literals in the same SQL query with
UTF-8 strings.
* BigDecimals instances are now formatted nicer in the pretty_table
extension.
* Sequel now raises an exception when attempting to literalize infinite
and NaN floats on MySQL. In general, this would result in MySQL
raising an error, but in extreme cases it could have failed silently.
* You can now use a NO_SEQUEL_PG environment variable to not
automatically require sequel_pg in the postgres adapter.
* Dataset#unbind now always uses symbol keys in the bind variable hash.
sequel-5.63.0/doc/release_notes/4.1.0.txt
= New Features
* Database#run and #<< now accept SQL::PlaceholderLiteralString
objects, allowing you to more easily run arbitrary DDL queries with
placeholders:
DB.run Sequel.lit("CREATE TABLE ? (? integer)", :table, :column)
* You can now provide options for check constraints by calling the
constraint/add_constraint methods with a hash as the first argument.
On PostgreSQL, you can now use the :not_valid option for check
constraints, so they are enforced for inserts and updates, but
not for existing rows.
DB.create_table(:table) do
...
constraint({:name=>:constraint_name, :not_valid=>true}) do
column_name > 10
end
end
* Dataset#stream has been added to the mysql2 adapter, and will have
the dataset stream results if used with mysql2 0.3.12+. This
allows you to process large datasets without keeping the entire
dataset in memory.
DB[:large_table].stream.each{|r| ...}
* Database#error_info has been added to the postgres adapter. It
is supported on PostgreSQL 9.3+ if pg-0.16.0+ is used as the
underlying driver, and it gives you a hash of metadata related
to the exception:
DB[:table_name].insert(1) rescue DB.error_info($!)
# => {:schema=>"public", :table=>"table_name", :column=>nil,
:constraint=>"constraint_name", :type=>nil}
* The :deferrable option is now supported when adding exclusion
constraints on PostgreSQL, to allow setting up deferred exclusion
constraints.
* The :inherits option is now supported in Database#create_table on
PostgreSQL, for table inheritance:
DB.create_table(:t1, :inherits=>:t0){}
# CREATE TABLE t1 () INHERITS (t0)
* Dataset#replace and #multi_replace are now supported on SQLite,
just as they have been previously on MySQL.
* In the jdbc adapter, Java::JavaUtil::HashMap objects are now
converted to ruby Hash objects. This is to make it easier to
handle the PostgreSQL hstore type when using the jdbc/postgres
adapter.
* The odbc adapter now supports a :drvconnect option that accepts
an ODBC connection string that is passed to ruby-odbc verbatim.
= Other Improvements
* The prepared_statements plugin no longer breaks the
instance_filters and update_primary_key plugins.
* Dropping indexes for tables in a specific schema is now supported
on PostgreSQL. Sequel now explicitly specifies the same schema
as the table when dropping such indexes.
* Calling Model#add_association methods with a primary key value
now raises a Sequel::NoMatchingRow if there is no object in the
associated table with that primary key. Previously, this
situation was not handled and resulted in a NoMethodError being
raised later.
* When an invalid virtual row block function call is detected, an
error is now properly raised. Previously, the error was not
raised until the SQL was produced for the query.
= Backwards Compatibility
* The :driver option to the odbc adapter is deprecated and will be
removed in a future version. It is thought to be broken, and
users wanting to use DSN-less connections should use the new
:drvconnect option.
* The Postgres::ArrayOp#text_op private method has been removed.
sequel-5.63.0/doc/release_notes/4.10.0.txt
= Performance Enhancements
* Dataset literalization for simple datasets is now faster by
creating a per-adapter SQL literalization method instead of
having all adapters share a generic method with higher overhead.
Sequel.split_symbol now caches results globally. Symbol
literalization is now cached per Database.
Combining these three optimizations, here are the performance
increases compared to 4.9.0 for a couple example datasets:
ds1 = DB[:a]
ds2 = DB[:a].select(:a, :b).where(:c=>1).order(:d, :e)
        .sql   .all (1 row)
  ds1   140%   11%
  ds2   187%   32%
* Regular association loading now uses a placeholder literalizer
in most cases, for up to an 85% improvement when loading
simple associations.
* Eager loading associations using Dataset#eager now uses a
placeholder literalizer in most cases, for up to a
20% improvement when eager loading simple associations.
* Eager loading associations with limits using Dataset#eager now
uses a UNION-based strategy by default. After extensive
testing, this was found to be the fastest strategy if the
key columns are indexed. Unfortunately, it is a much slower
strategy if the key columns are not indexed. You can override
the default UNION strategy by using the :eager_limit_strategy
association option.
On some databases, execution time of UNION queries with n subqueries
increases faster than O(n). Also, there are limits on the number of
subqueries supported in a single UNION query. Sequel chooses a
default limit of 40 subqueries per UNION query. You can increase
this via the :subqueries_per_union association option.
* Dataset#import and #multi_insert can now insert multiple rows
in a single query on H2, HSQLDB, Derby, SQLAnywhere, CUBRID,
SQLite, Oracle, DB2, and Firebird, which should be significantly
faster than previous versions that issued a separate INSERT query
per row.
* The many_to_many setter method in the association_pks plugin now
uses Dataset#import to insert many rows at once, instead of using
a separate query per insert.
* The jdbc adapter's type conversion has been rewritten to be
more similar to the other adapters, setting up the type
conversion procs before iterating over results. This increases
performance up to 20%.
* The jdbc/oracle adapter now defaults to a fetch_size of 100,
similar to the oci8-based oracle adapter, significantly improving
performance for large datasets.
= New Features
* Database#transaction now supports an :auto_savepoint option. This
option makes it so that transactions inside the transaction block
automatically use savepoints unless they use the :savepoint=>false
option. This should make testing transactional behavior easier.
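  For example (a minimal sketch):

    DB.transaction(:auto_savepoint=>true) do # BEGIN
      DB.transaction do                      # SAVEPOINT automatically used
        # ...
      end
    end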
* Model.prepared_finder has been added. This has an API similar to
Model.finder, but it uses a prepared statement instead of a
placeholder literalizer. It is less flexible than Model.finder
as prepared statements have fixed SQL, but it may perform better.
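  A sketch of the usage, mirroring Model.finder's API (the model and
  dataset method are hypothetical):

    class Album < Sequel::Model
      dataset_module do
        def by_name(name)
          where(:name=>name)
        end
      end
      prepared_finder :by_name
    end
    Album.first_by_name('RF') # uses a prepared statement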
* Common table expressions (WITH clauses) are now supported on SQLite
3.8.3+.
* :correlated_subquery has been added as an eager_graph and filter by
association limit strategy for one_to_one and one_to_many
associations. In certain cases it was found that this is faster
than the :window_function limit strategy. It is the default
filter by associations limit strategy on databases that do not
support window functions.
Filtering by limited associations using a correlated subquery
strategy does not work in all cases, but it should handle most
cases correctly.
* The prepared_statement_associations plugin now handles
one_through_one and one_through_many associations.
* Sequel now emulates support for offsets without limits on MySQL,
SQLite, H2, SQLAnywhere, and CUBRID.
* In the jdbc adapter, the Database#fetch_size accessor and
:fetch_size option can be used to automatically set the JDBC
fetch size for JDBC Statement objects created by the database.
* Dataset#with_fetch_size has been added to jdbc adapter datasets,
setting the fetch size to use on ResultSets generated by the
dataset. This generally has the effect of overriding the
Database fetch_size setting.
* On MySQL 5.6.5+, Sequel supports a :fractional_seconds Database
option, which will use fractional seconds for timestamp values,
and have the schema modification code create timestamp columns
that accept fractional timestamps by default.
* Database#call_mssql_sproc on Microsoft SQL Server now handles named
parameters:
DB.call_mssql_sproc(:sproc_name, :args => {
'input_arg1_name' => 'input arg1 value',
'input_arg2_name' => 'input arg2 value',
'output_arg_name' => [:output, 'int', 'result key name']
})
* Database#drop_view now supports an :if_exists option on SQLite,
MySQL, H2, and HSQLDB.
* Database#drop_table now supports an :if_exists option on HSQLDB.
* A :filter_limit_strategy association option has been added, for
choosing the strategy that will be used when filtering/excluding by
associations with limits. For backwards compatibility, Sequel will
fallback to looking at the :eager_limit_strategy option.
* A :server_version Database option is now supported on Microsoft SQL
Server, which will use the value given instead of querying for it.
= Other Improvements
* Dataset::PlaceholderLiteralizer arguments are now handled
correctly when emulating offsets via the row_number window function
on DB2, MSSQL <=2012, and Oracle.
* Dataset::PlaceholderLiteralizer now handles DelayedEvaluation
objects correctly.
* Offset emulation is skipped if static SQL is used on Access,
DB2, and MSSQL <=2008.
* Additional disconnect errors are now recognized in the postgres
adapter.
* The :foreign_key_constraint_name option is now respected when
adding a foreign key column to an existing table on MySQL.
* Sequel now attempts to work around a bug on MySQL 5.6+ when
combining DROP FOREIGN KEY and DROP INDEX in the same ALTER TABLE
statement.
* Dataset#for_update is now respected on H2.
* Timestamp with local time zone types are now returned as
Time/DateTime objects on jdbc/oracle.
* Model.include now has the same API as Module.include.
* Model#marshallable! now works correctly when using the
tactical_eager_loading plugin.
* The pg_array_associations plugin now attempts to automatically
determine the correct array type to use, and explicitly casts
to that array type in more places.
* The auto_validations plugin now handles models that select from
subqueries.
* The association_pks plugin no longer creates getter and setter
methods for one_through_one associations.
* bin/sequel now uses the Sequel code in the related lib directory.
This makes it easier to use from a repository checkout.
= Backwards Compatibility
* AssociationReflection#associated_dataset now returns a joined
dataset for associations that require joins (e.g. many_to_many).
Anyone using this directly for associations that require joins
probably needs to update their code.
* Model.associate now adds the association instance methods instead
of relying on the def_#{association_type} method doing so. Anyone
using custom association types probably needs to update their code.
* Model.eager_loading_dataset, .apply_association_dataset_opts, and
.def_{add_method,association_dataset_methods,remove_methods}
are now deprecated.
* Key conditions for associations requiring joins have been moved
from the JOIN ON clause to the WHERE clause. This should be
optimized the same by the database, but it can break tests that
expect specific SQL.
* Dataset#_insert_sql and #_update_sql are now private instead of
protected.
* The install/uninstall rake tasks have been removed.
* Model association and association reflection internals have
changed significantly, if you were relying on them, you'll
probably need to update your code.
* Database transaction internals have changed significantly, if you
were relying on them, you'll probably need to update your code.
* Dataset literalization internals have changed significantly, with
the Dataset#*_clause_methods private methods being removed.
Custom adapters that used these methods should switch to using the
new Dataset.def_sql_method method.
* Common table expressions are no longer enabled by default in
Sequel. External adapters for databases that support common
table expressions should define Dataset#supports_cte?(type) to
return true.
* Support for RETURNING is no longer determined via introspection.
External adapters for databases that support RETURNING should
define Dataset#supports_returning?(type) to return true.
* The new jdbc adapter type conversion code may not be completely
compatible with the previous code. The currently known case
where it is different is on jdbc/postgresql, when using an
array type where no conversion proc exists, the returned object
will be a ruby array containing java objects, instead of a ruby
array containing ruby objects. It is recommended that
jdbc/postgresql users using array types use the pg_array extension
to avoid this issue.
sequel-5.63.0/doc/release_notes/4.11.0.txt
= New SQL Function Features
* SQL::Function now supports an options hash for functions.
Unfortunately, since SQL::Function#initialize does not support
an options hash, you need to use SQL::Function.new! to create
a function with an options hash. You can also call methods on
the SQL::Function instance, which will return a new SQL::Function
with the appropriate option set.
* SQL::Function#quoted has been added, which will return a new
SQL::Function instance that will quote the function name (if
the database supports quoting function names).
* SQL::Function#unquoted has been added, which will return a new
SQL::Function instance that will not quote the function name.
* SQL::Function#lateral has been added, which will return a new
SQL::Function instance that will be preceded by LATERAL when
literalized, useful for set-returning functions.
* SQL::Function#within_group has been added, for creating
ordered-set and hypothetical-set functions that use WITHIN GROUP.
* SQL::Function#filter has been added, for creating filtered
aggregate function calls using FILTER.
* SQL::Function#with_ordinality has been added, for creating set
returning functions that also include a row number for every
row in the set, using WITH ORDINALITY.
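  For example, several of the methods above can be combined (the
  function and column names are hypothetical, and the SQL comments
  are approximate):

    Sequel.function(:rank).within_group(:score)
    # rank() WITHIN GROUP (ORDER BY score)
    Sequel.function(:count, :id).filter(:active=>true)
    # count(id) FILTER (WHERE (active IS TRUE))
    Sequel.function(:unnest, :arr).with_ordinality.lateral
    # LATERAL unnest(arr) WITH ORDINALITY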
= New PostgreSQL Features
* The jsonb type added in 9.4 is now supported in the pg_json
extension. To create a jsonb type manually, you need to call
Sequel.pg_jsonb.
The new json and jsonb functions and operators added in 9.4 are
now supported in the pg_json_ops extension. You can use the jsonb
functions and operators by creating a Postgres::JSONBOp using
Sequel.pg_jsonb_op.
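  For example (a sketch; the table and column are hypothetical):

    DB.extension :pg_json
    DB[:items].insert(:data=>Sequel.pg_jsonb('a'=>1, 'b'=>[2, 3]))

    Sequel.extension :pg_json_ops
    Sequel.pg_jsonb_op(:data)['a'] # (data -> 'a')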
* Database#full_text_search now takes a :rank option to order by the
ranking.
* Database#refresh_view now supports a :concurrently option, to
refresh a materialized view concurrently, supported on 9.4+.
* Postgres::ArrayOp#cardinality has been added to the pg_array_ops
extension, for easy use of the cardinality method added in 9.4.
* Postgres::ArrayOp#unnest in the pg_array_ops extension now accepts
arguments. PostgreSQL 9.4+ supports this if unnest is used in the
FROM clause.
= Other New Features
* Sequel now supports derived column lists (table aliases that include
column aliases) via Sequel.as and SQL::AliasedMethods#as:
Sequel.as(:table, :alias, [:c1, :c2])
# table AS alias(c1, c2)
Not all databases support this, but it is in SQL92 and Sequel now
supports it by default. Derived column lists make it easier to
alias columns when using set-returning functions.
Dataset#from_self now supports derived column lists via the new
:column_aliases option (which requires the :alias option to take
effect).
* Database#create_view now supports a :check option, to use
WITH CHECK OPTION. You can also use :check=>:local for
WITH LOCAL CHECK OPTION. These clauses make it so when you are
inserting into/updating the view, you can only modify rows in the
underlying table if the result would be returned by the view.
* The :after_connect Database option proc now can accept two
arguments. If the arity of the proc is 2, Sequel will pass both
the connection object and the shard symbol.
* The class_table_inheritance plugin now supports a :model_map
option similar to the single_table_inheritance plugin, allowing
use of the plugin without storing ruby class names in the database.
Note that if you use this option, you must set the correct value
for the kind column manually when creating the row.
* Support for CUBRID/SQLAnywhere emulation has been added to the
mock adapter.
= Other Improvements
* Dataset#import now supports a default slice size, which Sequel
sets to 500 on SQLite as that is the limit that SQLite supports in
a single statement.
* The serialization plugin now only modifies changed_columns in the
setter method if the deserialized value has changed, similar to
how Sequel's standard column setters work. Note that if you are
mutating the deserialized value (i.e. not calling the setter
method), you still need to use the
serialization_modification_detection plugin.
* Plugins that set column values for new objects before creation now
use before_validation instead of before_create, which works better
when the auto_validations plugin is used.
* The :read_only transaction option is now applied per-savepoint on
PostgreSQL. Note that this allows you to have a READ ONLY
savepoint in a READ WRITE transaction, it does not allow you to
have a READ WRITE savepoint in a READ ONLY transaction.
* In the ibm_db adapter, fix warnings when using certain column names.
* Support connecting to a DB2 catalog name in the ibm_db adapter, by
providing a :database option without a :host or :port option.
* The mock adapter now sets an emulated version when using MySQL and
SQLite. Additionally, the emulated version for PostgreSQL and
Microsoft SQL Server has been updated.
= Backwards Compatibility
* External adapters that override Dataset#as_sql_append now need to
have the method accept two arguments.
* Model.eager_loading_dataset, .apply_association_dataset_opts, and
.def_{add_method,association_dataset_methods,remove_methods} have
been removed (they were deprecated in 4.10.0).
* SQL::WindowFunction and SQL::EmulatedFunction classes are now
deprecated, as well as Dataset methods that literalize instances of
these classes. These classes are replaced by using options on
SQL::Function instances.
* Passing a table_alias argument when creating an SQL::JoinClause
manually is no longer supported. You now need to pass the table as
an SQL::AliasedExpression if the table needs to be aliased.
* ASTTransformer no longer transforms the table alias for
SQL::JoinClause. This is for consistency with
SQL::AliasedExpression.
* SQL standard casts are now used in Database#full_text_search, which
can break tests that expect specific SQL.
* The to_dot extension now uses slightly different output for
SQL::Function and SQL::JoinClause instances.
sequel-5.63.0/doc/release_notes/4.12.0.txt
= New Features
* Database#schema now includes :max_length entries for string
columns, specifying the size of the string field. The
auto_validations plugin now uses this information to
automatically set up max_length validations on those fields.
* The Dataset join methods now support a :reset_implicit_qualifier
option. If set to false, this makes the join not reset the
implicit qualifier, so that the next join will not consider this
table as the last table joined. Example:
DB[:a].join(:b, :c=>:d).
join(:e, :f=>:g)
# SELECT * FROM a
# INNER JOIN b ON (b.c = a.d)
# INNER JOIN e ON (e.f = b.g)
DB[:a].join(:b, {:c=>:d}, :reset_implicit_qualifier=>false).
join(:e, :f=>:g)
# SELECT * FROM a
# INNER JOIN b ON (b.c = a.d)
# INNER JOIN e ON (e.f = a.g)
* The Dataset cross and natural join methods now accept an options
hash. Example:
DB[:a].cross_join(:b, :table_alias=>:c)
# SELECT * FROM a CROSS JOIN b AS c
* Model#set_nested_attributes has been added to the nested_attributes
plugin, which allows you to set the nested_attributes options to
use per-call. This is very helpful if you have multiple forms that
handle associated objects, but with different input fields used
for the associated objects depending on the form. Example:
album.set_nested_attributes(:tracks,
params[:track_attributes],
:fields=>[:a, :b, :c])
* Database#values has been added on PostgreSQL, which creates a
dataset that uses VALUES instead of SELECT. Just as PostgreSQL
allows, you can also use orders, limits, and offsets with this
dataset.
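  For example (PostgreSQL names the VALUES columns column1,
  column2, ...):

    DB.values([[1, 'a'], [2, 'b']])
    # VALUES (1, 'a'), (2, 'b')
    DB.values([[1, 'a'], [2, 'b']]).order(:column2).limit(1)
    # VALUES (1, 'a'), (2, 'b') ORDER BY column2 LIMIT 1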
* A :notice_receiver option is now supported in the postgres adapter
if the pg driver is used. This should be a proc, which will be
passed to the pg connection's set_notice_receiver method.
* A Database :readonly option is now supported in the sqlite adapter,
which opens the database in a read-only mode, causing an error
if a query is issued that would modify the database.
* A :before_thread_exit option has been added to
Database#listen_for_static_cache_updates in the
pg_static_cache_updater extension, allowing you to run code before
the created thread exits.
= Other Improvements
* Eager loading limited associations using a UNION now works
correctly when an association block is used. This fixes a
regression that first occurred in 4.10.0, when the union
eager loader became the default eager loader.
* When creating a new associated object in the nested_attributes
plugin, where the reciprocal association is a many_to_one
association, set the cached reciprocal object in the new
associated object before saving it.
This fixes issues when validations in the associated object
require access to the current object, which may not yet be
saved in the database.
* The prepared_statements and prepared_statements_associations
plugins now automatically use explicit column references when
preparing statements. This fixes issues on PostgreSQL when a
column is added to a table while a prepared statement exists
that selects * from the table. Previously, all further attempts
to use the prepared statement will fail.
This allows you to run migrations that add columns to tables
while concurrently running an application that uses the
prepared statements plugins. Note that many other schema
modifications can cause issues when running migrations
while concurrently running an application, but most of those
are not specific to usage of prepared statements.
* Dataset#insert_select on PostgreSQL now respects an existing
RETURNING clause, and won't override it to use RETURNING *.
A similar fix was applied to the generalized prepared statements
support as well.
* The interval parser in the pg_interval extension now supports
intervals with 2-10 digits for hours. Previously, it only
supported using 2 digits for hours.
= Backwards Compatibility
* The methods and classes deprecated in 4.11.0 have been removed.
* The nested_attributes internal API has changed significantly. If
you were calling any private nested_attributes methods, you'll
probably need to update your code.
sequel-5.63.0/doc/release_notes/4.13.0.txt
= New Features
* A modification_detection plugin has been added, for automatic
detection of in-place column value modifications. This makes
it so you don't have to call Model#modified! manually when
changing a value in place.
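  For example (a sketch; the model and column are hypothetical):

    Album.plugin :modification_detection
    album = Album[1]
    album.name << ' (Remastered)' # in-place change, no setter called
    album.save_changes            # change is detected and saved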
* A column_select plugin has been added, for automatically
selecting explicitly qualified columns in model datasets.
Example:
Sequel::Model.plugin :column_select
class Album < Sequel::Model
end
Album.dataset.sql
# SELECT albums.id, albums.name, albums.artist_id
# FROM albums
* An insert_returning_select plugin has been added, for automatically
setting up RETURNING clauses for models that select explicit
columns. This is useful when using the column_select or
lazy_attributes plugins.
* A pg_enum extension has been added, for easier dealing with
PostgreSQL enum types. The possible values for the type
are then returned in the schema hashes under the :enum_values
key. It also adds create_enum, drop_enum, and add_enum_value
Database methods for migration support.
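  A sketch of the migration methods (the enum and table names are
  hypothetical):

    DB.extension :pg_enum
    DB.create_enum(:mood, %w'sad ok happy')
    DB.create_table(:people) do
      mood :current_mood
    end
    DB.add_enum_value(:mood, 'excited')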
* A round_timestamps extension has been added, for automatically
rounding timestamps to database supported precision when
literalizing.
* A dataset_source_alias extension has been added, for automatically
aliasing datasets to their first source, instead of using t1, t2.
Example:
DB.from(:a, DB[:b]).sql
# SELECT * FROM a, (SELECT * FROM b) AS t1
DB.extension(:dataset_source_alias)
DB.from(:a, DB[:b]).sql
# SELECT * FROM a, (SELECT * FROM b) AS b
* On Microsoft SQL Server, Sequel now emulates RETURNING support
using the OUTPUT clause, as long as only simple column references
are used.
= Other Improvements
* A regression has been fixed in the timestamps and table
inheritance plugins, where column values would not be
saved when skipping validations. This was first broken in
4.11.0.
* A regression has been fixed on JRuby and Rubinius when using
Sequel::Model(dataset) if the dataset needs to literalize a
symbol (and most do). This was first broken in 4.10.0.
* Primary keys are now automatically setup for models even if
the models select specific columns.
* The lazy_attributes plugin now uses qualified columns in its
selection, instead of unqualified columns.
* When looking up model instances by primary key, Sequel now uses a
qualified primary key if the model uses a joined dataset.
* For associations that require joins, Sequel will now use the
associated model's selection directly (instead of
associated_table.*) if the associated model's selection consists
solely of qualified columns.
Among other things, this means that a many_to_many association to
a model that uses lazy attributes will not eagerly load the lazy
attributes by default.
* Model#save now uses insert_select if there is an existing
RETURNING clause used by the underlying dataset, even if the model
selects specific columns.
* In Dataset#insert, aliased tables are now automatically unaliased.
This allows you to use a dataset with an aliased table and have
full SELECT/INSERT/UPDATE/DELETE support, assuming the database
supports aliased tables in UPDATE and DELETE.
* Dataset#graph now qualifies columns correctly if the current
dataset is a joined dataset and it moves the current dataset to
a subselect.
* Dataset#joined_dataset? is now a public method, and can be used to
determine whether the dataset uses a join, either explicitly via
JOIN or implicitly via multiple FROM tables.
* The Dataset#unqualified_column_for helper method has been added,
returning the unqualified version of a possibly qualified column.
* The composition and serialization plugins now support validations
on the underlying columns. Previously, they didn't update the
underlying columns until after validations were performed. This
works better when using the auto_validations plugin.
* The class_table_inheritance plugin now uses JOIN ON instead of
JOIN USING, which makes it work on all databases that Sequel
supports. Additionally, the plugin now explicitly selects
qualified columns from all of the tables.
* The list plugin now adds an after_destroy hook that will renumber
rows after the current row, similar to how moving existing values
in the list works.
* The pg_json extension is now faster when json column value is a
plain string, number, true, false, or nil, if the underlying json
library handles such values natively.
* External jdbc, odbc, and do subadapters can now be loaded
automatically without requiring them first, assuming proper
support in the external subadapter.
* When using create_table on MySQL, correctly handle the :key
option to when calling foreign_key with a column reference.
* On Oracle, use all_tab_cols instead of user_tab_cols for getting
default values when parsing the schema. This makes it work if the
user does not own the table.
* On Oracle, use all_tables and all_views for Database#tables and
Database#views. This works better for users with limited rights.
* Additional disconnect errors are now recognized in the postgres and
jdbc/mysql adapters.
* Sequel::Model now uses copy constructors (e.g. initialize_copy)
instead of overriding #dup and #clone.
* The rake default task now runs plugin specs in addition to
core and model specs.
= bin/sequel Improvements
* Add the sequel lib directory to the front of the load path
instead of the end, fixing cases where you end up requiring an
old version of the sequel gem (e.g. by using sequel_pg).
* Add the sequel lib directory as an absolute path, fixing cases
where you later change the current directory.
* Require sequel later in the code, so that bin/sequel -h doesn't
need to require sequel, and full backtrace is not printed if
requiring sequel raises an error (unless -t is used).
* If an exception is raised, put a newline between the exception
message and backtrace.
* Don't allow usage of -C with any of -cdDmS.
* If sequel -v is given along with a database or code string to
execute, print the Sequel version but also continue, similar
to how ruby -v works.
= Backwards Compatibility
* The switch from JOIN ON to JOIN USING in the
class_table_inheritance can break certain usage, such as querying
using unqualified primary key. Users should switch to using a
qualified primary key instead.
* Calling Dataset#returning when the underlying database does not
support it now raises an Error.
sequel-5.63.0/doc/release_notes/4.14.0.txt
= New Features
* Delayed evaluation blocks can now accept the dataset literalizing
the delayed evaluation as an argument. This makes it so the
delayed evaluation result can depend on the dataset doing the
literalization:
ds = DB[:a].where(Sequel.delay do |ds|
{Sequel.qualify(ds.first_source, :col)=>1}
end)
ds.sql # SELECT * FROM a WHERE (a.col = 1)
ds.from(:b).sql # SELECT * FROM b WHERE (b.col = 1)
* Database#create_trigger on PostgreSQL now supports a :when option
to create a filter for the trigger, so that it is only triggered
when the filter condition is true.
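  For example (a sketch; the table, trigger, and function names are
  hypothetical):

    DB.create_trigger(:accounts, :audit_balance, :audit_fn,
      :each_row=>true, :events=>:update,
      :when=>Sequel.lit('NEW.balance <> OLD.balance'))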
* You can now override the cache key prefix in the caching plugin
by overriding the cache_key_prefix class method. This can be
useful when using a table inheritance plugin.
= Other Improvements
* You can now pass arbitrary types to Dataset#where and related
methods. Previously, if a type was not explicitly handled, an
exception would be raised. Now you can pass any object that can
be literalized. The only exception is that you can't pass
Numeric objects, since #where and similar methods should
only deal with boolean expressions.
* association_join and related methods now work correctly if the
dataset already has an explicit selection.
* A regression has been fixed in the class_table_inheritance plugin
when using a hierarchy of more than 2 levels, when using the
superclass to load a subclass instance more than 2 levels below,
and later attempting to load a column contained in one of the
middle tables.
* When using _delete or _remove keys in the nested_attributes plugin
to remove existing associated objects, the associated objects are
now deleted from the cached association array at time of call.
This is for consistency when adding new associated objects, where
the new associated objects are added to the cached association
array at time of call.
* The nested_attributes plugin now handles composite primary keys
correctly when working around validation issues for one_to_one
and one_to_many associations.
* If exception A is raised during a transaction, and exception B
is raised while attempting to rollback the transaction, the
transaction code will now raise exception A instead of exception B.
* An additional serialization failure is now detected on PostgreSQL.
* An additional disconnect error is now recognized in the jdbc/jtds
adapter.
* The code examples in the RDoc are now syntax highlighted, and
many minor fixes to the code examples in the RDoc were made.
Additionally, many other improvements were made to the RDoc.
= Backwards Compatibility
* Dataset#delayed_evaluation_sql_append now accepts the delayed
evaluation as an argument, instead of the callable contained by the
delayed evaluation.
sequel-5.63.0/doc/release_notes/4.15.0.txt
= New Features
* fdbsql and jdbc/fdbsql adapters have been added, for connecting to
FoundationDB SQL Layer.
* A split_values plugin has been added, for moving non-column entries
from the values hash into a separate hash. This allows you to
select additional columns (e.g. computed columns) when retrieving
model instances, and be able to save those instances without
removing the additional columns.
* A Sequel::Model.cache_associations accessor has been added, which
can be set to false to not cache any association metadata. This
can fix issues in a development environment that uses code
reloading.
* The active_model plugin now supports activemodel 4.2.0beta1.
* More PostgreSQL array types are handled automatically by the
pg_array extension, such as xml[] and uuid[].
* Creating foreign tables is now supported on PostgreSQL via the
:foreign and :options create_table options.
* The :nolog Database option is now supported in the informix
adapter, where it disables the use of transactions.
* PlaceholderLiteralizer#with_dataset has been added, allowing you
to create another PlaceholderLiteralizer with a modified dataset,
useful if you want to change the row_proc or any non-SQL dataset
options.
= Other Improvements
* The tactical_eager_loading plugin once again works correctly with
limited associations.
* A bug in older versions of MySQL is now worked around when schema
dumping a table with multiple timestamp columns.
* On PostgreSQL, create_view(:view_name, dataset, :materialized=>true)
is now reversible.
* Postgres::{JSON,JSONB}Op#to_record and #to_recordset no longer take
an optional argument. This was supported in PostgreSQL 9.4beta1,
but removed before PostgreSQL 9.4beta2.
* Dataset#insert now returns the last inserted id in the informix
adapter.
* Sequel no longer raises an exception in
AssociationReflection#reciprocal if the associated class has an
association that does not have a valid associated class.
* Sequel now raises an exception if a primary key is necessary to use
an association, but the model does not have a primary key.
sequel-5.63.0/doc/release_notes/4.16.0.txt
= New Features
* Model#qualified_pk_hash has been added, which is similar to
Model#pk_hash, but uses qualified keys.
* Dataset#distinct now accepts a virtual row block.
* Database#drop_table with :foreign=>true option now drops foreign
tables on PostgreSQL. Database#create_table with :foreign option
is now reversible on PostgreSQL.
= Other Improvements
* Sequel::Model.cache_associations = false now skips the database's
schema cache when loading the schema for a model. This fixes
some issues in environments that use code reloading.
* Database#create_table? and #create_join_table? no longer use
IF NOT EXISTS if indexes are being created.
* Model.primary_key_hash and .qualified_primary_key_hash have been
optimized.
* validates_unique in the validation_helpers plugin now uses a
qualified primary key if the model's dataset is joined. This fixes
a case when the auto_validations and class_table_inheritance
plugins are used together.
* Disconnect errors are now recognized in the postgres adapter when
SSL is used for connecting.
* Empty string default values are no longer converted to nil default
values on MySQL.
* Database#foreign_key_list now works correctly on Microsoft SQL
Server 2005.
sequel-5.63.0/doc/release_notes/4.17.0.txt
= New Features
* A :preconnect Database option has been added, for automatically
creating the maximum number of connections to the database on
instantiation. This is useful when there is high latency for
initial connection setup, where Sequel's usual approach of
connecting as needed can cause pauses at runtime.
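  For example (the connection URL is hypothetical):

    DB = Sequel.connect('postgres://dbserver/mydb',
      :max_connections=>10, :preconnect=>true)
    # all 10 connections are established during Sequel.connect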
* Database#sharded? has been added for checking whether the Database
object uses multiple servers.
* Dataset#server? has been added, for returning a cloned dataset
associated with the given server/shard if the dataset does not
already have a server set. This returns the receiver if the
server has already been set or the Database is not sharded.
= Other Improvements
* Sequel now uses the correct shard when deleting model instances.
Previously, the correct shard was only used in the unoptimized
case, not in the optimized case.
* Sequel now uses the correct shard when using Dataset#insert_select
on PostgreSQL. This was first broken in the 4.13.0 release.
* Sequel now correctly handles Sequel::SQL::Blob instances used in
bound variables in the postgres adapter. Previously this resulted
in duplicate apostrophes being used.
* When using the jdbc/sqlite3 adapter with jdbc-sqlite3 3.8.7, Sequel
now handles date objects and empty blobs correctly, working around
bugs in the driver.
= Backwards Compatibility
* In the update_or_create plugin, Model.update_or_create now always
returns the object. Previously it would not return the object if
the object already existed but no updates were necessary.
sequel-5.63.0/doc/release_notes/4.18.0.txt
= New Features
* An :auto_increment key has been added to the schema information for
primary key columns on JDBC, PostgreSQL, MySQL, MSSQL, DB2, and
SQLite. This fixes issues in the schema_dumper extension where
non-auto-incrementing integer primary keys are no longer dumped as
auto-incrementing.
For adapters that don't have specific support for detecting
auto incrementing primary keys, Sequel now assumes a primary key
is auto incrementing only if it is not a composite primary key
and the type contains int (e.g. int, integer, bigint).
= Other Improvements
* Dataset#empty? now ignores any order on the dataset. Previously,
calling empty? on a dataset ordered by an alias in the SELECT list
could raise an exception.
* Schema qualified tables are now handled correctly in
many_through_many associations.
* Using a hash as the value for the :eager association option now
works correctly.
* All PG::ConnectionBad exceptions are now treated as disconnect
errors in the postgres adapter. This should be more robust than
the previous method of trying to recognize disconnect errors by
trying to parse the exception message.
* Sequel now skips a hash allocation when issuing queries through
datasets if sharding is not used.
* Sequel no longer uses the JDBC schema parsing in the jdbc/sqlserver
adapter. Instead, it uses the MSSQL schema parsing, which should
be more accurate than the generic JDBC schema parsing.
sequel-5.63.0/doc/release_notes/4.19.0.txt
= New Features
* Model#get_column_value and #set_column_value have been added for
getting/setting column values. Historically, to get column
values, you would just send the column name, and to set column
values you would send the column name suffixed by =. However,
this doesn't work when such methods are already defined by
ruby or Sequel itself (e.g. class, model, object_id).
Both #get_column_value and #set_column_value are just aliases to
#send, but you can safely override the methods to handle column
names that conflict with existing method names. Both the core
model code and all of the plugins that ship with Sequel have
been updated to use these new methods. External plugins are
strongly encouraged to switch to these new methods.
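  For example, with a column named "model" (which conflicts with the
  existing Model#model method):

    album = Album[1]
    album.get_column_value(:model)         # get the model column's value
    album.set_column_value(:model=, 'ABC') # set the model column's value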
* A column_conflicts plugin has been added to automatically handle
columns that conflict with existing method names. So if you
have a column named "model" in your table, you can just load the
column_conflicts plugin and Sequel will handle things correctly.
* An accessed_columns plugin has been added, which records which
  columns have been accessed for a model instance. This is useful
  in development when you are planning on restricting the columns
selected by the dataset that retrieved the instance. SELECTing
only the columns you need can result in significant performance
increases, and the accessed_columns plugin makes that easier.
* Model#cancel_action has been added for canceling actions in
before hooks, instead of having the before hook methods return
false (which is still supported). In addition to being easier to
use, this also makes it possible to use custom exception messages
for hook failures, if you are using the default behavior of raising
exceptions on save failures.
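  For example (a sketch; the model and column are hypothetical):

    class Album < Sequel::Model
      def before_save
        cancel_action('cannot save an archived album') if archived
        super
      end
    end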
= Other Improvements
* Dataset#union, #intersect, and #except now automatically handle
datasets with raw SQL, by wrapping such datasets in subqueries.
* The integer migrator now stores the correct migration number
when migrating with allow_missing_migration_files set.
* A :timeout=>nil Database option on MySQL no longer sets a
wait_timeout.
sequel-5.63.0/doc/release_notes/4.2.0.txt
= New Features
* LATERAL subqueries are now supported on PostgreSQL 9.3+, HSQLDB,
and DB2 via Dataset#lateral:
DB.from(:a, DB[:b].where(:c=>:a__d).lateral)
# SELECT * FROM a,
# LATERAL (SELECT * FROM b WHERE (c = a.d)) AS t1
You can use a similar syntax when joining tables:
DB[:a].cross_join(DB[:b].where(:c=>:a__d).lateral)
# SELECT * FROM a
# CROSS JOIN LATERAL (SELECT * FROM b WHERE (c = a.d)) AS t1
If you are using Microsoft SQL Server, you can use the new
mssql_emulate_lateral_with_apply extension to emulate LATERAL
subqueries via CROSS/OUTER APPLY.
* The static_cache plugin now supports a :frozen=>false option. When
this option is used, instead of returning the frozen cached values,
the model now returns new, unfrozen objects that can be modified.
Note that if you make any database modifications, you are
responsible for updating the cache manually.
* A pg_static_cache_updater extension has been added. This extension
can automatically update the caches used by the static_cache plugin,
whenever the underlying database table is updated, using PostgreSQL's
notification channels.
This works by defining triggers on the underlying model tables that
use NOTIFY, and spinning up a thread in your application processes
that uses LISTEN, and refreshes the cache for the related model
whenever it receives a notification that the underlying table has
been modified.
This extension should make it possible to use the static_cache plugin
with the :frozen=>false option for any table that is small and not
frequently updated.
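  A sketch of the expected setup (the table/model names are
  hypothetical):

    # During migration/setup:
    DB.extension :pg_static_cache_updater
    DB.create_static_cache_update_function
    DB.create_static_cache_update_trigger(:albums)

    # In each application process:
    DB.extension :pg_static_cache_updater
    DB.listen_for_static_cache_updates([Album])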
* A from_block extension has been added that makes Database#from operate
like Dataset#from in regards to a passed block, allowing you to write
code like:
DB.from{table_returning_function(arg1, arg2)}
* Database#supports_partial_indexes? has been added for checking for
partial index support. Partial indexes are now supported on SQLite
3.8.0+.
* A pg_loose_count extension has been added for fast approximate counts
of PostgreSQL tables. This uses the system tables and should be
fairly accurate if the table statistics are up to date:
DB.loose_count(:table)
* The Dataset#use_cursor method in the postgres adapter now supports
a :cursor_name option. You can set this option if you want to
use nested cursors.
* The mysql2 adapter now supports a :flags Database option allowing to
set custom mysql2 flags (e.g. ::Mysql2::Client::MULTI_STATEMENTS).
= Other Improvements
* Dataset#freeze has been implemented. Previously, it was not
implemented, so Object#freeze was used, which resulted in a dataset
that wasn't cloneable. Dataset#freeze now works as expected,
resulting in a cloneable dataset, but it doesn't allow methods to
be called that mutate the receiver.
* Dataset#dup has been implemented. Previously, it was not
implemented, so Object#dup was used, which resulted in a dataset
that shared an options hash with the receiver, so modifying the
dup's opts could also change the original dataset. Now dup works
similarly to clone, except that the returned object will not be
frozen.
* Model#dup has been implemented. Previously, it was not implemented,
so Object#dup was used, which resulted in a model instance that
shared the values hash with the receiver, so modifying the dup's
values also changed the original's values. Now, dup does a shallow
copy of some of the internal data structures as well, so the copy
is more independent.
Note that you still need to be careful if you mutate objects:
m = Model.new(:a=>'a')
m2 = m.dup
m.a.gsub!('a', 'b') # also changes m2
* Model#clone has been implemented. Previously, it had the same
issues as dup. Now, it calls the new Model#dup, but also
freezes the returned object if the receiver is frozen.
* Placeholder literal strings with an empty parameter hash are now
handled correctly.
= Backwards Compatibility
* The static_cache plugin now disallows saving/destroying instances
unless the :frozen=>false option is used. As the cached objects
returned by the model were frozen anyway, this affects creating
new instances or saving/destroying instances returned from the
underlying dataset.
* Model#set_values has been removed (it was deprecated starting in
Sequel 4.0).
* The following Model class methods are no longer defined:
insert_multiple, set, to_csv, paginate, query, set_overrides,
set_defaults. By default, these methods used to call the
dataset method of the same name, but as those methods are no
longer defined on datasets by default, they also resulted in a
NoMethodError.
* Dataset#query!, #set_defaults!, and #set_overrides! are no longer
defined on all datasets. They are now only defined on datasets
that use the query or set_overrides extensions.
* Partial indexes are no longer returned by Database#indexes on MSSQL,
for consistency with PostgreSQL. Note that the same change was
desired for SQLite, but SQLite currently does not offer reflection
support for determining which indexes are partial.
* Database#foreign_key_list on MSSQL now will return a
SQL::QualifiedIdentifier instead of a symbol for the :table entry if
the schema of the referenced table does not match the schema of the
referencing table.
sequel-5.63.0/doc/release_notes/4.20.0.txt
= New Features
* A :before_retry option has been added to Database#transaction, which
specifies a proc to call when retrying if the :retry_on option
is used. This can be used to implement additional logging, sleeping
between retries, or other things.
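  For example, to sleep between retry attempts (a sketch):

    DB.transaction(:retry_on=>Sequel::SerializationFailure,
      :before_retry=>proc{|attempt, err| sleep(attempt)}) do
      # transactional work
    end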
* The to_json method :root option in the json_serializer plugin can now
be a string value to specify the name for the object key, instead of
using the underscored model name.
* Dataset#paged_each now returns an enumerator if not passed a block.
* You can now set the :instance_specific association option to false.
Previously, it was automatically set to true in some cases. If you
know the association does not depend on anything instance-specific
other than the foreign/primary key, setting this option can allow
Sequel to perform some additional optimizations.
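A sketch (hypothetical models; the block depends only on the current
date, not on instance state):
  Album.one_to_many :recent_tracks, :class=>:Track,
      :instance_specific=>false do |ds|
    ds.where{date_added > Date.today - 7}
  end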
= Other Improvements
* Eager loading queries are now skipped if there are no matching keys.
There was code to check this previously, but it was accidentally
removed in an earlier refactoring.
* Eager loading an association with a limit and an eager block and
cascaded associations now works correctly when the window_function
limit strategy is used (the default on databases that support
window functions).
* Eager loading an association with a limit with an eager block now
works correctly on databases that do not support window functions but do
support correlated subqueries.
* The parent association is now set on associated objects when loading
descendants in the rcte_tree plugin. This allows the parent method
on any of the descendants to work without issuing a database query.
* The prepared_statements_associations plugin now only uses prepared
statements if association metadata is being cached. Previously, it
would use prepared statements even if association metadata was not
cached, which could leak the prepared statements.
* Model#dup now duplicates the associations hash for the object.
* Model#freeze no longer validates an object if the errors for the
object are already frozen. The static_cache plugin now freezes the
errors before freezing the object, so that it doesn't validate the
object. This can skip many database queries when the
auto_validations plugin is used and there is a unique constraint or
index on the related table.
* AUTOINCREMENT is now used again on SQLite by default for primary
keys. It was removed when :auto_increment was added to the schema
hashes, but the removal changed SQLite's behavior. This restores
the previous behavior.
* Microsoft SQL Server's bit type is now recognized as a boolean type
by the schema dumper.
* The pg_enum extension's create_enum method can now be used in
reversible migrations.
* set_column_type with the :auto_increment=>true option once again
works on MySQL. It had been broken since Sequel started adding
:auto_increment to the schema hashes.
* The mysql2 adapter now recognizes the :charset option as a synonym
for :encoding.
* The swift adapter now respects database and application timezone
settings.
= Backwards Compatibility
* AssociationReflection#apply_ruby_eager_limit_strategy no longer
checks that the strategy is :ruby, callers are now expected to
check the value themselves. This should only matter if you are
using custom association types.
sequel-5.63.0/doc/release_notes/4.21.0.txt 0000664 0000000 0000000 00000006460 14342141206 0017766 0 ustar 00root root 0000000 0000000 = New Features
* SQL::GenericExpression#=~ has been added as an alternative method
of specifying equality/inclusion/identity. Previously, you had to
use a hash. This led to some slightly weird looking syntax when
used inside virtual rows:
DB[:items].where{{function(:column)=>0}}
# SELECT FROM items WHERE function(column) = 0
You can now use =~ as an equivalent:
DB[:items].where{function(:column) =~ 0}
# SELECT FROM items WHERE function(column) = 0
Like when using a hash, this works also for inclusion:
DB[:items].where{function(:column) =~ [1,2,3]}
# SELECT FROM items WHERE function(column) IN (1, 2, 3)
for identity:
DB[:items].where{function(:column) =~ nil}
# SELECT FROM items WHERE function(column) IS NULL
and for matching (on MySQL/PostgreSQL):
DB[:items].where{function(:column) =~ /foo/i}
# SELECT FROM items WHERE function(column) ~* 'foo'
This new syntax makes more complex conditions simpler to express:
DB[:items].where{(function(:column) =~ 0) | (column =~ 1)}
# SELECT FROM items WHERE function(column) = 0 OR column = 1
compared to previous versions of Sequel:
DB[:items].where{Sequel.|({function(:column) => 0}, {:column => 1})}
On ruby 1.9+, you can also use SQL::GenericExpression#!~ to invert
the condition:
DB[:items].where{function(:column) !~ 0}
# SELECT FROM items WHERE function(column) != 0
DB[:items].where{function(:column) !~ [1,2,3]}
# SELECT FROM items WHERE function(column) NOT IN (1, 2, 3)
DB[:items].where{function(:column) !~ nil}
# SELECT FROM items WHERE function(column) IS NOT NULL
DB[:items].where{function(:column) !~ /foo/i}
# SELECT FROM items WHERE function(column) !~* 'foo'
This makes it simpler to write inverted conditions. Ruby 1.8
doesn't support overriding the !~ method, but you can still use the
unary ~ method to invert:
DB[:items].where{~(function(:column) =~ 0)}
* Database#add_named_conversion_proc has been added on PostgreSQL to
make it easier to add conversion procs by name instead of by OID:
DB.add_named_conversion_proc(:citext){|s| s}
* Database#full_text_search on PostgreSQL now supports :tsquery and
:tsvector options for using existing tsquery and/or tsvector
arguments, instead of assuming the arguments are query terms or
the text to be searched.
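A sketch (assuming a tsv_col tsvector column; to_tsquery is a
PostgreSQL function):
  DB[:posts].full_text_search(:tsv_col,
    Sequel.function(:to_tsquery, 'foo & bar'),
    :tsvector=>true, :tsquery=>true)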
= Other Improvements
* Database#transaction now works inside after_commit and
after_rollback hooks. Previously, it didn't work correctly as it
thought it was already inside the previously committed/rolled back
transaction.
* Sequel.pg_jsonb now returns JSONBOp instances instead of JSONOp
instances when passed other than Array or Hash.
* The tinytds adapter no longer tries to cancel a query on a closed
connection, which was causing an exception to be raised.
= Backwards Compatibility
* The default root name used in the JSON serializer is now demodulized
before being underscored. This changes the behavior when the model
is namespaced. For example, if the model class name is Mod::Model,
the previous default root name would be "mod/model", the new default
root name is "model".
* If you were calling =~ or !~ on SQL::GenericExpression objects and
expecting the default ruby behavior of returning nil for =~ and
true for !~, you'll have to update your code.
sequel-5.63.0/doc/release_notes/4.22.0.txt 0000664 0000000 0000000 00000005573 14342141206 0017773 0 ustar 00root root 0000000 0000000 = New Features
* A csv_serializer plugin has been added, for serializing model
objects and datasets to CSV, or parsing CSV into a model
object or array of model objects. Behavior and API is similar to
the existing xml_serializer and json_serializer plugins.
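For example (hypothetical Album model):
  Album.plugin :csv_serializer
  Album[1].to_csv      # serialize a single instance
  Album.dataset.to_csv # serialize a dataset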
* Sequel::MassAssignmentRestriction is now raised for mass assignment
errors in strict mode (the default). Previously the generic
Sequel::Error was used.
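For example, assuming a model whose primary key setter is restricted
by default:
  begin
    Album.new(:id=>1)
  rescue Sequel::MassAssignmentRestriction
    # handle the restricted assignment
  end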
= Other Improvements
* On Ruby 1.9+, Sequel now uses condition variables instead of busy
waiting for connections in the threaded connection pools. This
can significantly decrease overhead when waiting for connections,
which can improve performance on machines that are compute
bottlenecked. This also makes the connection pool checkouts more
fair, reducing the chance that a request for a connection will
fail with a PoolTimeout when under heavy resource contention.
* Sequel now attempts to avoid hash allocations and rehashing in
performance sensitive code. This can speed up Dataset#clone,
Model#clone, and Model#dup by about 60%, and speed up method
chains such as:
ds.select(:a).where(:a=>1).order(1)
by almost 20%.
* Symbol#to_proc is used instead of explicit blocks across the
library, which should improve performance slightly on Ruby 1.9+.
* When Model#cancel_action is used in association before hooks,
Sequel will now return false if raise_on_save_failure = false,
instead of raising an exception. This mirrors the behavior
when Model#cancel_action is used inside model save hooks when
raise_on_save_failure = false.
* Dataset#to_hash and #to_hash_groups now work correctly on model
datasets when given a single array argument.
* The auto_validations plugin now works correctly on columns that
have a default value, but where the default value is not parseable
into a ruby object by the adapter.
* The tree plugin now correctly sets the reciprocal association
in the children association it creates.
* In the pg_array extension, if the :default value when creating a
column is set to a ruby array, Sequel will now convert it to a
PostgreSQL array.
* Sequel no longer adds a :max_length entry to the schema for
varchar(max) columns on Microsoft SQL Server.
* Adapters now are specified to set the :default schema entry for
columns to nil if the adapter can determine the :default is nil.
Adapters that ship with Sequel already did this, but previously
it was unspecified behavior.
* Sequel no longer silently ignores the :jdbc_properties Database
option in the jdbc adapter. Previously, it only used the
:jdbc_properties option if it was not able to connect without it.
* Bit types are now converted to boolean values in the ODBC adapter.
= Backwards Compatibility
* The db2, dbi, fdbsql, firebird, jdbc/fdbsql, informix, and openbase
adapters are now deprecated and will be removed in a future version
of Sequel.
sequel-5.63.0/doc/release_notes/4.23.0.txt 0000664 0000000 0000000 00000004606 14342141206 0017770 0 ustar 00root root 0000000 0000000 = New Features
* An update_refresh plugin has been added, for refreshing a model
instance when updating. The default behavior is to only refresh
when inserting. However, if you have triggers on the model's table,
it's a good idea to refresh when updating to pick up the possibly
changed values. On databases that support UPDATE RETURNING, such as
PostgreSQL, the update and refresh are done in a single query.
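For example (hypothetical Album model with a trigger-maintained table):
  Album.plugin :update_refresh
  album = Album[1]
  album.update(:name=>'Foo')
  # column values changed by triggers are now reflected in album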
* A delay_add_association plugin has been added, for delaying add_*
method calls for associations until after the receiver has been
saved, if the receiver is a new object. Example:
artist = Artist.new(:name=>'Foo')
artist.add_album(Album.new(:name=>'Bar'))
# No database queries yet
artist.save # Saves artist, then album
* A validate_associated plugin has been added, for validating
associated objects when validating the current object. This
was extracted from the nested_attributes plugin, and is also
used by the delay_add_association plugin. For example,
if you have an albums association and you want to validate all
associated objects before saving the current object, you can
call validate_associated_object for each object:
def validate
super
reflection = association_reflection(:albums)
associations[:albums].each do |obj|
validate_associated_object(reflection, obj)
end
end
= Other Improvements
* Database#transaction now returns the block return value if
:rollback=>:always is used. Previously, it would return nil in
that case.
* Postgres::JSONBOp#[] and #get_text now return JSONBOp instances
instead of JSONOp instances.
* Model#move_to, #move_up, and #move_down in the list plugin now
automatically handle out-of-range targets by defaulting to the first
or last position in the list. Previously, using an out of range
target would raise an exception.
* Database#add_named_conversion_proc on PostgreSQL now works for enum
types.
* dataset.call_sproc(:insert, ...) now works correctly on JDBC.
* postgresql:// connection strings are now supported, since that is
the protocol name supported by libpq.
* Sequel has switched from rspec to minitest/spec for testing, and
now uses random test order when testing. During the conversion
process, many test order dependency bugs were fixed.
= Backwards Compatibility
* The deprecated fdbsql, jdbc/fdbsql, and openbase adapters have been
removed.
sequel-5.63.0/doc/release_notes/4.24.0.txt 0000664 0000000 0000000 00000010004 14342141206 0017756 0 ustar 00root root 0000000 0000000 = New Features
* A pg_inet_ops extension has been added, for DSL support for
calling PostgreSQL inet functions and operators. Example:
r = Sequel.pg_inet_op(:inet)
~r # ~inet
r & :other # inet & other
r | :other # inet | other
r << :other # inet << other
r >> :other # inet >> other
r.contained_by(:other) # inet << other
r.contained_by_or_equals(:other) # inet <<= other
r.contains(:other) # inet >> other
r.contains_or_equals(:other) # inet >>= other
r.contains_or_contained_by(:other) # inet && other
r.abbrev # abbrev(inet)
r.broadcast # broadcast(inet)
r.family # family(inet)
r.host # host(inet)
r.hostmask # hostmask(inet)
r.masklen # masklen(inet)
r.netmask # netmask(inet)
r.network # network(inet)
r.set_masklen(16) # set_masklen(inet, 16)
r.text # text(inet)
* The association_pks plugin now supports a :delay_pks association
option. When set to true, this makes the methods created by the
plugin usable on new objects, by delaying the saving of the
associated pks until after the new object has been saved. When
set to :always, this also changes the behavior of the methods
for existing objects, so that nothing is persisted until the
object has been saved. Example:
Album.plugin :association_pks
Album.many_to_many :tags, :delay_pks=>true
album = Album.new(:tag_pks=>[1,2,3]) # No database query
album.save # Queries to insert album, and then update albums_tags
* The class_table_inheritance plugin now supports subclasses that
don't require additional columns, and therefore do not need to
join to additional tables. It now loads the
single_table_inheritance plugin and supports options that were
previously only supported by single_table_inheritance, such as the
:key_map and :key_chooser options.
* The validation_helpers plugin now supports a :from=>:values option
in the validation methods, which will take the value directly from
the values hash instead of calling the related method. This
allows validation_helpers to differentiate between validations on
underlying database column and validations on the model.
The auto_validations plugin has been modified to use this feature,
since all validations it generates are for validations on the
underlying database columns.
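A sketch of using the :from=>:values option directly (hypothetical
name column):
  def validate
    super
    validates_presence :name, :from=>:values
  end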
* The auto_validations plugin now supports options to pass to each
of the underlying validation methods:
Sequel::Model.plugin :auto_validations,
:unique_opts=>{:only_if_modified=>true}
In addition to :unique_opts, there is support for :not_null_opts
(for NOT NULL columns without a default), :explicit_not_null_opts
(for NOT NULL columns with a default), :max_length_opts, and
:schema_types_opts.
* The update_refresh plugin now accepts a :columns option, which
specifies the columns to refresh. This option is currently only
respected if the related dataset supports RETURNING.
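A sketch (assuming the option is given when loading the plugin;
hypothetical column names):
  Album.plugin :update_refresh, :columns=>[:name, :updated_at]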
* The :timeout option to Database#listen in the postgres adapter can
now be a callable object; previously, it had to be Numeric. This
allows you to dynamically change the timeout based on current
application state.
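A sketch (queue_empty? is a hypothetical application method):
  DB.listen(:changes,
            :timeout=>proc{queue_empty? ? 60 : 5},
            :loop=>true) do |channel, pid, payload|
    # handle the notification
  end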
= Other Improvements
* The uniqueness validations added by the auto_validations plugin now
use a symbol key in the related Errors instance if the underlying
index was on a single column. Previously, the uniqueness
validations for a single column would use an array key in the
related Errors instance.
* The jdbc subadapters now correctly handle 64-bit autoincrementing
primary keys.
* The jdbc subadapters now work correctly if they issue queries while
the subadapter is being loaded. This can happen in the
jdbc/postgresql adapter if the pg_hstore extension is used.
= Backwards Compatibility
* The deprecated db2 and dbi adapters have been removed.
sequel-5.63.0/doc/release_notes/4.25.0.txt 0000664 0000000 0000000 00000014402 14342141206 0017765 0 ustar 00root root 0000000 0000000 = New Features
* The =~ and !~ methods are now defined on ComplexExpressions in
addition to GenericExpressions, allowing the following code to
work:
DB[:table].where{(column1 + column2) =~ column3}
* Dataset#group_append has been added for appending to an existing
GROUP BY clause:
ds = DB[:table].group(:column1)
# SELECT * FROM table GROUP BY column1
ds = ds.group_append(:column2)
# SELECT * FROM table GROUP BY column1, column2
* An inverted_subsets plugin has been added, for automatic creation of
methods for the inversion of the subset criteria. For example:
Album.plugin :inverted_subsets
Album.subset :published, :published=>true
Album.published
# SELECT * FROM albums WHERE published IS TRUE
Album.not_published
# SELECT * FROM albums WHERE published IS NOT TRUE
By default, the subset method name is prefixed with "not_". You can
pass a block to override the default behavior:
Album.plugin(:inverted_subsets){|name| "exclude_#{name}"}
Album.subset :published, :published=>true
Album.exclude_published
# SELECT * FROM albums WHERE published IS NOT TRUE
* A singular_table_names plugin has been added, which changes Sequel
to not pluralize table names by default.
Sequel::Model.plugin :singular_table_names
class FooBar < Sequel::Model; end
FooBar.table_name # => foo_bar
* Dataset#insert_conflict and #insert_ignore have been added on
PostgreSQL. When using PostgreSQL 9.5+, they allow you to ignore
unique or exclusion constraint violations on inserting, or to do
an update instead:
DB[:table].insert_conflict.insert(:a=>1, :b=>2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT DO NOTHING
You can pass a specific constraint name using :constraint, to only
ignore a specific constraint violation:
DB[:table].insert_conflict(:constraint=>:table_a_uidx).
insert(:a=>1, :b=>2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT ON CONSTRAINT table_a_uidx DO NOTHING
If the unique or exclusion constraint covers the whole table (e.g.
it isn't a partial unique index), then you can just specify the
column using the :target option:
DB[:table].insert_conflict(:target=>:a).insert(:a=>1, :b=>2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) DO NOTHING
If you want to update the existing row instead of ignoring the
constraint violation, you can pass an :update option with a hash of
values to update. You must pass either the :target or :constraint
options when passing the :update option:
DB[:table].insert_conflict(:target=>:a,
:update=>{:b=>:excluded__b}).
insert(:a=>1, :b=>2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) DO UPDATE SET b = excluded.b
Additionally, if you only want to do the update in certain cases,
you can specify an :update_where option, which will be used as a
filter. If the row doesn't match the conditions, the constraint
violation will be ignored, but the row will not be updated:
DB[:table].insert_conflict(:constraint=>:table_a_uidx,
:update=>{:b=>:excluded__b},
:update_where=>{:table__status_id=>1}).
insert(:a=>1, :b=>2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT ON CONSTRAINT table_a_uidx
# DO UPDATE SET b = excluded.b WHERE (table.status_id = 1)
* Dataset#group_rollup and #group_cube are now supported when using
PostgreSQL 9.5+.
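For example:
  DB[:test].group(:type_id, :b).group_rollup
  # SELECT * FROM test GROUP BY ROLLUP (type_id, b)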
* Sequel now supports Dataset#returning when using prepared statements
and bound variables:
DB[:table].returning.prepare(:insert, :i, :col=>:$col).
call(:col=>42)
# => [{:col=>42}]
= Other Improvements
* The serialization plugin now integrates with the dirty plugin, so
that column changes are detected correctly. However, column values
that are changed and then changed back to the original value are
still detected as changed.
* Dataset#for_update and similar locking methods now cause Sequel not
to use the :read_only shard if sharding is used.
* The association_pks plugin now clears cached delayed associated pks
when the object is refreshed.
* The :collate column option when adding columns now literalizes
non-String values on PostgreSQL. Previously, the :collate option
value was used verbatim. This is because PostgreSQL's collations
generally require quoting as they are uppercase or mixed-case.
* Sequel's metadata parsing methods now support Microsoft SQL Server
2012+ when used in case sensitive mode.
* Sequel now recognizes an additional check constraint violation
exception on SQLite.
* Sequel now recognizes constraint violations when using the
swift/sqlite adapter.
* Sequel now automatically REORGs tables when altering them in the
jdbc/db2 adapter.
= Backwards Compatibility
* Sequel now defaults to ignoring NULL values when using IN/NOT IN
with an empty array. Previously, code such as:
DB[:table].where(:column=>[])
would be literalized as:
SELECT * FROM table WHERE (column != column)
This yields a NULL value when column is NULL, similarly to how most
other SQL operators work. Unfortunately, most databases do not
optimize this, and such a query can require a sequential scan of the
table.
Sequel previously shipped with an empty_array_ignore_nulls extension
that literalized the query to:
SELECT * FROM table WHERE (1 = 0)
which databases will generally optimize to a constant false value,
resulting in much faster queries. This behavior is now the default.
Users that desire the previous behavior can use the new
empty_array_consider_nulls extension.
* The deprecated firebird and informix adapters have been removed.
* Calling prepare on a prepared statement now raises an exception.
It was supported accidently before, as prepared statements are
dataset instances.
* Model::DatasetModule#subset now calls Model.subset instead of
the other way around. This makes it possible to modify the
behavior of subset in a plugin.
* The :collate column option change on PostgreSQL can break code
that used already quoted values in symbols. For example:
String :column_name, :collate=>:'"C"'
would need to change to:
String :column_name, :collate=>:C
# or
String :column_name, :collate=>'"C"'
sequel-5.63.0/doc/release_notes/4.26.0.txt 0000664 0000000 0000000 00000003267 14342141206 0017775 0 ustar 00root root 0000000 0000000 = New Features
* Add Dataset#grouping_sets to support GROUP BY GROUPING SETS on
PostgreSQL 9.5+, MSSQL 2008+, Oracle, DB2, and SQLAnywhere:
DB[:test].group([:type_id, :b], :type_id, []).grouping_sets
# SELECT * FROM test
# GROUP BY GROUPING SETS((type_id, b), (type_id), ())
* Sequel::NoMatchingRow exceptions raised by Sequel now give access
to the dataset that raised the exception via the dataset method.
This makes it easier to write generic error handling code.
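For example:
  begin
    DB[:items].where(:id=>1).first!
  rescue Sequel::NoMatchingRow => e
    e.dataset # the dataset that raised the exception
  end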
* Support :if_exists option to drop_column on PostgreSQL:
DB.drop_column :t, :col, :if_exists=>true
# ALTER TABLE t DROP COLUMN IF EXISTS col
= Other Improvements
* Make the class_table_inheritance plugin work correctly without an
sti_key. This was broken in a recent refactoring to make class
table inheritance support multiple classes for a single table.
* Make Class.new(ModelClass){set_dataset :table} work correctly on
ruby 1.8. This was broken in a refactoring to allow the
singular_table_names plugin to work.
* Make offset emulation via ROW_NUMBER better handle ambiguous column
names for datasets without an ORDER BY clause, but with an explicit
SELECT clause.
* Make pg_range extension use PostgreSQL range function constructors
instead of casting string literals to the appropriate range type,
if the range type is known. This allows arbitrary expressions to
be used inside ranges, such as CURRENT_TIMESTAMP in timestamp
ranges.
* Make Dataset#== not consider frozen status.
* Allow Dataset#prepare on already prepared statements in situations
where determining the SQL for a prepared statement requires it.
* Detect additional disconnect errors when using the tinytds adapter.
sequel-5.63.0/doc/release_notes/4.27.0.txt 0000664 0000000 0000000 00000005761 14342141206 0017777 0 ustar 00root root 0000000 0000000 = New Features
* A before_after_save plugin has been added, which for newly
created objects refreshes the object before calling after_create,
and resets the modified flag before calling after_update.
Previously, these actions were not taken until after after_save
was called. This will be the default behavior in Sequel 5.
* In create_table blocks, primary_key now supports a :keep_order
option, which will not change the order in which the primary key
is added. Without this option, Sequel's historical behavior of
making the primary key column the first column is used.
DB.create_table(:foo) do
Integer :a
primary_key :b, :keep_order=>true
end
# CREATE TABLE foo
# (a integer, b integer PRIMARY KEY AUTOINCREMENT)
The schema dumper now uses this option if necessary, allowing it
to correctly dump tables where the primary key column is not the
first column.
* Dataset#single_record! and #single_value! have been added. These
are faster versions of #single_record and #single_value that
don't require cloning the dataset. If you are sure the dataset
will only return a single row or a single value, you can use
these methods for better performance.
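For example (note that unlike the non-bang versions, no LIMIT is
added, so the query itself should return at most one row):
  ds = DB[:albums].where(:id=>1)
  ds.single_record!              # one row, without cloning the dataset
  ds.select(:name).single_value! # one value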
* The new jsonb and json functions added in PostgreSQL 9.5 are now
supported by the pg_json_ops extension.
Sequel.pg_jsonb_op(:metadata).set(%w'a b', [1,2,3])
# jsonb_set("metadata", ARRAY['a','b'], '[1,2,3]'::jsonb, true)
= Other Improvements
* Sequel.synchronize is no longer a stub on MRI. Testing has shown
that relying on the global interpreter lock to protect
multi-threaded access to hashes is not safe in all environments,
so Sequel now uses a mutex on MRI just as it does on other ruby
interpreters.
* Database#schema now sets the :auto_increment option correctly for
auto incrementing primary keys if they are not the first column
in the table.
* Dataset#single_value and #with_sql_single_value are now slightly
faster by avoiding an array allocation.
* Model datasets can now use #with_sql_single_value and return a
single value, instead of an array in [:column_name, value] format.
* Model#persisted? in the active_model plugin will now return false
if the transaction that inserts the row for the object is rolled
back.
* bin/sequel now warns if additional arguments are passed that it
ignores. In Sequel 5, bin/sequel will raise an error in these
cases.
* Database#foreign_key_list on PostgreSQL now returns referenced
composite keys in the correct order.
* The postgres adapter now works with postgres-pr 0.7.0. Note that
postgres adapter users that want a pure-ruby driver are encouraged
to use jeremyevans-postgres-pr as that has many additional bugfixes
and is the version tested with Sequel on a regular basis.
* The jdbc/postgresql adapter now recognizes an additional disconnect
error.
= Backwards Compatibility
* Users who were relying on #with_sql_single_value returning an array
instead of a single value for model datasets need to update their
code.
sequel-5.63.0/doc/release_notes/4.28.0.txt 0000664 0000000 0000000 00000003526 14342141206 0017775 0 ustar 00root root 0000000 0000000 = New Features
* A subset_conditions plugin has been added, which adds a method
for each subset that returns the filter conditions for the
subset. This makes it easier to reuse the subset conditions:
class Foo < Sequel::Model
plugin :subset_conditions
subset :active, :active=>true
end
Foo.exclude(Foo.active_conditions)
Foo.where(:a=>1).or(Foo.active_conditions)
* A boolean_subsets plugin has been added, which adds a subset for each
boolean column:
# Assume boolean column :active
Foo.plugin :boolean_subsets
Foo.active
# SELECT * FROM foos WHERE (active IS TRUE)
You can provide a block to the plugin to change the arguments passed
to subset:
Foo.plugin :boolean_subsets do |column|
[:"where_#{column}", column]
end
Foo.where_active
# SELECT * FROM foos WHERE active
As with similar plugins, you can add the boolean_subsets plugin to
Sequel::Model itself, and all subclasses created afterward will have
the boolean subset methods automatically created.
= Other Improvements
* If Model#refresh can't find the related row, Sequel now raises a
Sequel::NoExistingObject exception instead of a generic
Sequel::Error exception.
* In the csv_serializer plugin, when calling #to_csv on a model class
or dataset, instead of using #[] to access data, #send is used to
call methods. This is more similar to other plugins as well as
Model#to_csv.
* The list plugin now works better with the auto_validations plugin,
or any other time there is a validation on the position column.
= Backwards Compatibility
* The change to the csv_serializer plugin can change results if you
are overriding any of the column accessor methods. It can also
break existing code if one of the columns being used isn't defined
as a method or the method requires more than one argument.
sequel-5.63.0/doc/release_notes/4.29.0.txt 0000664 0000000 0000000 00000002710 14342141206 0017770 0 ustar 00root root 0000000 0000000 = New Features
* A uuid plugin has now been added. This plugin will automatically
create a uuid for newly created model objects.
Model.plugin :uuid
Model.create.uuid # => some UUID
* Model#json_serializer_opts has been added to the json_serializer
plugin, allowing you to override the JSON serialization options
on a per instance basis without passing the options directly
to Model#to_json. This is useful if you are including the model
instance inside another datastructure that will be serialized
to JSON.
obj.json_serializer_opts(:root => true)
[obj].to_json
# => '[{"obj":{"id":1,"name":"Foo"}}]'
= Other Improvements
* The Database#transaction :retry_on option now works when using
savepoints.
* Calling Database#table_exists? inside a transaction will now use
a savepoint if the database supports it, so that if the table
doesn't exist, it will not affect the state of the transaction.
* Blobs can now be used as bound variables in the oracle adapter.
* The sqlanywhere adapter now works with database sharding.
* The Dataset#full_text_search :rank option has been fixed to order
by rank descending instead of ascending.
* External adapters that do not support INSERT with DEFAULT VALUES
can now override Dataset#insert_empty_columns_values to set
the columns and values to use for an empty INSERT.
* External adapters can now implement Dataset#date_add_sql_append
to integrate with the date_arithmetic extension.
sequel-5.63.0/doc/release_notes/4.3.0.txt 0000664 0000000 0000000 00000002433 14342141206 0017702 0 ustar 00root root 0000000 0000000 = New Features
* The tree and rcte_tree plugins now support composite keys.
* An error_sql Database extension has been added. This extension
adds the DatabaseError#sql method, which should return the
database query that caused the error. This is useful for
drivers that don't include the SQL used as part of the error
message.
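For example:
  DB.extension :error_sql
  begin
    DB.run("SELECT * FROM nonexistent_table")
  rescue Sequel::DatabaseError => e
    e.sql # => "SELECT * FROM nonexistent_table"
  end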
= Other Improvements
* Empty blobs are now literalized correctly on MySQL.
* Empty arrays are now literalized correctly on PostgreSQL <8.4.
* In the pagination extension, Dataset#page_count is now 1 even if
the dataset is empty. This fixes issues with last_page? and
page_range returning bad values for empty datasets.
* In the pagination extension, calling Dataset#each_page without a
block now returns an Enumerator.
* Dataset#qualify and Sequel.delay now work together, qualifying
the object returned by the delayed evaluation.
* Migrator.migrator_class is now a public method.
* The PostgreSQL citext type is now recognized as a string.
* Another disconnect error is now recognized in the jdbc/as400
adapter.
* Guides about using and creating Sequel extensions and model
plugins have been added.
= Backwards Compatibility
* If you were expecting Dataset#page_count on an empty paginated
dataset to return 0, you need to update your code.
sequel-5.63.0/doc/release_notes/4.30.0.txt 0000664 0000000 0000000 00000002523 14342141206 0017762 0 ustar 00root root 0000000 0000000 = New Features
* Overriding the :limit and :eager_limit_strategy association options
can now be done on a per-call basis when eager loading, by using an
eager block callback and setting the :eager_limit or
:eager_limit_strategy dataset options. Example:
Album.eager(:tracks=>proc{|ds| ds.clone(:eager_limit=>5)}).all
* Dataset#insert_conflict and #insert_ignore have been added on
SQLite, adding support for the INSERT OR ... SQL syntax:
DB[:table].insert_ignore.insert(:a=>1, :b=>2)
# INSERT OR IGNORE INTO TABLE (a, b) VALUES (1, 2)
DB[:table].insert_conflict(:replace).insert(:a=>1, :b=>2)
# INSERT OR REPLACE INTO TABLE (a, b) VALUES (1, 2)
* An identifier_columns plugin has been added, which allows
Sequel::Model#save to work when column names contain double
underscores.
= Other Improvements
* IPv6 addresses can now be used in connection URLs when using
ruby 1.9.3+.
* The :db_type entries in column schema hashes now include sizes
for string and decimal types on DB2 and when using the jdbc
adapter's generic schema parsing.
* Database#row_type in the pg_row extension now handles different
formats of specifying schema qualified types. So a row type
registered via :schema__type can be found using
Sequel.qualify(:schema, :type).
* Another disconnect error is recognized in the tinytds adapter.
sequel-5.63.0/doc/release_notes/4.31.0.txt 0000664 0000000 0000000 00000004452 14342141206 0017766 0 ustar 00root root 0000000 0000000 = Improvements
* Sequel now works with ruby 2.3's --enable-frozen-string-literal,
and all of the library files are set to use frozen string
literals by default.
A couple adapters and extensions depend on libraries that have
issues with frozen string literals. Pull requests have been sent
to each of those dependencies.
* The migrators will now raise an exception if a migration file
contains no migrations or more than one migration.
* The jdbc/postgresql adapter now supports using PostgreSQL specific
types in bound variables. Note that the current version of
jdbc-postgres (9.4.1204) has regressions that affect this, users
who need this support should stick with jdbc-postgres 9.4.1200 or
below.
* The jdbc/postgresql adapter now works around a regression in Java
method lookup in JRuby 9.0.5.0.
* The setter methods added by the association_pks plugin now do type
casting immediately, instead of right before the data will be used.
This makes them more similar to column setter methods, and ensures
that future calls to the getters that use cached values will
return correctly typecast data.
* The PostgreSQL array parser in the pg_array extension now handles
arrays with explicit bounds. The explicit bounds are ignored, so
such values do not round trip, and there is currently no support for
creating arrays with explicit bounds.
* Creating a table with a simple non-incrementing primary key and a
self-referential foreign key now works correctly on MySQL:
DB.create_table!(:table) do
Integer :id, :primary_key=>true
foreign_key :fk, :table
end
* Database#disconnect in the oracle adapter now works correctly on
more recent versions of oci8 where #logoff can raise OCIException
instead of OCIInvalidHandle.
= Backwards Compatibility
* The pg_array extension no longer defines
Sequel::Postgres::PGArray::JSONCreator. This should only affect
backwards compatibility if you were accessing the constant directly.
The :parser option to Sequel::Postgres::PGArray.register is also no
longer respected, but that should not affect backwards compatibility.
* The Sequel::Model#convert_cpk_array private method that was added by
the association_pks plugin has been removed.
Sequel::Model#convert_pk_array handles both simple and composite
primary keys now.
sequel-5.63.0/doc/release_notes/4.32.0.txt 0000664 0000000 0000000 00000011317 14342141206 0017765 0 ustar 00root root 0000000 0000000 = New Features
* A no_auto_literal_strings extension has been added, which removes the
automatic usage of strings in filter arguments as literal SQL code.
By default, if you do:
DB[:albums].where("name > 'N'")
By default Sequel will treat "name > 'N'" as SQL code. However,
this makes it much easier to introduce SQL injection:
# SQL Injection vulnerability in default Sequel
DB[:albums].where("name > 'params[:letter]'")
Sequel does support using placeholders when using literal strings:
# Safe in default Sequel
DB[:albums].where("name > ?", params[:letter])
However, if you forget to use placeholders, you can end up with SQL
injection. Accidental usage of filter strings derived from user
input as literal SQL code is probably the most common SQL injection
vector in applications using Sequel.
With the no_auto_literal_strings extension, passing a plain string
as the first or only argument to a filter method raises an
exception. If you want to use literal SQL code, you have to do so
explicitly:
DB[:albums].where(Sequel.lit("name > 'N'"))
You can also specify placeholders when using Sequel.lit:
DB[:albums].where(Sequel.lit("name > ?", params[:letter]))
Note that in many cases, you can avoid using literal SQL strings
completely:
DB[:albums].where{|v| v.name > params[:letter]}
* one_through_one associations now support a setter method:
Foo.one_through_one :bar
foo = Foo[1]
foo.bar = Bar[2]
foo.bar = nil
This will check the current entry in the join table, and based on
the argument and the current entry, run a DELETE, INSERT, or UPDATE
query, or take no action if the join table is already in the
correct state.
* Model.default_association_options has been added, which supports
default options for all future associations. You can use this to
do:
Model.default_association_options = {:read_only=>true}
Which makes associations not create modification methods by default.
You could still create the modification methods by passing
:read_only=>false when creating the association.
* The tactical_eager_loading plugin now supports two additional
options when calling an association method: :eager and
:eager_reload. Example:
artist = Artist.all.first
# Loads all albums for all of the artists,
# and all tracks for all of those albums
artist.albums(:eager=>:tracks)
# Reload the albums association for all artists
artist.albums(:eager_reload=>true)
You can also use the :eager option for an eager loading callback:
# Eagerly load the albums with names starting with A-M
artist.albums(:eager=>proc{|ds| ds.where(:name < 'N')})
* The association_pks plugin now supports an :association_pks_nil
association option in the association_pks setter, for determining
how nil values should be handled.
In Sequel <4.31.0, if you provided nil, it would either raise an
exception immediately if :delay_pks was not set, or on saving if
:delay_pks was set.
In Sequel 4.31.0, if :delay_pks was not set, it would remove all
associated rows. If :delay_pks was set, it would do nothing.
You can now set :association_pks_nil=>:remove to remove all
associated values on nil, or :association_pks_nil=>:ignore to ignore
a nil value passed to the method. Without :association_pks_nil set,
an exception will be raised.
* Dataset#delete_from has been added on MySQL, allowing deletions from
multiple tables in a single query:
DB[:a].join(:b, :a_id=>:id).delete_from(:a, :b).delete
# DELETE a, b FROM a INNER JOIN b ON (b.a_id = a.id)
* The JDBC schema parser now includes a :remarks entry for each
column, which contains comments on the column.
= Other Improvements
* The setter method added by the association_pks plugin now handles
the empty array correctly when :delay_pks is set. Previously, if
the empty array was passed, Sequel made no modifications in this
case. Sequel now correctly removes all associated values if an
empty array is passed.
* The eager_each plugin now handles eager loading when using
Dataset#first and related methods. Previously, the behavior was
unspecified. In Sequel <4.27.0 Dataset#first did eager loading
correctly in the eager case, but incorrectly in the eager_graph
case. In Sequel 4.27.0-4.31.0, it did not do eager loading in
either case.
* The tactical_eager_loading plugin will not automatically eager load
if passing a proc or block to an association method, since the proc
or block could be specific to the receiver.
* Sequel now uses a mutex to synchronize access to the association
cache on MRI, as it does on other ruby implementations.
= Backwards Compatibility
* See above for changes in eager_each and association_pks plugin
behavior.
sequel-5.63.0/doc/release_notes/4.33.0.txt 0000664 0000000 0000000 00000006726 14342141206 0017776 0 ustar 00root root 0000000 0000000 = New Features
* A Sequel::Model.require_valid_table accessor has been added. This
setting is false for backwards compatibility, but if set to true,
will raise an error you try to create a model class where an
invalid table name is used or the schema or columns cannot be
determined. This makes it easier to catch bugs, as things will
fail fast, but it means that you must change code like:
class Foo < Sequel::Model
set_dataset :my_foos
end
to:
class Foo < Sequel::Model(:my_foos)
end
as otherwise Foo will attempt to use the foos table by default
when creating the class, which will raise an error as it is not
the correct table name.
* Sequel::Database#transaction now supports a :savepoint=>:only
option, which will create a savepoint if already inside a
transaction, but will yield without creating a transaction if
not inside a transaction. The use case for this is when you
are running code that may raise an exception, and you don't
want to invalidate the current transaction state.
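For example:
  DB.transaction(:savepoint=>:only) do
    # code that may raise, without invalidating
    # any enclosing transaction
  end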
= Other Improvements
* The graph_each extension now splits results into subhashes when
using Sequel::Dataset#first, as it did before Sequel 4.27.0.
* On PostgreSQL, Dataset#insert_conflict now accepts an array of
columns as the value for the :target option.
* You can now pass a Sequel::SQL::Identifier or a
Sequel::SQL::QualifiedIdentifier as the table argument when creating
a foreign key. Previously, only symbols were supported, and using
other values required specifying the :table option. So this will
now work to reference a table that includes a double underscore:
foreign_key :foo_id, Sequel.identifier(:fo__oo)
* Creating model classes inside a transaction on PostgreSQL where
the implicit table name isn't correct no longer causes the
transaction to fail.
Similar issues were also fixed in the boolean_readers,
boolean_subsets, and class_table_inheritance plugins.
* On PostgreSQL, you can now use the :qualify=>true option in the
schema dumper, to dump using schema-qualified table names.
* On Microsoft SQL Server, the set_column_allow_null and
set_column_not_null alter table methods now work on varchar(max),
text, and similar columns.
* On Oracle, Sequel::Database#sequence_for_table now returns nil if
given a table that doesn't exist or that the user does not have
access to.
* Passing arbitrary objects to a model association method now
indicates that the association should be reloaded, which
used to work but was broken in Sequel 4.32.0.
* It is now possible to raise Sequel::ValidationFailed and
Sequel::HookFailed without an argument.
= Backwards Compatibility
* Sequel::Model no longer swallows many errors when subclassing or
setting datasets. While this should hopefully not affect backwards
compatibility, it may break things where the methods were raising
exceptions. If this does break backwards compatibility, it is
most likely because it is no longer hiding another bug that should
be fixed. Specific changes include:
* Model.inherited no longer rescues exceptions raised by set_dataset
* When subclassing a model that has a dataset, the columns and
schema are just copied from the superclass
* Only Sequel::Error is rescued in calls to columns and schema,
before it would rescue StandardError.
* The Sequel.firebird and Sequel.informix adapter methods have been
removed, they are no longer needed as the firebird and informix
adapters were removed a few versions back.
sequel-5.63.0/doc/release_notes/4.34.0.txt 0000664 0000000 0000000 00000006274 14342141206 0017775 0 ustar 00root root 0000000 0000000 = New Features
* A duplicate_columns_handler extension has been added, for printing a
warning or raising an exception if a dataset returns multiple
columns with the same name. You can set this globally for the
Database:
DB.extension :duplicate_columns_handler
DB.opts[:on_duplicate_columns] = :warn
DB.opts[:on_duplicate_columns] = proc do |columns|
columns.include?(:foo) ? :raise : :ignore
end
or for specific datasets:
ds = DB[:table].extension(:duplicate_columns_handler)
ds = ds.on_duplicate_columns(:raise)
ds = ds.on_duplicate_columns do |columns|
columns.include?(:foo) ? :raise : :ignore
end
This makes it easier to detect when duplicate columns are returned,
which in some cases can cause undesired behavior, such as the values
for later columns of the same name overwriting values for earlier
columns.
* The Dataset#to_hash, #to_hash_groups, #select_hash, and
#select_hash_groups methods now take an options hash as a third
argument. This options hash can now contain a :hash option, which
specifies the object in which the resulting values should be
placed. You can use this to have the values inserted into a
custom hash, or another object responding to #[] and #[]=.
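For example, to collect the results into an existing hash:
  custom = {}
  DB[:items].to_hash(:id, :name, :hash=>custom)
  # custom is now populated with id=>name pairs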
* A validates_operator validation has been added to the
validation_helpers plugin:
class Example < Sequel::Model
def validate
super
validates_operator(:>, 3, :column1)
validates_operator(:<=, 4, [:column2, :column3])
end
end
* The pg_range extension now adds a #register_range_type Database
method, supporting per-Database custom range types:
DB.register_range_type('timerange')
* The dataset_associations plugin now supports a
:dataset_associations_join association option on associations that
use joined datasets. This option will have the datasets returned
by the dataset association methods join to the same tables that
would be joined when retrieving the associated objects, allowing
selected columns, orders, and filters that reference columns in
the joined tables to work correctly.
* The Database :preconnect option can now be set to :concurrently,
which will create the connections in separate threads. This can
significantly speed up preconnection in high-latency environments.
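For example (hypothetical connection string):
  DB = Sequel.connect('postgres://host/database',
                      :preconnect=>:concurrently,
                      :max_connections=>10)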
* The Database :name option is now supported, holding an arbitrary
name for the database. Currently, it is only used in PoolTimeout
exception messages, but it may be used in other places in the
future.
= Other Improvements
* The prepared_statements_safe plugin now works correctly when using
CURRENT_DATE and CURRENT_TIMESTAMP default values for columns.
* Sequel now recognizes an additional unique constraint violation on
Microsoft SQL Server.
* PoolTimeout exception messages now include the server/shard to which
the connection was attempted when using the sharded threaded
connection pool.
= Backwards Compatibility
* Users of sequel_pg should upgrade to 1.6.17, as older versions of
sequel_pg may not work with Sequel 4.34.0+.
* Any custom extensions that override Dataset#to_hash,
#to_hash_groups, #select_hash, and #select_hash_groups need to
be modified to add support for accepting the options hash.
sequel-5.63.0/doc/release_notes/4.35.0.txt 0000664 0000000 0000000 00000011127 14342141206 0017767 0 ustar 00root root 0000000 0000000 = Forwards Compatibility
* Ruby 2.4 will unify the Fixnum and Bignum classes into the Integer
class, making both Fixnum and Bignum references to Integer. This
will have the effect of changing the behavior of Sequel migrations
that use a reference to the Bignum class.
For example, code like this will change behavior in ruby 2.4:
DB.create_table(:table) do
add_column :column, Bignum
end
# or:
DB.get(Sequel.cast('1', Bignum))
as this references the Bignum class. On ruby <2.4, this will create
a 64-bit integer column, on ruby 2.4+, it will create a 32-bit
integer column.
Code like this will be fine and does not need changing:
DB.create_table(:table) do
Bignum :column
end
as this calls the Bignum method.
Sequel now supports the :Bignum symbol as a generic type, so you
can now switch references to the Bignum class to the :Bignum
symbol whenever you want a generic 64-bit integer type:
DB.create_table(:table) do
add_column :column, :Bignum
end
# or:
DB.get(Sequel.cast('1', :Bignum))
Note that you should only do this if you are using Sequel 4.35.0+,
as previous versions of Sequel will treat the :Bignum symbol as
a database-specific type named Bignum.
= New Features
* A Sequel::Database#log_connection_info accessor has been added. If
set to true, this includes connection information in Sequel's query
log. In threaded connection pools (the default), this makes it
simple to see which connection is executing which queries.
DB.log_connection_info = true
DB.get(1)
# Logged: (0.000004s) (conn: 9713390226040) SELECT 1 AS v LIMIT 1
* Sequel::Model#lock! now supports an optional lock style, instead
of always using FOR UPDATE (which is still the default):
Example.first.lock!('FOR NO KEY UPDATE')
#=> SELECT * FROM examples WHERE id = 1 FOR NO KEY UPDATE LIMIT 1
* Sequel::Dataset#skip_locked has been added, which skips locked rows
when returning query results. This is useful whenever you are
implementing a queue or similar data structure. Currently, this is
supported on PostgreSQL 9.5+, Oracle, and Microsoft SQL Server.
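For example, a simple job-queue pattern (hypothetical jobs table):
  DB.transaction do
    job = DB[:jobs].for_update.skip_locked.first
    # process the job; rows locked by other
    # workers are skipped instead of blocking
  end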
* An sql_comments extension has been added for setting SQL comments
on queries:
ds = DB[:table].comment("Some Comment").all
# SELECT * FROM table -- Some Comment
#
All consecutive whitespace in the comment is replaced by a
single space, and the comment ends in a newline so that it works
correctly in subqueries.
This extension is mostly useful if you are doing analysis of your
database server query log and want to include higher level
information about the query in the comment.
* A server_logging extension has been added, which includes
server/shard information in the query log, if connection info
is being logged.
DB.extension :server_logging
DB.log_connection_info = true
DB.get(1)
# Logged: (0.000004s) (conn: 9712828677240, server: read_only)
# SELECT 1 AS v LIMIT 1
DB[:a].insert(:b=>1)
# Logged: (0.000003s) (conn: 9712534040260, server: default)
# INSERT INTO a (b) VALUES (1)
* On PostgreSQL, Database#full_text_search now supports a
:headline option for adding an extract of the matched text to
the SELECT list.
* Sequel::Postgres::PGRange#cover? has been added to the pg_range
extension, which works with empty, unbounded, and exclusive
beginning ranges. Previously, using #cover? with these ranges
would raise an exception. Note that cover? is now always
defined, where previously it was only defined on ruby 1.9+.
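For example (a range with an unbounded beginning):
  r = Sequel::Postgres::PGRange.new(nil, 10)
  r.cover?(5)  # => true
  r.cover?(11) # => false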
= Other Improvements
* The jdbc adapters now work correctly on JRuby 9.1. Previously,
some parts were broken on JRuby 9.1 due to frozen string literal
issues.
* Sequel::Dataset#to_hash and #to_hash_groups now work correctly for
model datasets doing eager loading.
* Using Sequel::Database#transaction with the :rollback=>:always
option now automatically uses a savepoint if supported when run
inside another transaction. If savepoints are not supported,
using :rollback=>:always inside a transaction will now raise an
exception.
* The delay_add_association plugin now handles hashes and primary keys
passed to the add_* association methods.
* The json_serializer :include option now works correctly when using
*_to_many associations with the association_proxies plugin.
* The schema_dumper extension now recognizes bool as a boolean type,
for consistency with the Database schema parser.
= Backwards Compatibility
* Custom adapters should switch from using log_yield to
log_connection_yield so that they work correctly when using
log_connection_info.
sequel-5.63.0/doc/release_notes/4.36.0.txt 0000664 0000000 0000000 00000010571 14342141206 0017772 0 ustar 00root root 0000000 0000000 = New Features
* Sequel::Model::Model() has been added, which allows for
Sequel::Model() like behavior where the base class used is a
subclass of Sequel::Model. To make it easier to use,
Sequel::Model.def_Model has also been added, which takes a module
and adds a Model() method to the module that calls Model() on the
receiver.
A :class_namespace association option has been added to make it
possible to set a default namespace for the :class option if given
as a symbol or string.
Sequel::Model.cache_anonymous_models has been added and
controls whether to cache anonymous model subclasses created by
Sequel::Model::Model() on a per-class basis.
These changes are designed to make it easier to use namespaced
models, for example:
module Foo
Model = Class.new(Sequel::Model)
Model.def_Model(self)
DB = Model.db = Sequel.connect(ENV['FOO_DATABASE_URL'])
Model.plugin :prepared_statements
Model.default_association_options[:class_namespace] = 'Foo'
class Bar < Model
# Uses Foo::DB[:bars] as dataset
# Implicitly uses Foo::Baz as associated class
one_to_many :bazes
# Uses Foo::Baz due to :class_namespace option
one_to_many :oldest_bazes, :class=>:Baz, :order=>:id
end
class Baz < Model(:my_baz)
# Uses Foo::DB[:my_baz] as dataset
# Implicitly uses Foo::Bar as associated class
one_to_many :bars
# Uses Foo::Bar due to :class_namespace option
one_to_many :oldest_bars, :class=>:Bar, :order=>:id
end
end
* A string_agg extension has been added for aggregate string
concatenation support on PostgreSQL 9+, SQLAnywhere 12+,
Oracle 11g+, DB2 9.7+, MySQL, HSQLDB, H2, and CUBRID:
DB.extension :string_agg
ds = DB[:table]
ds.get(Sequel.string_agg(:c)) # ',' default separator
ds.get(Sequel.string_agg(:c, ' - ')) # custom separator
ds.get(Sequel.string_agg(:c).order(:bar)) # force order
ds.get(Sequel.string_agg(:c).distinct) # remove duplicates
* A connection_expiration extension has been added, for automatically
removing connections from the connection pool after they have been
open for a given amount of time (4 hours by default).
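A sketch (assuming the timeout accessor is on the connection pool and
takes seconds):
  DB.extension :connection_expiration
  DB.pool.connection_expiration_timeout = 3600 # 1 hour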
* Support for <, <=, >, and >= operator validations when using integer
and string arguments has been added to the constraint_validations
extension and plugin.
* Sequel::SQL::Function#order has been added to support ordered
aggregate functions:
Sequel.function(:foo, :bar).order(:baz)
# foo(bar ORDER BY baz)
= Other Improvements
* The validates_operator validation in validation_helpers now
considers nil values as invalid unless :allow_nil or a similar
option is used. Previously, using validates_operator with a nil
value would probably raise a NoMethodError. This makes
validates_operator more similar to other validations.
* The threaded connection pools no longer hold the pool mutex when
disconnecting connections, which is useful if the driver blocks
when disconnecting connections.
* The connection_validator extension no longer holds a reference
to connections that have been disconnected.
* The connection_validator extension no longer overwrites the
connection_validation_timeout if loaded a second time.
* Sequel now closes cursors as soon as it is done using them in the
oracle adapter, instead of waiting for GC to clean them up.
* Sequel now handles disconnect errors that occur when literalizing
strings in the mysql2 and postgres adapters.
= Backwards Compatibility
* Using the Bignum class as a generic type is now deprecated. As
announced in the 4.35.0 release notes, ruby 2.4 is unifying the
Fixnum and Bignum classes into Integer, which results in the
behavior of the Bignum class changing. 4.35.0 added support for
using the :Bignum symbol as a generic 64-bit integer type, and
Sequel users now need to switch to that to avoid the deprecation
warning.
Sequel 4.41.0 (to be released in December) will drop support
for using the Bignum class as a generic type. This is being done
before the release of ruby 2.4 to hopefully make it unlikely that
users will be subject to behavior changes when upgrading ruby
versions.
Related to this change, external adapters need to switch from
overriding Database#type_literal_generic_bignum to
Database#type_literal_generic_bignum_symbol.
sequel-5.63.0/doc/release_notes/4.37.0.txt 0000664 0000000 0000000 00000003517 14342141206 0017775 0 ustar 00root root 0000000 0000000 = New Features
* Database#values has been added on SQLite 3.8.3+, operating similarly
to the support on PostgreSQL:
DB.values([[1, 2], [3, 4]]).select_map([:column1, :column2])
# => [[1, 2], [3, 4]]
* Regular expressions in dataset filters are now supported on Oracle
10g+:
DB[:t].where(:c=>/re/)
# SELECT * FROM "T" WHERE REGEXP_LIKE("C",'re')
= Other Improvements
* Sequel now supports the use of native prepared statements and bound
variables in the mysql2 adapter, when mysql2 0.4+ is used.
Previously, the mysql2 adapter supported database prepared
statements, but variables were always literalized. That is still
supported when mysql2 <0.4 is used.
* The connection pool now removes connections if it detects a
disconnect error that is not raised as a
Sequel::DatabaseDisconnectError. Such exceptions are reraised
without converted them to Sequel::DatabaseDisconnectError, but the
related connection is now removed from the pool.
* The reversible migration support now handles add_constraint with an
options hash as the first argument.
* ASTTransformer now handles Sequel.extract, allowing Dataset#qualify
and other uses of ASTTransformer to work with such values.
* The create_view :columns option is now supported on SQLite 3.9.0+.
* An additional disconnect error is now recognized in the postgres
adapter.
* A frozen string literal issue has been fixed when multiple different
database connection approaches have failed in the jdbc adapter.
= Backwards Compatibility
* External database adapters need to make sure that
Database#database_error_classes returns a valid result if called
during Database#initialize. If you have an external adapter where
one of the error classes depends on an argument given when
connecting (such as the connection string), you may have to make
some changes.
sequel-5.63.0/doc/release_notes/4.38.0.txt 0000664 0000000 0000000 00000005600 14342141206 0017771 0 ustar 00root root 0000000 0000000 = New Features
* Sequel::SQL::NumericMethods#coerce has been added, which adds
support for ruby's coercion protocol when performing numeric
operations. Previously, Sequel supported code like:
Sequel.expr{a - 1}
This is because a in this case returns a Sequel::SQL::Identifier,
which defines #- to return a Sequel::SQL::NumericExpression. By
supporting #coerce, the following code now also works:
Sequel.expr{1 - a}
This is because Integer#- calls #coerce on the argument if it is
defined (ruby's coercion protocol). Previously, you had to handle
this differently, using something like:
Sequel.expr(1) - a
# or
Sequel.-(1, a)
* Sequel now supports the ** operator for exponentiation on
expressions, similar to the +, -, *, and / operators. Sequel uses
the database power function to implement this by default on the
databases that support it (most of them). On Access, it uses the ^
operator, on Derby it is emulated using a combination of exp/ln
(with some loss of precision). SQLite doesn't support a power
function at all, but Sequel emulates it using multiplication for
known integer exponents.
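A minimal sketch (column name illustrative, exact SQL depends on
the database):

  DB[:t].select{(x ** 3).as(:cubed)}
  # SELECT power(x, 3) AS cubed FROM t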
* Sequel::SQLTime.date= has been added, which allows you to set the
date used for Sequel::SQLTime instances. Sequel::SQLTime is a
subclass of Time that is literalized using only the time components,
and is the ruby class used to store values of database time columns
on most adapters. Sequel::SQLTime defaults to using the current
date, but you can now set a specific date, for more consistency with
some drivers (Mysql2 uses 2000-01-01, tiny_tds uses 1900-01-01).
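For example, to match the Mysql2 behavior mentioned above:

  require 'date'
  Sequel::SQLTime.date = Date.new(2000, 1, 1)
  # Sequel::SQLTime instances created from now on use 2000-01-01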
* The postgres adapter now supports a :driver_options option when
using the pg driver, which is passed directly to pg. This can be
used to specify a client SSL certificate or to specify the
certificate authority root certificate when using
:sslmode=>'verify-full'.
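A sketch of such a connection (connection string and paths are
illustrative):

  DB = Sequel.connect('postgres://user@host/mydb',
    :driver_options=>{
      :sslmode=>'verify-full',
      :sslrootcert=>'/path/to/root.crt'
    })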
= Other Improvements
* Sequel no longer uses after_commit/rollback database hooks by
default if the after_commit/after_rollback model methods are not
overridden. This provides a performance speedup, but the main
benefit is that it no longer causes memory issues when saving a
large number of model instances in a single transaction, and it
also works with prepared transactions/2 phase commit. You can
still set use_after_commit_rollback= manually to force the
after_commit/rollback setting.
Note that Sequel 5 will move after_commit/rollback model hooks to
a plugin, and the default and recommended approach will be to use
the database after_commit/rollback hooks in the after_save or
similar model hooks.
= Backwards Compatibility
* The Sequel::Model use_after_commit_rollback class and instance
methods now return nil by default instead of true. nil now
indicates the default behavior of checking whether the appropriate
model hook has been defined, and only adding a database hook if so.
sequel-5.63.0/doc/release_notes/4.39.0.txt

= New Features
* Sequel.[] has been added as an alias to Sequel.expr. This makes it
a little easier to get Sequel-specific objects:
Sequel[:table].* # "table".*
Sequel[:table__column].as(:alias) # "table"."column" AS "alias"
Sequel[:column] + 1 # ("column" + 1)
* The timestamps plugin now supports an :allow_manual_update option.
If this option is used, the timestamps plugin will not override the
update timestamp when saving if the user has modified it since
retrieving the object.
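For example (model name illustrative):

  class Album < Sequel::Model
    plugin :timestamps, :allow_manual_update=>true
  end

  album = Album.first
  album.updated_at = Time.now - 3600 # manual modification
  album.save # updated_at is not overridden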
* The touch plugin now also touches associations on create in addition
to update and delete.
* The IntegerMigrator now supports a :relative option, which will
migrate that many migrations up (for positive numbers) or down (for
negative numbers).
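For example (migration directory illustrative):

  # Migrate 2 versions up from the current version:
  Sequel::Migrator.run(DB, 'db/migrations', :relative=>2)

  # Migrate 1 version down:
  Sequel::Migrator.run(DB, 'db/migrations', :relative=>-1)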
* Database#rollback_checker has been added, which returns a callable
that can be called later to determine whether the transaction ended
up committing or rolling back. So if you may need to check
transaction status at some future point, and don't need immediate
action on rollback/commit, it is better to use a rollback checker
than to add an after commit/rollback hook.
rbc = nil
DB.transaction do
rbc = DB.rollback_checker
rbc.call #=> nil
end
rbc.call # => false
DB.transaction(:rollback=>:always) do
rbc = DB.rollback_checker
end
rbc.call # => true
* The add_column schema method now supports an :if_not_exists option
on PostgreSQL 9.6+, which will only add the column if it does not
already exist:
DB.add_column :t, :c, Integer, :if_not_exists=>true
# ALTER TABLE "t" ADD COLUMN IF NOT EXISTS "c" integer
* The add_column schema method now supports an :after and :first
option on MySQL to add the column after an existing column or as
the first column:
DB.add_column :t, :c, Integer, :first=>true
# ALTER TABLE `t` ADD COLUMN `c` integer FIRST
DB.add_column :t, :c1, Integer, :after=>:c2
# ALTER TABLE `t` ADD COLUMN `c1` integer AFTER `c2`
* JSONBOp#insert has been added to the pg_json_ops extension, which
supports the new jsonb_insert function added in PostgreSQL 9.6+:
Sequel.pg_jsonb_op(:c).insert(%w'0 a', 'a'=>1)
# jsonb_insert("c", ARRAY['0','a'], '{"a":1}'::jsonb, false)
* Dataset#full_text_search on PostgreSQL now supports a
:to_tsquery=>:phrase option, to enable the native phrase searching
added in PostgreSQL 9.6+:
DB[:t].full_text_search(:c, 'foo bar', :to_tsquery=>:phrase)
# SELECT * FROM "t"
# WHERE
# (to_tsvector(CAST('simple' AS regconfig), (COALESCE("c", '')))
# @@ phraseto_tsquery(CAST('simple' AS regconfig), 'foo bar'))
* Sequel::Database.set_shared_adapter_scheme has been added, allowing
external adapters to add support for Sequel's mock adapter.
External adapters should have a shared adapter requirable at
sequel/adapters/shared/adapter_name, that uses the following
format:
# in sequel/adapters/shared/mydb
module Sequel::MyDB
Sequel::Database.set_shared_adapter_scheme :mydb, self
def self.mock_adapter_setup(db)
# Any mock-adapter specific setup to perform on the
# given Database instance
end
module DatabaseMethods
# methods for all Database objects using this adapter
end
module DatasetMethods
# methods for all Dataset objects using this adapter
end
end
= Other Improvements
* The hook_class_methods plugin only adds a Database transaction
hook if one of the after commit/rollback hook class methods is
actually used. This means that loading the plugin no longer
keeps all saved/deleted objects in memory until transaction
commit.
* The active_model plugin now uses a rollback checker instead of
an after_rollback hook, so models that use the active_model plugin
no longer store all saved model instances in memory until
transaction commit.
* When using the IntegerMigrator, attempting to migrate to a
migration number above the maximum will now migrate to the latest
version, and attempting to migrate to a migration number below 0
will now migrate all the way down.
* The pg_interval extension now supports ActiveSupport::Duration
objects that use week and hour parts (new in ActiveSupport 5).
= Backwards Compatibility
* The change to the touch plugin to touch associations on create could
possibly affect existing behavior, so if you are using this plugin,
you should test that this does not cause any problems.
* External adapters that tried to add support for the mock adapter
now need to update their code to use the new
Sequel::Database.set_shared_adapter_scheme method.
sequel-5.63.0/doc/release_notes/4.4.0.txt

= New Features
* Sequel now supports Sybase SQLAnywhere, via the sqlanywhere and
jdbc/sqlanywhere adapters.
* The filter by associations support now handles cases where the
association has :conditions or a block (as long as the block
does not rely on instance-specific behavior). This allows
you to handle the following:
Album.many_to_many :popular_tags, :class=>:Tag do |ds|
ds.where{tags__popularity > 9000}
end
Album.where(:popular_tags=>[Tag[1], Tag[2]])
This will return all albums whose popular_tags would include
at least one of those two tags. Previously, the block would
be ignored, returning albums containing one of those tags even if
the tags weren't popular.
* A table_select plugin has been added that changes the default
selection for models from * to table.*. This is useful for
people who want ActiveRecord-like behavior instead of SQL-like
behavior, where joining tables doesn't automatically include
columns in the other table.
This can fix issues where joining another table that has columns
with the same name as columns in the model table without
specifying an explicit selection results in model objects being
returned where the values in the model object are the values
from the joined table instead of the model table.
* Dataset#offset has been added, for specifying offset separately
from limit. Previously this was possible via:
ds.limit(nil, offset)
but this is a friendlier API.
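For example (table name illustrative):

  DB[:albums].limit(10, 20)
  # SELECT * FROM albums LIMIT 10 OFFSET 20

  DB[:albums].limit(10).offset(20)
  # same query, friendlier API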
* The jdbc adapter now has support for foreign key parsing. This
is used if there is no specific support for the underlying
database.
* Foreign key parsing is now supported on Oracle.
= Other Improvements
* Association add_*/remove_*/remove_all_* methods for
pg_array_to_many associations now work on unsaved model objects.
* In the constraint_validations extension, deletes from the
metadata table are now processed before inserts, so that dropping
an existing constraint and readding a constraint with the same
name now works correctly.
* Cloning an association now copies the :eager_block option
correctly from the source association if it was passed as
the block to the source association method.
* Cloning a cloned association now copies the block for the
association.
* The descendants method in the tree plugin no longer modifies an
array it is iterating over.
* The jdbc/postgresql adapter now supports PostgreSQL-specific types,
with pretty much the same support as the postgres adapter. When
using the pg_* extensions, the dataset will now handle the
PostgreSQL types correctly and return instances of the correct
Ruby classes (e.g. hstore is returned as Sequel::Postgres::HStore).
You should no longer need to use the typecast_on_load or
pg_typecast_on_load plugins when using model objects that use these
types when using the jdbc/postgresql adapter.
* Offset emulation on Oracle now handles cases where selected
columns can't be ordered.
* Offset emulation on DB2 no longer automatically orders on all
columns if the dataset itself is unordered.
* Types containing spaces are now returned correctly when
parsing the schema in the oracle adapter.
* Database#tables no longer returns tables in the recycle bin on
Oracle.
* add_foreign_key now works correctly on HSQLDB, by splitting the
column addition and constraint addition into two separate
statements.
* add_primary_key now works correctly on H2.
sequel-5.63.0/doc/release_notes/4.40.0.txt

= New Features
* A Sequel.split_symbols setting has been added. This setting is
true by default, so there is no change to backwards compatibility
by default. However, users can now do:
Sequel.split_symbols = false
to disable the splitting of symbols. This will make Sequel no
longer treat symbols with double or triple underscores as qualified
or aliased identifiers, instead treating them as plain identifiers.
It will also make Sequel no longer treat virtual row methods with
double underscores as qualified identifiers. Examples:
# Sequel.split_symbols = true
:column # "column"
:table__column # "table"."column"
:column___alias # "column" AS "alias"
:table__column___alias # "table"."column" AS "alias"
Sequel.expr{table__column} # "table"."column"
# Sequel.split_symbols = false
:column # "column"
:table__column # "table__column"
:column___alias # "column___alias"
:table__column___alias # "table__column___alias"
Sequel.expr{table__column} # "table__column"
Disabling symbol splitting can make things much easier if leading,
trailing, double, or triple underscores are used in identifiers
in your database.
Disabling symbol splitting makes Sequel simpler, even if it does
make it slightly less easy to create qualified and aliased
identifiers. It is possible that the symbol splitting will be
disabled by default starting in Sequel 5.
Note that due to Database symbol literal caching, you should not
change the Sequel.split_symbols setting after creating a
Database instance.
* SQL::Identifier#[] and SQL::QualifiedIdentifier#[] have been added
for creating qualified identifiers. This makes it easier and more
natural to create qualified identifiers from existing identifiers.
Previously, you could do:
Sequel[:column].qualify(:table)
You can now use the more natural:
Sequel[:table][:column]
This can also be used in virtual rows:
Sequel.expr{table[:column]}
This offers an easy way to create qualified identifiers when symbol
splitting has been disabled.
* A symbol_aref extension has been added, allowing the use of
Symbol#[] to create qualified identifiers if passed a Symbol,
SQL::Identifier, or SQL::QualifiedIdentifier. This doesn't
break any existing ruby behavior, as ruby currently raises
an exception in such cases. Example:
:table[:column] # "table"."column"
This extension can make it easier to create qualified identifiers
if symbol splitting is disabled.
A symbol_aref_refinement extension has also been added, which
adds a refinement version of the extension that can be enabled via:
using Sequel::SymbolAref
* A symbol_as extension has been added, which adds the Symbol#as method
to create aliased identifiers. This was previously part of the core
extensions, but has been separated so it can be included by itself.
Example:
:column.as(:alias) # "column" AS "alias"
This extension can make it easier to create aliased identifiers if
symbol splitting is disabled.
A symbol_as_refinement extension has also been added, which
adds a refinement version of the extension that can be enabled via:
using Sequel::SymbolAs
* An s extension has been added, which adds the Sequel::S module,
containing a private #S method that calls Sequel.expr. You can
include this module in any module or class where you would like the
S method to be available:
class Album < Sequel::Model
extend Sequel::S
one_to_many :tracks, :order=>S(:number).desc
end
You can include this in Object if you want the S method to be
available globally:
Object.send(:include, Sequel::S)
Sequel::S also works if it is used as a refinement, adding the S
method to Object while the refinement is active:
using Sequel::S
This extension can make it easier to create qualified and aliased
identifiers if symbol splitting is disabled:
S(:table)[:column]
S(:column).as(:alias)
* Dataset#insert_conflict on PostgreSQL now supports a :conflict_where
option, allowing for the handling of insert conflicts when using a
partial unique index:
DB[:table].insert_conflict(:target=>:a,
:conflict_where=>{:c=>true}).insert(:a=>1, :b=>2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) WHERE (c IS TRUE) DO NOTHING
= Other Improvements
* Sequel no longer attempts to combine arguments for non-associative
operators, as doing so leads to invalid code in cases such as:
Sequel.expr{column1 - (column2 - 1)}
* Sequel now automatically adds NOT NULL constraints on columns when
adding a primary key constraint on the columns, if the database
doesn't handle that situation correctly.
* Database#rollback_checker now returns a thread-safe object.
* SQL::QualifiedIdentifier#initialize now converts SQL::Identifier
arguments to strings, fixing usage of such objects in the
schema methods.
* The prepared_statements plugin now correctly handles lookup by
primary key on models with joined datasets.
* The dataset_associations plugin now handles many_through_many and
one_through_many associations that use a single join table. Note
there is no reason to create such associations, as many_to_many
and one_through_one associations will work for such cases.
* The insert_returning_select plugin now handles cases where the
model doesn't have a valid dataset, fixing usage with the
lazy_attributes and dataset_associations plugins, and potentially
other plugins.
* The column_select plugin no longer raises an exception if the
model's table does not exist.
* The class_table_inheritance plugin now works when the
prepared_statements plugin is also used.
* Some adapters now avoid thread-safety issues during loading on
ruby implementations without a GVL by avoiding the modification of
shared datastructures.
* When using Database#tables with the :qualify=>true option on
PostgreSQL, table names with double or triple underscores are
now handled correctly.
= Backwards Compatibility
* The following Dataset constants are now frozen: NON_SQL_OPTIONS,
ACTION_METHODS, QUERY_METHODS, CONDITIONED_JOIN_TYPES,
UNCONDITIONED_JOIN_TYPES, and JOIN_METHODS. Of these,
NON_SQL_OPTIONS was previously modified in a non-thread-safe manner
by some adapters. External adapters should switch to having the
adapter's dataset non_sql_options method return an array of options
that do not affect the SELECT SQL for the adapter's datasets, rather
than modifying NON_SQL_OPTIONS.
sequel-5.63.0/doc/release_notes/4.41.0.txt

= New Features
* Dataset#with_* methods have been added as equivalents for a
few Dataset#*= methods, but instead of modifying the receiver, they
return a modified copy, similar to the dataset query methods.
Specific methods added:
with_extend :: Extends clone with given modules
with_row_proc :: Modifies row_proc in clone
with_quote_identifiers :: Modifies quote_identifiers setting in
clone
with_identifier_input_method :: Modifies identifier_input_method
setting in clone
with_identifier_output_method :: Modifies identifier_output_method
setting in clone
Similarly, on Microsoft SQL Server, a with_mssql_unicode_strings
method has been added, which returns a clone with the
mssql_unicode_strings setting modified.
* On DB2, Sequel now supports an :offset_strategy Database option,
which can be set to :limit_offset for "LIMIT X OFFSET Y" or
:offset_fetch for "OFFSET Y FETCH FIRST X ROWS ONLY". Depending
on what version of DB2 is used and how DB2 is configured, it's
possible one of these strategies will work. For backwards
compatibility, the current default is still to emulate offsets
using the ROW_NUMBER window function.
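A sketch of setting this option when connecting (connection string
and adapter scheme are illustrative):

  DB = Sequel.connect('ibmdb://user:password@host/mydb',
    :offset_strategy=>:offset_fetch)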
* In the json_serializer plugin, you can now use an
Sequel::SQL::AliasedExpression instance as an association name
value, which allows you to rename the association in the resulting
JSON:
album.to_json(:include=>{Sequel.as(:album, :s)=>{:only=>:name}})
# => '{"id":1,"name":"RF","artist_id":2,"s":{"name":"YJM"}}'
= Other Improvements
* The association dataset methods now correctly handle cases where
one of the keys is nil. Previously, they would incorrectly use an
IS NULL predicate in such cases. Now, they use a false predicate.
* The hook_class_methods plugin handling of commit hooks has been
fixed. The implementation of commit hooks (but not rollback
hooks) was broken in hook_class_methods starting in 4.39.0 due to
changes to avoid keeping references to all model instances until
the transaction was committed or rolled back.
* Using the Fixnum schema method no longer raises a warning on ruby
2.4+, as it now uses the Integer class instead of the Fixnum
constant.
* The ado adapter has been greatly improved. It now avoids memory
leaks, has much better type handling, and passes almost all specs.
Note that the ado adapter's behavior can change depending on the
version of ruby in use, try to use ruby 2.2+ for best compatibility.
* Dataset#graph no longer mutates the receiver. Previously, it set
an empty hash as the :graph option in the receiver, which was
unintentional and not desired.
* Pure java exceptions that don't support the message= method are now
handled properly when reraising the exception on connection errors
in the jdbc adapter.
= Backwards Compatibility
* Support for using the Bignum constant as a generic type has been
removed, as was preannounced in the 4.36.0 release notes. Users
should switch to using the :Bignum constant if they haven't already.
* Users of the ado adapter may need to update their code now that the
ado adapter correctly handles most types.
* The spec_*_w rake tasks in the repository now require ruby 2.4+ and
use the warning library for filtering warnings, instead of trying to
filter warnings with egrep.
sequel-5.63.0/doc/release_notes/4.42.0.txt

= New Features
* There have been numerous improvements this release related to
frozen datasets. Frozen datasets now work in almost all cases,
except when calling a dataset mutation method.
When using ruby 2.4, Sequel uses the new support for
clone(:freeze=>false) to actually freeze datasets while allowing
them to copy singleton classes/extended modules from the dataset
calling clone. On earlier versions of ruby, the dataset opts
are now frozen, preventing more types of accidental modification.
The dataset internals were refactored to reduce the number of
instance variables. Now, datasets store all of their state
in opts. Additionally, all datasets now use a thread-safe
cache for storing cached state such as the dataset's columns.
Previously, accessing/setting the columns was not thread-safe,
unless the ruby interpreter used thread-safe methods for
instance variable getting/setting.
Frozen datasets use this new cache to optimize repeated method
calls, resulting in substantial performance speedups. This can
include caching returned and/or intermediate datasets, SELECT and
DELETE SQL generated, as well as internal objects designed to
optimize the building of SQL strings with different arguments.
Even for fairly simple datasets, this can result in up to 10x
performance improvements for dataset methods that don't require
database access, and up to 3x performance improvements for dataset
methods that do require database access.
* A freeze_datasets Database extension has been added which
automatically freezes all datasets for the Database instance.
This also enables dataset caching when creating datasets using
Database#[] and #from using a single symbol, such as
DB[:table_name]. In addition to speeding up the methods
themselves, this also allows code such as:
DB[:foo].for_update.first
To run much faster by avoiding any dataset creation or SQL
string building after the first call.
The freeze_datasets extension makes #dup an alias of #clone,
ensuring that all cloned datasets that were originally created
by the Database instance are frozen.
It is highly recommended that you start using the
freeze_datasets extension in your applications using Sequel,
as this extension will become the default and only behavior
in Sequel 5. Unfrozen datasets and dataset mutation will
not be supported in Sequel 5.
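Loading it is a one-liner:

  DB.extension :freeze_datasets
  DB[:foo].frozen? # => true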
* The dataset methods created by Model#subset and
Model::DatasetModule#subset now cache the returned dataset if the
current dataset is frozen, none of the arguments are Procs, and a
block is not provided. This can result in up to a 3x performance
improvement for method chains that use subsets, such as:
ModelClass.subset1.subset2.subset3.first
* Model::DatasetModule has had the following methods added to it:
distinct, exclude, exclude_having, grep, group, group_and_count,
group_append, having, limit, offset, order, order_append,
order_prepend, select, select_all, select_append, select_group,
where, and server. These methods create dataset methods that
when called call the dataset method with the same name on the
receiver. Example:
class ModelClass < Sequel::Model
dataset_module do
select :with_id_and_name, :id, :name
where :active, :active
order :by_name, :name
end
end
ModelClass.active.by_name.with_id_and_name.all
# SELECT id, name FROM model_classes WHERE active ORDER BY name
# Equivalent to:
ModelClass.
where(:active).
order(:name).
select(:id, :name).
all
In addition to being easier than defining the methods manually, this
also enables caching of the datasets in most cases, so that the
above method chain does not create any additional datasets after the
first call.
* Dataset#with_extend now accepts a block and will create a module
with that block that will be used to extend the object, after any
modules given as arguments have been applied:
DB[:table].with_extend{def foo; 1 end}.foo # => 1
* The identifier mangling support for datasets
(identifier_input_method and identifier_output_method) has been
moved to a identifier_mangling database extension, but it is still
loaded by default. You can disable the loading of this extension
by using the :identifier_mangling=>false Database option. Sequel
5 will stop loading of this extension by default, requiring you to
load it manually via Database#extension if you need it.
Sequel's default remains the same as before, to convert identifiers
to uppercase on input and lowercase on output on databases that
fold unquoted identifiers to uppercase (per the SQL standard), and
to not mangle identifiers at all on databases that fold unquoted
identifiers to lowercase (MySQL, PostgreSQL, SQLite). The
identifier_mangling extension just allows you to change the default
behavior.
* On DB2, Dataset#with_convert_smallint_to_bool has been added,
which returns a modified dataset with the
convert_smallint_to_bool setting changed. Previously,
changing the smallint_to_bool setting required mutating a
dataset.
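A minimal sketch (table name illustrative):

  ds = DB[:t].with_convert_smallint_to_bool(false)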
* The mock adapter now supports Dataset#with_{autoid,fetch,numrows},
allowing mocking of results when using frozen datasets.
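A minimal sketch using the mock adapter with a frozen dataset:

  DB = Sequel.mock
  ds = DB[:t].freeze.with_fetch(:id=>1)
  ds.first # => {:id=>1}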
= Other Improvements
* Using an eager load callback when eager loading a one_to_one
association that uses an order or offset now works correctly
on databases that do not support window functions.
* Dataset#== and Dataset#hash are now faster as they don't need
to generate SQL. As all internal state is now stored in the
opts, it just considers the class, db, and opts.
* The prepared statement/bound variable internals were heavily
refactored to be simpler and more robust, to more easily
support native prepared statements, and to work with frozen
datasets.
* When emulating alter table operations on SQLite, integer
primary keys now use AUTOINCREMENT, since that is Sequel's
default when creating tables on SQLite.
* On SQLite, Database#schema no longer uses :auto_increment entries
when the table has a composite primary key.
* Most dataset opts values are now frozen to prevent accidental
modification and allow for thread-safe access.
* SQL::Expression subclass instances are now always frozen.
* Dataset::PlaceholderLiteralizer and
Dataset::PlaceholderLiteralizer::Argument instances are now
always frozen.
* Dataset#ungraphed now works on a frozen model dataset.
* Model#set_server now works when the model uses a frozen dataset.
* The pagination and null_dataset extensions now work on frozen
datasets.
* Dataset#server now works for frozen model datasets when the
model uses the sharding plugin.
* Calling eager_graph or association_join on a model dataset
is now deprecated if it would ignore the association's
:conditions option and the :graph_conditions, :graph_block,
or :graph_only_conditions association option is not used.
* Using the :eager_limit dataset option in an eager_load
callback with a singular association now raises an Error.
Previously, the behavior was undefined.
* Calling Dataset#prepare without a name argument is now
deprecated. Previously, it raised an Error in the mysql, mysql2,
and postgres adapters, but was allowed on other adapters.
* The looser_typecasting extension now handles the strict
BigDecimal parsing introduced in ruby 2.4.
* When using the duplicate_columns_handler extension with
:on_duplicate_columns=>:warn, the warning message is now
prepended with the file and line.
* Internally, Sequel uses Dataset#where instead of #filter,
reverse instead of reverse_order, and select_append instead
of select_more to save a method call and array creation.
* Dataset#db= and #opts= in the sequel_3_dataset_methods
extension now raise a RuntimeError if the dataset is frozen.
* Sequel's tests now run without warnings when using Minitest
5.10.
* Sequel now issues a deprecation message instead of a warning
when used with PostgreSQL <8.2.
= Backwards Compatibility
* Any external dataset extensions or adapters that modified or
directly accessed dataset instance variables other than @db and
@opts (such as @columns) needs to be updated to work with the
new dataset internals.
* Any external adapters that implemented native prepared statements/
bound variables need to be updated to work with the new internal
prepared statement API.
* Model.set_dataset and .dataset= now operate on a clone of the
dataset given, instead of mutating the dataset that is passed in.
This allows them to work with frozen datasets, but can change
the behavior if you mutate a dataset after passing it to one
of these methods. Anyone doing that needs to change their code
to get the current copy of the model's dataset, and mutate that,
or better yet, avoid mutating datasets at all.
* Dataset#columns now calls #columns! instead of the other way around,
which may require external plugins/extensions that override #columns
to switch to overriding #columns!.
* External adapters that want to disable identifier mangling by
default need to be updated.
sequel-5.63.0/doc/release_notes/4.43.0.txt

= New Features
* Database#freeze has now been implemented. It is now recommended
to use it in production and during testing, after loading
extensions and making other changes to Database state. Once frozen,
the Database settings cannot be modified, but the Database can
execute queries and return results. By freezing the Database, you
gain greater thread-safety assurance and will be alerted via an
exception if runtime code attempts to modify Database state.
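A typical setup (extension name illustrative):

  DB = Sequel.connect(ENV['DATABASE_URL'])
  DB.extension :pg_array  # make all Database modifications first
  DB.freeze               # then freeze before serving requests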
= Other Improvements
* Model#refresh now uses the same optimization that Model.with_pk
uses, resulting in faster refreshing for most models.
* The prepared_statements plugin no longer automatically uses
prepared statements in cases where it is likely to be slower.
Now, prepared statements are only used by default for INSERT
and UPDATE statements (and DELETE on Oracle and DB2). This
change was made after benchmarking showed that using prepared
statements for primary key lookups, refreshes, and deletes is
significantly slower than non-prepared statements across almost
all adapters.
* Database#extension no longer attempts to load the same extension
more than once.
* The timestamp migrator now handles key length limitations when
using MySQL with InnoDB engine and utf8mb4 charset default.
* The jdbc/sqlite adapter will now use SQLite result codes for
more accurate exception types, if the jdbc-sqlite driver
supports them.
* dataset_module is now inherited correctly if using the
single_table_inheritance plugin. This was broken in 4.42.0,
and fixed in 4.42.1.
* The prepared_statements plugin now respects a server specified
for the model instance, instead of always using the default
server.
* The prepared_statements_associations plugin now respects a
server specified for the model instance if using the sharding
plugin, instead of always using the default server.
* The prepared_statements_associations plugin now works correctly
when using some instance-specific associations, such as
many_to_one associations using a nil :key option.
* The prepared_statements_with_pk plugin now respects a server
specified for the dataset, instead of always using the default
server.
* Model#freeze now freezes the associations hash after validating
the model instance, instead of before, fixing cases where
validation calls association methods.
* Sequel no longer continually tries to determine the server
version on HSQLDB, if the first attempt fails.
* The mock adapter now uses a thread-safe incrementor for autoid.
* Mysql2 native prepared statement support now better handles
sharding where connection options differ per shard.
* On Oracle, Database#sequence_for_table is now thread-safe.
* On PostgreSQL, Database#type_supported? is now thread-safe.
* On MySQL, Database#supports_timestamp_usecs? now correctly
memoizes false values.
* The jdbc/postgresql adapter now works correctly if the
pg_hstore extension is loaded first.
= Backwards Compatibility
* Maintainers of external adapters and Database extensions
should update their code to support Database#freeze.
* Code that relies on Database extensions being loaded multiple
times if called multiple times needs to be updated, because
now the extension only gets loaded the first time
Database#extension is called.
* @enum_labels in the pg_enum extension is now frozen by default.
sequel-5.63.0/doc/release_notes/4.44.0.txt

= New Features
* Model.freeze is now supported and recommended in production and
during testing. It freezes all class-level metadata, preventing
possible thread-safety issues at runtime.
* Model.finalize_associations has been added, speeding up some
association reflection methods by about 10x. This method
should be called after all associated models have been loaded.
This can speed up the retrieval of associated objects for small
datasets by 5-10%.
One advantage of using this is it will raise an exception if it
recognizes that any of your associations are not defined
correctly, such as referencing an associated class that doesn't
exist.
* Model.freeze_descendents has been added to the subclasses plugin.
This method finalizes associations for all descendent classes,
then freezes the descendent class. It's designed to make it
easy to freeze all model classes in use:
Sequel::Model.plugin :subclasses
Dir['./models/*.rb'].each{|f| require f}
Sequel::Model.freeze_descendents
* An implicit_subquery dataset extension has been added, which
implicitly uses a subquery if you have a dataset with raw SQL and
you call a method that would modify the SQL used:
DB['SELECT * FROM foo'].where(:bar=>1)
# SELECT * FROM foo
DB.extension :implicit_subquery
DB['SELECT * FROM foo'].where(:bar=>1)
# SELECT * FROM (SELECT * FROM foo) AS t1 WHERE (bar = 1)
* Model datasets now have where_all, where_each, and
where_single_value methods for returning data:
class Album < Sequel::Model; end
Album.where_all(:id=>[1,2,3])
# => [Album[1], Album[3], Album[2]]
Album.where_each(:id=>[1,2,3]) do |album|
# ...
end
Album.select(:name).where_single_value(:id=>1)
# "Album's Name"
These methods are designed for use by other dataset methods you
define, and are optimized for frozen datasets if the methods will
be called multiple times on the same dataset. where_all and
where_each can increase performance by up to 40% for small datasets
compared to where.all and where.each. where_single_value can be up
to twice as fast as where.single_value.
* Model.dataset_module now supports an eager method for eager loading:
class Album < Sequel::Model
many_to_one :artist
dataset_module do
eager :with_artist, :artist
end
end
Album.with_artist.all # eagerly loads artist association
= Other Improvements
* The jdbc adapter now supports Database#freeze. Possible
thread-safety issues when initializing multiple jdbc Database
instances in separate threads at the same time have been fixed.
* The postgres adapter now raises an exception if it recognizes that
the loaded version of sequel_pg is incompatible.
* Sequel classes that are subclasses of core classes now define
custom #inspect methods so instances can easily be differentiated from
core class instances. For example:
Sequel::SQL::Blob.new('a')
# => #<Sequel::SQL::Blob ...>
Sequel::SQLTime.now
# => #<Sequel::SQLTime ...>
Sequel::LiteralString.new("foo")
# => #<Sequel::LiteralString ...>
class Album < Sequel::Model; end
Album.many_to_one :artist
# => #<Sequel::Model::Associations::ManyToOneAssociationReflection ...>
Sequel::SQL::ValueList.new([[1,2]])
# => #<Sequel::SQL::ValueList ...>
* Dataset#from_self now copies the columns from the current dataset
if they are present, since wrapping a dataset in a subquery should
not change the columns returned.
* On PostgreSQL, array type conversion now correctly handles false
values.
* Another disconnect error is now recognized by the jdbc/as400
adapter.
* Modifications to Sequel::Model::Associations::ASSOCIATION_TYPES
are now thread safe, fixing issues if separate threads attempt
to load separate model plugins that modify this hash.
* The force_encoding plugin no longer modifies the encoding of
Sequel::SQL::Blob instances.
* Many plugins were updated so they no longer add constants to the
namespace of the model that loads them.
= Backwards Compatibility
* Maintainers of external model plugins should update their
code to support Model.freeze.
= Upcoming Deprecation
* Starting in Sequel 4.45.0, Sequel will be adding deprecation
warnings for features that will be removed or where behavior will
change in Sequel 5.
sequel-5.63.0/doc/release_notes/4.45.0.txt

= Deprecated Features
* Dataset mutation is now deprecated. Users should switch to using
the non-mutating methods.
# Instead of:
dataset.where!(:foo)
# Switch to:
dataset = dataset.where(:foo)
* Support for the Cubrid, Firebird, Informix, and Progress databases
has been deprecated. Any users of this support should consider
creating an external adapter with the current code and maintaining
such support themselves.
* The do (DataObjects), swift, and jdbc/as400 adapters have been
deprecated. Any users of these adapters should consider creating an
external adapter with the current code and maintaining the adapter
themselves.
* Model transaction hooks (after_commit, after_rollback,
after_destroy_commit, after_destroy_rollback) are now deprecated.
Users should switch to calling the after_commit and after_rollback
database transaction hooks directly.
# Instead of:
def after_commit
super
do_something
end
# Switch to:
def after_save
super
db.after_commit{do_something}
end
* Passing a block to Database#from is now deprecated. For backwards
compatibility, this block affected the WHERE clause instead of the
FROM clause. In Sequel 5, Database#from blocks will be treated like
Dataset#from blocks, and will affect the FROM clause. This behavior
has been available for years by using the from_block extension.
# Instead of:
DB.from(:foo){a > b}
# Switch to:
DB.from(:foo).where{a > b}
* Passing non-hash arguments and multiple arguments to the
model association methods is now deprecated. Switch to using a
hash as an argument.
# Instead of:
model.association(true)
model.association(proc{|ds| ds.where(:foo)})
# Switch to:
model.association(:reload=>true)
model.association(:callback=>proc{|ds| ds.where(:foo)})
model.association{|ds| ds.where(:foo)}
* Passing procs as filter arguments is now deprecated. These should
now be passed as blocks instead of arguments.
# Instead of:
dataset.where(proc{foo > bar})
# Switch to:
dataset.where{foo > bar}
* Passing multiple arguments or an array as filter arguments when the
array/arguments does not represent a conditions specifier (array of
two element arrays, treated like a hash) is now deprecated. Switch
to calling the filter method separately with each argument or using
Sequel.& to combine the arguments:
# Instead of:
dataset.where(:foo, :bar)
dataset.where([:foo, :bar])
# Switch to:
dataset.where(:foo).where(:bar)
dataset.where(Sequel.&(:foo, :bar))
* Returning false from model before hooks to cancel an action is
now deprecated. Switch to calling cancel_action instead.
# Instead of:
def before_save
return false if something
super
end
# Switch to:
def before_save
cancel_action('something bad') if something
super
end
* Database#each_server has been deprecated. Switch to using
Database#servers and Database#with_server from server_block
extension:
# Instead of:
DB.each_server{|db| db.run("foo")}
# Switch to:
DB.extension :server_block
DB.servers.each{|s| DB.with_server(s){DB.run("foo")}}
* Calling Database#add_servers and Database#remove_servers on a
database that does not use the :servers option is now deprecated.
Currently, the calls to add_servers and remove_servers are
ignored for such databases, which can hide errors.
* Sequel::Postgres::PG_NAMED_TYPES is now deprecated. Switch to
calling Database#add_named_conversion_proc instead.
# Instead of:
require 'sequel/adapters/utils/pg_types'
Sequel::Postgres::PG_NAMED_TYPES[:foo] = lambda{|v| v}
DB = Sequel.connect('postgres://...')
# Switch to:
DB = Sequel.connect('postgres://...')
DB.add_named_conversion_proc(:foo){|v| v}
* Modifying the identifier mangling settings for a Database or
Dataset is now deprecated unless the identifier_mangling extension
is explicitly loaded into the Database instance.
* The Sequel::Database.single_threaded accessor is now deprecated.
Switch to using Sequel.single_threaded= and Sequel.single_threaded?.
* Sequel::Database.identifier_input_method,
Sequel::Database.identifier_output_method,
and Sequel::Database.quote_identifier accessors are now deprecated.
Switch to modifying the setting for each Database instance.
* Sequel.identifier_input_method=, Sequel.identifier_output_method=,
and Sequel.quote_identifer= setter methods are now deprecated.
Switch to modifying the setting for each Database instance.
* Calling Dataset#delete/update/truncate on datasets with limits
or offsets is now deprecated, unless the database will respect
the limit or offset. Currently, only MySQL and Microsoft SQL
Server have limited support for such deletes and updates. You
should either call unlimited or skip_limit_check before calling
delete/update/truncate.
* Having duplicate column names in subclass tables when using the
class_table_inheritance plugin is now deprecated. The
documentation has warned against this for a long time, but the
code did not enforce it.
* When using the association_pks plugin setter methods without the
:delay_pks association option set, a warning is now issued. In
Sequel 5, the default will be to assume that the :delay_pks
option is :always, and not to make modifications until the object
is saved. If you would like to keep the current behavior, set
the :delay_pks=>false association option.
The current :delay_pks=>true behavior will be removed in Sequel 5,
with it being treated like :delay_pks=>:always. If you are relying
on the current behavior of :delay_pks=>true (delay for new objects,
immediate for existing objects), you will need to update your code.
* Database#dup/clone are now deprecated. They have never been
handled correctly, since the default implementation from Kernel
has been used.
* Model.dup/clone are now deprecated. They have never been
handled correctly, as the default implementation from Kernel/Module
has been used.
* Database#use on MySQL is now deprecated. Switch to creating a new
Database instance instead of modifying the database for an existing
instance.
* Database#database_name on MySQL is now deprecated. Switch to asking
the database server which database you are connected to:
# Instead of:
DB.database_name
# Switch to:
DB.get{DATABASE{}}
* In the lazy_attributes, nested_attributes, composition, and
serialization plugins, the *_module accessors are now deprecated.
These were implementation details that should not have been
exposed.
* The schema plugin is now deprecated. Switch to defining the schema
before creating the model class using the Database schema methods.
* The scissors plugin is deprecated. It existed for compatibility
with Sequel 3, but it is dangerous as it makes it easier to modify
all rows when the intent was to modify a single row.
* The prepared_statements_associations and prepared_statements_with_pk
plugins are now deprecated. These plugins generally make things
slower.
* Dataset#unbind, Sequel::Unbinder, and Sequel::UnbindDuplicate are
now deprecated. This mostly existed to support the
prepared_statements_associations and prepared_statements_with_pk
plugins.
* Sequel::Error::* exception class aliases are now deprecated. Switch
to using the exception classes in the Sequel namespace.
* Sequel::BeforeHookFailed is now deprecated. Switch to using
Sequel::HookFailed.
* Calling Sequel::Qualifier.new with 2 arguments is now deprecated.
Users should switch to calling it with a single argument (the
table used for qualifying unqualified identifiers).
* Treating unrecognized prepared statement types as :select is now
deprecated. Switch to using :select as the prepared statement
type.
* The @was_new instance variable available in model after_save hooks
is now deprecated. There is no deprecation warning associated
with this change.
# Instead of:
def after_save
super
if @was_new
do_something
else
do_something_else
end
end
# Switch to:
def after_create
super
do_something
end
def after_update
super
do_something_else
end
* The @columns_updated instance variable available in model
after_save and after_update hooks is deprecated. Switch to
using the new columns_updated plugin and calling the
columns_updated method.
* The Sequel.cache_anonymous_models accessor has been deprecated.
Switch to using Sequel::Model.cache_anonymous_models.
* Sequel::Model::ANONYMOUS_MODEL_CLASSES and
Sequel::Model::ANONYMOUS_MODEL_CLASSES_MUTEX have been
deprecated.
* Sequel::Database::ResetIdentifierMangling has been deprecated.
= New Features
* A validation_contexts plugin has been added, which adds support
for a :validation_context option to Model#save and Model#valid?.
The value for this option will be available via the
validation_context method inside the validation hooks and
validate method.
class Album < Sequel::Model
plugin :validation_contexts
def validate
super
if validation_context == :approve
errors.add(:status_id, 'not 42') unless status_id == 42
end
end
end
album = Album.first
album.status_id = 41
album.valid?(:validation_context=>:approve) # => false
album.status_id = 42
album.valid?(:validation_context=>:approve) # => true
* A columns_updated plugin has been added, allowing you to get
access to the hash used for updating a model instance via the
columns_updated method:
class Album < Sequel::Model
plugin :columns_updated
def after_update
super
if columns_updated.has_key?(:foo)
do_something(columns_updated[:foo])
end
end
end
* Dataset#delete on Microsoft SQL Server now respects limits. Note
that Microsoft SQL Server does not respect orders for deletes, only
limits, which makes this support not very useful. Currently a
deprecation warning will be issued when using a delete with an
order and a limit, and in Sequel 5 an exception will be raised.
* An odbc/oracle subadapter has been added.
* A Model.dataset_module_class accessor has been added, allowing
plugins to add support for custom behavior in dataset_module blocks.
* Support for deprecating constants on Ruby 2.3+ has been added.
Note that you will only get warnings for deprecated constant
use if you are running on Ruby 2.3+. If you are running on a
previous version of Ruby, you should scan your code manually for
deprecated constant use.
= Other Improvements
* Using Model#cancel_action inside validation hooks now works
correctly when Model#valid? is called.
* Model#[] now handles columns with false values correctly when using
the split_values plugin.
* When calling Dataset#union/intersect/except on a dataset with
an offset but no limit, the dataset is wrapped in a subquery, just
like a dataset with a limit.
* The dumping of 64-bit autoincrementing primary key columns by the
schema_dumper extension is now handled correctly when using the
:same_db option.
* The schema_dumper extension now supports the :schema option when
dumping schema.
* On Microsoft SQL Server and SQLAnywhere, ORDER BY clauses now come
after UNION/INTERSECT/EXCEPT instead of before, fixing issues when
the :from_self=>false option is used with union/intersect/except
and an order is applied afterward.
* On Microsoft SQL Server, if calling Dataset#union/intersect/except
on a dataset with an order and without a limit or offset, the order
is removed. When using UNION/INTERSECT/EXCEPT, Microsoft SQL
Server does not guarantee any ordering unless you specify an order
for the compound dataset. As a general rule, you should always
apply orders after compounds instead of before.
* On Microsoft SQL Server <2012, when using a dataset with an offset
without a limit in a UNION/INTERSECT/EXCEPT query, Sequel now uses
TOP (100) PERCENT to work around the limitation that using orders
in subqueries is not supported unless there is a limit (offsets
are emulated by a ROW_NUMBER window function with an order in this
case).
* Database#indexes on MySQL now handles qualified identifiers.
* Sequel now literalizes Sequel::SQLTime instances with 3 fractional
digits in the jdbc/postgresql adapter, fixing issues on JRuby
9.1.8.0+ (the first JRuby version to support greater than
millisecond precision).
= Backwards Compatibility
* When using the association_proxies plugin and passing a block when
loading the plugin, the :proxy_argument option in hash passed to
the block is now an empty hash instead of nil if no argument was
given to the association method.
* The private Model#_valid? method now takes a single options hash
argument, instead of 2 arguments.
* The pg_hstore extension no longer modifies PG_NAMED_TYPES. This
should not affect behavior if the pg_hstore extension is loaded
into the Database instance.
* Support for pg <0.8.0 has been dropped. pg 0.8.0 was released in
January 2008.
sequel-5.63.0/doc/release_notes/4.46.0.txt

= Deprecated Features
* Symbol splitting is now deprecated by default. Sequel has
split symbols since the very first version, but it has caused
many problems over the years and while terse, it isn't
intuitive to new Sequel users and causes significant
problems when using databases that use double/triple
underscores in identifiers.
If you are using symbols with embedded double/triple
underscores, such as:
:table__column
:column___alias
:table__column___alias
you either need to turn symbol splitting on by doing:
Sequel.split_symbols = true
or you need to convert the symbols to Sequel objects:
Sequel[:table][:column]
Sequel[:column].as(:alias)
Sequel[:table][:column].as(:alias)
Sequel ships with multiple extensions that make creation
of those Sequel objects less verbose, so consider using
the symbol_aref, symbol_aref_refinement, symbol_as,
symbol_as_refinement, and/or s extensions.
To automatically convert symbols with double/triple
underscores to their Sequel object equivalents, you can
use the sequel-unsplit tool available at
https://github.com/jeremyevans/sequel-unsplit.
This deprecation also affects virtual row block methods that use
double underscores. For example:
DB[:table].where{table__column > 3}
should be changed to:
DB[:table].where{table[:column] > 3}
* Automatically treating plain strings passed to filtering/update
methods as literal strings has been deprecated, with support moved
to the auto_literal_strings extension. The automatic conversion
of plain strings to literal SQL is the most common cause of
SQL injections in applications using Sequel, since many methods
pass their arguments down to the filtering methods, without
considering whether the argument might be a string derived from
user input. By requiring explicit marking of literal SQL strings,
SQL injections are less likely and easier to audit for.
This change means that unless you want to use the
auto_literal_strings extension, code such as:
DB[:table].where("a = 1")
DB[:table].where("a > ?", 1)
should be converted to:
DB[:table].where(Sequel.lit("a = 1"))
DB[:table].where(Sequel.lit("a > ?", 1))
or even better, avoid literal SQL completely by converting it to
use equivalent Sequel expressions:
DB[:table].where(:a => 1)
DB[:table].where{a > 1}
This change also affects passing Dataset#update a string:
# Before
DB[:table].update("a = a + 1")
# Change to
DB[:table].update(Sequel.lit("a = a + 1"))
DB[:table].update(:a => Sequel[:a] + 1)
Note that this deprecation does not affect cases where literal
SQL is used for the entire query, such as when using any of the
following:
DB["SELECT * FROM foo"]
DB.fetch("SELECT * FROM foo WHERE a = ?", 1)
DB.dataset.with_sql("SELECT * FROM foo WHERE a = ?", 1)
* Passing blocks to virtual row methods has been deprecated,
with support moved to the virtual_row_method_block
extension. Historically, passing blocks to virtual row methods
changed how the methods were handled, but in recent years
alternative methods have been added to get the same results.
If you don't want to use the virtual_row_method_block extension,
conversion is fairly simple:
# WHERE a()
# Before
where{a{}}
# Change to
where{a.function}
# SELECT count(*)
# Before
select{count(:*){}}
# Change to
select{count.function.*}
# SELECT count(DISTINCT c)
# Before
select{count(:distinct, :c){}}
# Change to
select{count(:c).distinct}
# SELECT sum(c) OVER (PARTITION BY a)
# Before
select{sum(:over, :args=>c, :partition=>:a){}}
# Change to
select{sum(:c).over(:partition=>:a)}
* Model.set_allowed_columns and Model#{set,update}_{all,only}
have been deprecated, with support moved to the
whitelist_security plugin. These were the historical mass
assignment methods supported by Sequel, but set_fields and
update_fields have been recommended instead for many years.
* Model.finder and .prepared_finder have been deprecated by default,
with support moved to the finder plugin. Model.finder was
originally added to make it easy to create optimized finder
methods, but few Sequel users actually use it, so it makes more
sense to move it to a plugin.
* Model.def_dataset_method and Model.subset have been deprecated
by default, with support moved to the def_dataset_method plugin.
It's been recommended for many years to use Model.dataset_module
to define dataset methods, instead of calling def_dataset_method
and subset on the model class.
* Using ` in virtual rows to create literal SQL is now deprecated,
switch to using Sequel.lit instead:
# Before
DB[:table].where{`a = 1`}
# Change to
DB[:table].where(Sequel.lit('a = 1'))
* Corner cases in argument handling in the filtering methods are now
deprecated, including:
* Ignoring a filtering method called without an argument or block.
In Sequel 5, this will raise an exception.
* Ignoring empty string arguments or other objects that respond to
empty? and return true. In Sequel 5, only an empty array or hash
will be ignored.
* Ignoring an explicit nil argument when a block is passed. In
Sequel 5, this will use a NULL filter.
* Ignoring an explicit nil argument when there is no existing
filter on the dataset. In Sequel 5, this will use a NULL
filter.
* Using a joined dataset as a Sequel::Model dataset is now
deprecated. Such datasets should now be wrapped in a subquery.
In Sequel 5, such datasets will automatically be wrapped in
a subquery aliased to the first table.
# Before
Model.dataset = DB[:a].join(:b, :id=>:b_id)
# Change to
Model.dataset = DB[:a].join(:b, :id=>:b_id).from_self(:alias=>:a)
* Model.first_where has been deprecated, Model.first should be used
instead.
* Database#log_yield is now deprecated. This does not affect any of
the adapters that ship with Sequel, but external adapters that
have not yet been updated to support #log_connection_yield will need
to be updated.
* The set_overrides extension is now deprecated. Anyone using it
should consider supporting it as an external extension.
* Many internal Database and Dataset regexp and string constants
that were previously used internally have been deprecated.
Additionally, some historical aliases for existing constants
have also been deprecated, such as Sequel::Schema::Generator.
Ruby 2.3+ is required to receive deprecation warnings related to
these constants.
* Passing model classes as the first argument to Dataset#join_table
and Dataset#graph is now deprecated. Pass the model's table name
or the model's datasets instead.
* Passing model instances to Dataset#insert and #insert_sql is now
deprecated. Call values on the model instance to get the values
hash, and pass that as the argument instead.
* Calling Dataset#set_graph_aliases before Dataset#graph is now
deprecated. Dataset#set_graph_aliases should now be called
after Dataset#graph, not before.
* The sequel/no_core_ext file is deprecated. Sequel hasn't loaded
the core extensions by default since Sequel 3. You can use the
following if you want to support both Sequel 3 and Sequel 5:
begin
require 'sequel/no_core_ext'
rescue LoadError
require 'sequel'
end
* Database#pragma_get and #pragma_set on SQLite are now deprecated,
along with any method that calls them, such as auto_vacuum,
temp_store, foreign_keys, case_sensitive_like, synchronous, and
their setter methods. To set these pragmas for all SQLite
database connections, the appropriate options should be passed
when creating the Database instance.
* Automatically looking up the dataset class for a Database
instance by looking for a DatasetClass constant in the Database's
class is now deprecated. All adapters that ship with Sequel have
been converted, but external adapters should now define the
Database#dataset_class_default private method appropriately
to return the correct dataset class.
* Calling Model.db= on a model with a dataset is now deprecated.
If a model already has a dataset, you must now use set_dataset
or dataset= to change the dataset, not db=.
* Sequel::SQL::Expression#sql_literal and #lit are now deprecated.
These aren't used internally and aren't expected to be used
externally.
* {Integer,Timestamp}Migrator::DEFAULT_SCHEMA_{COLUMN,TABLE}
are now deprecated. They have been replaced by
default_schema_column and default_schema_table instance methods.
* Passing a Schema::CreateTableGenerator instance as the
second argument to Database#create_table is now
deprecated. Database#create_table still supports passing the
generator via the :generator option.
* Passing a second argument to Database#alter_table is now
deprecated.
* Sequel::BasicObject.remove_methods! is now deprecated. It has
always been a no-op on ruby 1.9+.
* Referencing the PG_NAMED_TYPES constant in your code is now
deprecated. Previously, adding entries to the PG_NAMED_TYPES
was deprecated, but no deprecation message would be issued by
referencing the constant.
* The conversion of - to _ in adapter schemes is now deprecated.
This does not affect any internal adapters, but it may affect
external ones.
* The Database#jdbc_* methods in the jdbc/db2 adapter (e.g.
jdbc_tables) are now deprecated. Call the regular versions
instead (e.g. tables).
* Dataset#_filter and #_filter_or_exclude private methods have
been deprecated. If you have an extension that was calling these
methods, switch to the new #add_filter private method.
= New Features
* The class_table_inheritance plugin now supports an :alias option.
If provided, this wraps subclass datasets in subqueries, avoiding
problems with ambiguous columns and cases where the wrong table
name is used. Due to the deprecation of joined datasets for
models, use of the class_table_inheritance plugin without this
:alias option will result in deprecation warnings. In Sequel 5,
class_table_inheritance will default to using an :alias option
with the same value as the name of the parent table.
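A minimal sketch of the option, using a hypothetical Employee
parent model whose table is employees:
class Employee < Sequel::Model
  plugin :class_table_inheritance, :alias=>:employees
end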
* The Dataset#sqltime_precision private method has been added.
Adapters can override this if the precision for time values
is different from the precision for timestamp values. Sequel
uses this support on Microsoft SQL Server, so that time values
now support microsecond precision, instead of millisecond
precision.
= Other Improvements
* Sequel::Model classes that use a SQL::Identifier or
SQL::QualifiedIdentifier FROM table value will now use optimized
lookups and deletes, just as is done for those that use a
Symbol or String.
* Dataset#simple_select_all? now handles aliased subqueries
correctly, returning false instead of true.
* If Sequel.application_timezone is set to :utc,
Sequel::SQLTime.create will create instances using utc time
instead of local time.
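For example:
Sequel.application_timezone = :utc
Sequel::SQLTime.create(12, 30, 0) # 12:30:00 UTC instead of local time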
* If there is an exception while rolling back a transaction when
using the :rollback=>:always option, the exception is now
raised instead of being ignored.
* If a migration file does not contain a migration or contains
multiple migrations, the exception raised will now include
the file name in the exception message.
* In the jdbc/sqlserver adapter, time values with fractional
seconds and datetimeoffset values are now handled better when
using some versions of the underlying JDBC driver.
* An additional disconnect error is recognized when using the
mysql and mysql2 adapters.
* Dataset#full_text_search on Microsoft SQL Server now works
correctly if the no_auto_literal_strings extension is used.
* Calling Database#disconnect when using the single connection
pool without an active connection works correctly again. It was
broken starting in 4.43.0 during changes to implement
Database#freeze.
* Model class methods are no longer added for private methods
defined in a dataset_module block. Previously, a public model
class method was defined, but it would raise an error when
called.
* Fixnum is no longer referenced in the sqlanywhere shared
adapter, fixing deprecation warnings on ruby 2.4.
* Sequel no longer uses constants for building SQL queries,
relying on frozen string literal support for better
performance on ruby 2.3+. However, this decreases SQL
query building performance on ruby <2.3. For the fastest
SQL query building, update to a recent version of ruby.
* Sequel no longer ignores an empty object argument to a
filtering method if a block is provided. Previously,
this could raise an exception or produce invalid SQL.
* Many small modifications were made to reduce array
allocations, providing minor speedups.
* Internal use of Array#at has been replaced with Array#[],
providing minor speedups on recent ruby versions.
* The jdbc/db2 adapter no longer adds jdbc_* methods to
JDBC::Database.
* Sequel no longer issues deprecation warnings on ruby 1.8.7.
Sequel 5 will drop support for ruby 1.8.7, and it doesn't make
sense to issue a deprecation warning if you couldn't upgrade
anyway.
= Backwards Compatibility
* When specifying the :fields option to a nested_attributes
setter, set_fields is now used internally instead of set_only.
set_fields has been recommended over set_only since its
introduction in Sequel 3.12, but nested_attributes was added
in Sequel 3.4, before set_fields was available. The result
of this change is that if additional fields are provided that
do not match the fields in the :fields option, they will be
ignored instead of an exception being raised.
* When specifying a function name using a Sequel::SQL::Identifier
instance, the function name is no longer quoted unless
Sequel::SQL::Function#quoted is used to create a quoted
function. The reason for this is to make converting virtual
row method block code easier.
# Before
Sequel.function(Sequel[:a]) # "a"()
# Now
Sequel.function(Sequel[:a]) # a()
Sequel.function(Sequel[:a]).quoted # "a"()
* When passing an SQL::PlaceholderLiteralString instance to a
dataset filtering method, the placeholder string is now always
wrapped in parentheses:
ds.where(Sequel.lit('? OR ?', :a, :b)).where(:c)
# Before: WHERE a OR b AND c
# Now: WHERE (a OR b) AND c
This is more of a bugfix than a backwards compatibility issue,
but is listed in the backwards compatibility section as there
may be applications that could break due to this change.
* Model.subset now calls Model.dataset_module.subset, instead of
the other way around. If your code depends on this, you will
need to make modifications.
* The private Database#column_definition_order method no longer
uses const_get(:COLUMN_DEFINITION_ORDER). External adapters
that defined COLUMN_DEFINITION_ORDER but did not override
this method must now override this method.
* The private Database#native_function_name method no longer
uses const_get(:EMULATED_FUNCTION_MAP). External adapters
that defined EMULATED_FUNCTION_MAP but did not override
this method must now override this method.
sequel-5.63.0/doc/release_notes/4.47.0.txt
= Deprecated Features
* Setting an invalid dataset for a model is now deprecated.
Historically, Sequel has swallowed exceptions for this to keep
backwards compatibility, but it generally just results in code
breaking later. To allow invalid datasets to be used:
Sequel::Model.require_valid_table = false
* The association_autoreloading and many_to_one_pk_lookup plugins
are now deprecated. They were moved from plugins to standard
model behavior in Sequel 4.0, and have been no-ops since.
* The pg_typecast_on_load plugin is now deprecated. It is only useful
on the already deprecated do and swift adapters.
= New Features
* Database#with_server in the server_block extension now accepts an
optional second argument for the read only server to use. This
allows for overriding the default server while providing a separate
default for read only queries:
DB.with_server(:server1, :server1ro) do
DB[:a].all # Uses server1ro
DB[:b].insert(1) # Uses server1
end
* Model.default_association_type_options has been added, allowing the
ability to set default options per association type. This can be
used to make some association types read_only by default:
opts = Sequel::Model.default_association_type_options
opts[:one_to_many] = opts[:many_to_many] = {:read_only=>true}
* Database#views on PostgreSQL now accepts a :materialized option to
return materialized views instead of regular views.
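For example:
DB.views                      # regular views only
DB.views(:materialized=>true) # materialized views only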
= Other Improvements
* Setting Sequel::Model.require_valid_table = true no longer raises
an exception when using a valid dataset that selects from a subquery
or table returning function or uses a join.
* The defaults_setter plugin now inherits any custom default
values when subclassing.
* The schema_dumper extension now handles Oracle 11g XE behavior of
appending not null to the database type.
= Backwards Compatibility
* External callers of Database#check_non_connection_error (private
method) should update their code to call it with a true or false
argument specifying whether to raise an error for exceptions that
are not connection errors.
sequel-5.63.0/doc/release_notes/4.48.0.txt
= Deprecated Features
* The identifier_columns plugin is now deprecated. There is no reason
to use it when Sequel.split_symbols = false, which will be the
default in Sequel 5.
* The filter_having, hash_aliases, and sequel_3_dataset_methods
extensions are now deprecated. They only existed for backwards
compatibility with Sequel 3.
* The query_literals extension is now deprecated. It changes behavior
in a way that makes SQL injections more likely.
* The meta_def extension is now deprecated. It is no longer necessary,
since on ruby 1.9+ you can use define_singleton_method.
* The empty_array_ignore_nulls extension has been deprecated. It
has been a no-op since Sequel 4.25.0.
* The cti_base_model, cti_key, and cti_model_map class methods in
the class_table_inheritance plugin are now deprecated. Use
cti_models.first instead of cti_base_model, sti_key
instead of cti_key, and sti_model_map instead of cti_model_map.
* The :strict option in the nested_attributes plugin is now deprecated.
Switch to using the :unmatched_pk option.
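A minimal sketch, assuming a hypothetical Album model with a
tracks association, and assuming :strict=>false corresponds to
:unmatched_pk=>:ignore:
# Before
Album.nested_attributes :tracks, :strict=>false
# Change to
Album.nested_attributes :tracks, :unmatched_pk=>:ignore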
* Database#reset_conversion_procs on PostgreSQL is now deprecated.
There will be no need to call it in Sequel 5.
* Using global conversion procs added by the pg_* extensions, without
loading the pg_* extension into the Database instance, are now
deprecated. Additionally, using PGArray.register or PGRange.register
to register global types is now also deprecated. Use
Database#register_array_type or Database#register_range_type instead
to register the types on a per-Database basis.
* Treating :natural_inner join type as NATURAL LEFT JOIN on MySQL is
now deprecated. MySQL doesn't support NATURAL INNER JOIN, but if
you were going to convert it, NATURAL JOIN would make more sense.
* Unexpected values passed to Dataset#insert_conflict on SQLite are
now deprecated. Only values that result in one of the following
strings will be allowed in Sequel 5: ROLLBACK, ABORT, FAIL, IGNORE,
and REPLACE.
* The Dataset#and, #exclude_where, #interval, and #range methods
are now deprecated. Undeprecated copies are now available in the
new sequel_4_dataset_methods extension.
* Model.<< is now deprecated. Intuitively, you would expect this
to call Model.create and return the Model class, but it calls <<
on the model's dataset and returns the dataset.
* The Sequel::Postgres::PG_TYPES constant is now deprecated. All
conversion procs should now be added on a per-Database basis using
add_conversion_proc or add_named_conversion_proc. The following
private Database methods related to conversion procs are now
deprecated, though some are still called internally and therefore
do not have deprecation warnings:
* add_named_conversion_procs
* conversion_procs_updated
* convert_named_procs_to_procs
* copy_conversion_procs
* get_conversion_procs
Related to this, loading the sequel/adapters/utils/pg_types.rb file
is now deprecated.
* The following adapter or database specific global accessors for setting
defaults are now deprecated:
* Sequel::DB2.use_clob_as_blob
* Sequel::IBMDB.convert_smallint_to_bool
* Sequel::MySQL.convert_invalid_date_time
* Sequel::MySQL.convert_tinyint_to_bool
* Sequel::MySQL.default_charset
* Sequel::MySQL.default_collate
* Sequel::MySQL.default_engine
* Sequel::Postgres.use_iso_date_format
* Sequel::Postgres.client_min_messages
* Sequel::Postgres.force_standard_strings
* Sequel::SqlAnywhere.convert_smallint_to_bool
Use the Database instance accessors or Database options instead to
change behavior.
* The following adapter or database specific dataset mutation methods are
now deprecated:
* convert_smallint_to_bool= (ibmdb adapter, SQLAnywhere)
* convert_types= (jdbc adapter)
* mssql_unicode_strings= (Microsoft SQL Server)
Use the with_* methods which return a modified copy of the dataset
instead of these mutation methods.
* The Dataset#non_sql_options private method is now deprecated.
External adapters that overrode this method should switch to
overriding Dataset#non_sql_option?.
* The Database#timestamp_convertor private method in the jdbc adapter
is now deprecated. Users should switch to method(:timestamp_convert).
* Modification of the Sequel::JDBC::TypeConvertor class is now
deprecated. External jdbc subadapters that were using this to add
custom conversion procs should be modified.
* Having the pg_row extension respect conversion procs for subtypes
added after the registration of the composite type is now deprecated.
Now, all subtypes should have the appropriate conversion proc added
before the composite type is registered.
* Array#sql_array in the core_extensions extension is now deprecated.
Switch to using Array#sql_value_list.
* The SEQUEL_POSTGRES_USES_PG constant added by the postgres adapter
is now deprecated. Sequel::Postgres::USES_PG should be used instead.
* Many more internal Sequel constants have been deprecated.
= New Features
* The Model#to_json and Dataset#to_json methods in the json_serializer
plugin now support a block. This block is called with the
hash/array that would have been serialized to JSON, and the block
should return the object to serialize. This makes it easy to
customize the JSON output by adding new entries, or wrapping the
object in another object.
The Dataset#to_json method supports an :instance_block option, which
should be a proc that will be passed to Model#to_json.
In order to implement this, Sequel.object_to_json now passes any
block given to the to_json call on the object. If you are
overriding Sequel.object_to_json, you are responsible for making
sure the block is passed appropriately.
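For example, with a hypothetical Album model:
Album[1].to_json{|h| h.merge('type'=>'album')}
Album.dataset.to_json(:instance_block=>proc{|h| h.merge('type'=>'album')})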
* The association_pks plugin now supports a
:association_pks_use_associated_table association option for
many_to_many associations. If this option is used, instead of just
looking at the join table, the association_pks getter will get the
primary keys from the associated table. This can be useful if the
association's right_primary_key does not match the associated
model's primary key, and you are interested in the primary keys of
the associated objects. If this option is used, no association_pks
setter method is created.
* Dataset#as_hash has been added as a replacement to #to_hash.
If you want, you can now undef_method :to_hash and use
as_hash and things will work. Doing so can work around
problems when using keyword argument splats in ruby 2.0+.
For example:
def foo(*a, **b)
end
foo(City.order(:id))
results in foo being called with a being [] and b being
City.order(:id).to_hash, which is unexpected and undesired
behavior. If you want to use keyword argument splats or other
places where ruby will call to_hash implicitly if it is defined,
using undef_method :to_hash is recommended.
* A Database#add_conversion_proc method has been added on
PostgreSQL. This method takes a type OID and either a block
or a callable argument to use as the conversion proc for the
type OID.
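A minimal sketch, where the OID 123456 and the parser are
hypothetical:
DB.add_conversion_proc(123456) do |string|
  MyCustomType.parse(string) # hypothetical parsing of the string value
end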
* The following adapter or database specific Database accessors
have been added for changing settings on a per-Database basis:
* convert_smallint_to_bool (ibmdb adapter)
* default_charset (MySQL)
* default_collate (MySQL)
* default_engine (MySQL)
* use_clob_as_blob (DB2)
* A Dataset#with_convert_types method has been added to the jdbc
adapter, for returning a modified dataset with the convert_types
setting changed.
= Other Improvements
* Using the postgres adapter with pg 0.21.0 no longer results in
deprecation warnings.
* When using the class_table_inheritance plugin and using a direct
subclass of the parent class that does not use a separate table,
as well as using the :alias option, the insert SQL used is now
correct. Previously, it attempted to insert into a subquery,
which is not valid SQL.
Additionally, the dataset for such a model no longer uses a
subquery, since there is no reason to do so as there is no join.
* Model.skip_auto_validations(:not_null) in the auto_validations
plugin now skips not null checks for columns with default
values, in addition to skipping not null checks for columns
without default values.
* The static_cache plugin now supports the options hash argument
to to_hash and to_hash_groups. Currently, it only supports
the :hash option, since the :all option doesn't make sense.
* When touching associations in the touch plugin, clear the
cached association, because otherwise the cached values will
be stale.
* The validation_class_methods plugin no longer requires the
blank extension.
* The validation_helpers plugin methods that support the
:allow_blank option now work correctly if the blank extension
is not loaded.
* Loading the column_conflicts plugin into a model a second time
no longer removes existing column conflict settings.
* On SQLite 3.8.8+, indexes automatically created from unique
constraints are now included in Database#indexes output.
* On SQLite 3.8.8+, partial indexes are now excluded from
Database#indexes output.
* Database#indexes on PostgreSQL 9.5+ now uses a simpler query
with the array_position function.
* Database#foreign_key_list on PostgreSQL now uses a single
query instead of two queries, and also uses the array_position
function on PostgreSQL 9.5+ to simplify the queries.
* On PostgreSQL and Derby, when calling Database#create_table
with the :ignore_index_errors option inside a transaction,
a savepoint is used around each index creation so that an
index error does not cause the entire transaction to fail.
A savepoint is also used on Microsoft SQL Server, but it
appears that Microsoft SQL Server rolls back the entire
transaction if CREATE INDEX returns an error, instead of
just rolling back to the savepoint.
* Encoding is now preserved when parsing PostgreSQL arrays in the
pg_array extension.
* Database#copy_table in the postgres adapter now does not
hide the underlying exception if an exception is raised
during processing.
* Database#copy_into in the jdbc/postgresql adapter now does not
hide the underlying exception if an exception is raised
during processing.
* Database#copy_into in the jdbc/postgresql adapter now
respects the :server option for using a specific shard.
* Calling #reset_conversion_procs on Database instance that
uses the pg_hstore extension now results in the hstore
type still being parsed. Previously, the hstore conversion
proc would be dropped.
* The postgres adapter no longer monkey-patches postgres-pr if it
uses that as the driver.
* Multiple thread-safety issues in the mock adapter have been
fixed.
* Thread safety issues when simultaneously loading multiple
adapters that access PostgreSQL have been fixed.
* Hash allocations have been reduced in the csv_serializer,
json_serializer, and xml_serializer plugins.
* The deprecated Sequel::Model::ANONYMOUS_MODEL_CLASSES
constant is now correctly populated with classes created
by Sequel::Model(). This was broken starting in
Sequel 4.45.0.
= Backwards Compatibility
* The pg_array_associations plugin now loads the pg_array
extension into the Database instance if it is not already
loaded. This can break cases where the pg_array_associations
plugin is used on a non-PostgreSQL database.
* Support for using the old postgres driver has been removed
from the postgres adapter. The postgres adapter now only
supports pg and postgres-pr.
* When the postgres-pr driver is being used by the postgres
adapter, connecting to a database is only allowed if
standard strings are being forced (the default).
sequel-5.63.0/doc/release_notes/4.49.0.txt
= Forward Compatibility
Sequel 4.49.0 will be the last minor release of Sequel 4. While the
vast majority of backwards incompatible changes in Sequel 5 have
deprecation warnings in 4.49.0, there are a few changes that do
not. Here is a brief list of changes coming in Sequel 5 that
do not have deprecation warnings (note that this list may not be
exhaustive):
* The {before,after,around}_validation hooks will always be called
when saving, even if the validate: false option is used. This
will allow you to use the before_validation hook to make changes
to the model instance that are required before validation and
before saving even if not validating. Currently, you would have
to use both a before_save and before_validation hook, which would
both be run on normal instance saving.
* Getting values for newly created model instances after insertion
now happens before after_create is called, instead of after.
This behavior is currently available via the before_after_save
plugin, and will become the default behavior.
* Sequel will now immediately attempt to connect to the database
when a Database instance is created, in order to fail fast. This
behavior is currently available via the test: true option, and
will become the default behavior. You can force not testing the
connection by using the test: false option.
* The validates_unique method in the validation_helpers plugin will
now only check for uniqueness by default if the record is new or
one of the related columns has been modified. You can
use only_if_modified: false to force the uniqueness check.
* Database schema methods and schema generator methods will return
nil instead of some internal value.
* Many cases where Sequel uses send internally will be switched to
public_send so they only call public methods, unless it is
specifically expected that they will call private methods.
* Model association hooks will be nil instead of empty arrays by
default. They will only be arrays if that hook has been set for
the association.
* Internal uses of instance_eval with a block will be changed to
instance_exec. This will allow them to be used with lambdas that
take no arguments. Unfortunately, it will break the case where a
lambda is currently used that takes one argument.
* Most internal constants will be frozen, unless there is a
requirement that they be modified at runtime.
* The @was_new instance variable set during model instance creation
will be removed.
= Deprecated Features
* Model association before callbacks returning false canceling the
action is now deprecated. The callbacks should now call
Model#cancel_action to cancel the action.
* Loading plugins by requiring them via sequel_#{plugin} is now
deprecated. Affected plugins should move the plugin file so it can
be required via sequel/plugins/#{plugin}.
* In the mock adapter, Dataset#autoid=, #_fetch=, and #numrows= are
now deprecated. They modified the dataset itself, which would not
work for frozen datasets. Dataset#with_autoid, #with_fetch,
and #with_numrows should be used instead, which return a modified
copy.
* In the null_dataset extension, Dataset#nullify! is now deprecated.
It modified the dataset itself, which would not work for frozen
datasets. Dataset#nullify should be used instead, which returns a
modified copy.
* Modifying the validation_helpers plugin DEFAULT_OPTIONS hash is now
deprecated. Any change to the default options should be done by
overriding the Model#default_validation_helpers_options private
method.
* Modifying ConnectionPool::CONNECTION_POOL_MAP to support an
external connection pool is now deprecated. To use an external
connection pool, pass the pool class via the :pool_class
Database option. Additionally, using a :pool_class option that
is not a class or a symbol for one of the default connection
pools is also deprecated.
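A minimal sketch, using a hypothetical external pool class:
# MyExternalPool is a hypothetical connection pool class
DB = Sequel.connect('sqlite://albums.db', :pool_class=>MyExternalPool)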
* ConnectionPool#created_count is now deprecated. This method was
misnamed, as it was in alias to size, but the name implies it
returns how many connections have been created, as opposed to how
many connections are still in the pool.
* Sequel::SQL::Function#f is now deprecated, switch to using #name
instead.
* Sequel::SQL::AliasedExpression#aliaz is now deprecated, switch
to using #alias instead.
* The :eager_loading_predicate_key association option and
eager_loading_predicate_key association method are now deprecated.
The predicate_key option and method should be used instead.
* The cti_columns class method in the class_table_inheritance plugin
is now deprecated.
* The serialized_columns class method in the serialization plugin
is now deprecated.
* Having ds.join_table(:table, :cross, :a=>:b) be treated as an
inner join on MySQL is now deprecated.
* Sequel::IBMDB::Connection#prepared_statements= in the ibmdb
adapter is now deprecated.
* Additional internal constants are now deprecated.
= New Features
* Database#extend_datasets and Database#with_extend if given a block
now use a Dataset::DatasetModule instance instead of a plain Module
instance. Dataset::DatasetModule is a subset of
Model::DatasetModule, and allows for the easy creation of dataset
methods that can perform caching for frozen datasets.
Defining dataset methods is done by calling methods with the same
name as dataset methods inside the extend_datasets or with_extend
block:
DB.extend_datasets do
order :by_id, :id
select :with_id_and_name, :id, :name
where :active, :active
end
This is equivalent to:
DB.extend_datasets do
def by_id
order(:id)
end
def with_id_and_name
select(:id, :name)
end
def active
where(:active)
end
end
Except that for frozen datasets (the default in Sequel 5),
code like:
100.times do
DB[:table].active.with_id_and_name.by_id
end
will only allocate 4 datasets instead of 400, and can be
3-4 times faster.
* Dataset#where_{all,each,single_value} are now core dataset methods
instead of just model dataset methods. These methods allow you to
replace:
dataset.where(cond).all
dataset.where(cond).each{}
dataset.where(cond).single_value
with:
dataset.where_all(cond)
dataset.where_each(cond){}
dataset.where_single_value(cond)
The advantage of #where_{all,each,single_value} is that frozen
datasets can potentially take advantage of caching and perform
70%-300% faster.
* Oracle 12 native limit/offset support is now supported, which
in particular makes offset queries much faster as they don't
have to be emulated using the row_number window function.
* Dataset#paged_each in the mysql2 adapter now supports a
:stream=>false option to disable streaming and fallback to
the default implementation.
* The postgres adapter now supports the :sslrootcert option
directly, you no longer need to specify it using the
:driver_options hash.
* The single_table_inheritance plugin now supports an
sti_class_from_sti_key method for getting the appropriate
subclass for the given key.
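For example, assuming a hypothetical Employee model hierarchy with
a Manager subclass:
Employee.sti_class_from_sti_key('Manager') # => Manager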
= Other Improvements
* Using the dataset_associations plugin with a many_through_many
association that joins to the same table multiple times is now
handled correctly by automatically aliasing the table
appropriately.
* On Ruby 2.1+, Sequel::Error#cause will use wrapped_exception
if one is set. This doesn't result in different behavior in
most cases, but it can in cases where nested exception handling
is done and Sequel tries to raise the most relevant exception.
* Using the composition plugin with the :mapping option now works
correctly when using the column_conflicts plugin.
* The validation_helpers plugin's validates_max_length method
now correctly gets the default :nil_message option from
the default_validation_helpers_options method instead of
looking at the plugin defaults.
* The duplicate_columns_handler extension no longer makes the
Dataset#columns= method public.
* On H2 1.4+, alter_table add_primary_key now works correctly.
* The jdbc/sqlserver adapter's datetimeoffset type handling now
works with more JDBC driver versions.
sequel-5.63.0/doc/release_notes/4.5.0.txt
= New Features
* An mssql_optimistic_locking plugin has been added. This is similar
to the regular optimistic_locking plugin, but instead of using an
integer lock column, it uses a timestamp/rowversion lock column.
* Database#create_table with the :temp=>true option on PostgreSQL now
supports an :on_commit option. This option can be set to :drop or
:delete_rows to either drop or empty the temporary table on
transaction commit.
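For example, with a hypothetical temporary table:
DB.create_table(:scratch, :temp=>true, :on_commit=>:drop) do
  Integer :id
end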
= Other Improvements
* Dataset#insert no longer errors on PostgreSQL if the related table
is a placeholder literal string.
* Unique constraints are now copied when emulating alter_table
operations on SQLite.
* Clob column values are no longer returned as SQL::Blob instances
by the db2 and ibmdb adapters unless use_clob_as_blob is true.
* SQL::Blob objects now work correctly as prepared statement
arguments in the jdbc/db2 adapter if use_clob_as_blob is false.
= Backwards Compatibility
* The Model.primary_key array for models with composite keys is now
frozen.
* On DB2, use_clob_as_blob now defaults to false instead of true.
* Sequel no longer uses RubyForge. The Sequel website is now located
at http://sequel.jeremyevans.net.
sequel-5.63.0/doc/release_notes/4.6.0.txt
= New Features
* Database#call_mssql_sproc is now available for calling
stored procedures on Microsoft SQL Server, including the use
of output parameters.
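A minimal sketch, where the procedure name is hypothetical and
:output marks an output parameter:
DB.call_mssql_sproc(:update_sales, :args=>['input value', :output])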
* The Database#{commit,rollback}_prepared_transaction methods now
support a :server option for the server on which to operate.
= Other Improvements
* On Microsoft SQL Server 2012, the native OFFSET/FETCH support
is now used for offsets, instead of emulating support via the
ROW_NUMBER window function.
* Eager loading is now skipped when doing eager(...).naked.all on
a model dataset, instead of raising an error. This can fix issues
when the eager_each plugin is used.
* A couple additional disconnection errors are now detected in the
jdbc/postgresql adapter.
* The tinytds adapter now handles returning rows when the fields
are not immediately available.
* RuntimeErrors raised by oci8 are now handled correctly in the
oracle adapter.
* Sequel's specs now work with RSpec 3, while still running
correctly on RSpec 1.3 and 2.
sequel-5.63.0/doc/release_notes/4.7.0.txt
= New Features
* Alternatives for the more complex virtual row method calls have
been added:
# Window Functions using SQL::Function#over
# before: select{sum(:over, :args=>:col1, :partition=>:col2){}}
select{sum(:col1).over(:partition=>:col2)}
# count(*) using SQL::Function#*
# before: select{count(:*){}}
select{count{}.*}
# count(distinct col) using SQL::Function#distinct
# before: select{count(:distinct, :col){}}
select{count(:col).distinct}
Additionally, schema qualified functions are now supported via
SQL::QualifiedIdentifier#function, and quoted functions are now
supported via SQL::Identifier#function on some databases:
# "func"("col")
select{func.function(:col)}
# "schema"."func"("col1")
select{schema__func.function(:col1)}
If the database does not support quoting function names, then
Sequel will not quote them.
* An update_or_create plugin has been added, for updating a matching
object if one exists, or creating an object if it does not. For
example, the following code will update the number of copies sold
for album with the name 'Hello', or it will create an album with
the name 'Hello' and 1000 number of copies sold:
Album.plugin :update_or_create
Album.update_or_create(:name=>'Hello') do |album|
album.num_copies_sold = 1000
end
You can also use a shorter form of this, with two hashes:
Album.update_or_create({:name=>'Hello'}, {:num_copies_sold=>1000})
This plugin also adds a method named find_or_new, which does the
same thing as update_or_create, except it doesn't persist any
changes.
* A :raise_on_save_failure option has been added for one_to_many,
pg_array_to_many, and many_to_pg_array associations. This mirrors
the Model.raise_on_save_failure setting, and if set to false, it
will make the add/remove methods return nil instead of raising
an error if there is a validation/hook error when saving the
associated record.
* The validates_unique validation in validation_helpers now supports a
:dataset option to provide the base dataset to use to check
uniqueness. This is useful when the model itself uses a filtered
dataset, but the unique index in the database is on an unfiltered
dataset.
The auto_validations plugin uses this option to ensure that unique
validations are set up correctly in subclasses using single table
inheritance.
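A minimal sketch, assuming a hypothetical model with a filtered
dataset but a unique index on the unfiltered table:
class Album < Sequel::Model(DB[:albums].where(:active))
  plugin :validation_helpers
  def validate
    super
    validates_unique :name, :dataset=>model.dataset.unfiltered
  end
end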
= Other Improvements
* Sequel now automatically rolls back transactions in killed threads
on ruby 2.0+. It is still impossible to do so on ruby 1.9.
* In the instance_hooks plugin, validation instance hooks are now
not cleared until after a successful save.
* Composite unique key constraint violations are now recognized
and raised as Sequel::UniqueConstraintViolation on SQLite.
* Primary key unique constraint violations are now recognized and
and raised as Sequel::UniqueConstraintViolation on Microsoft
SQL Server and SQLAnywhere.
* If an exception occurs when using a cursor in the postgres adapter,
and an exception also occurs when closing the cursor when cleaning
up, the initial exception is now raised.
* You can now get tables in a specific schema in the jdbc adapter
using the :schema option to Database#tables. This was already
supported in most jdbc subadapters because they implement #tables
using database specific code instead of looking at the JDBC
metadata, but it should now work for all jdbc subadapters.
* Sequel::SQLTime#to_s is now defined and returns a string in
HH:MM:SS format (leaving off the date).
= Backwards Compatibility
* The odbc adapter's :driver option is no longer deprecated, as reports
were received that it still works.
* If you were re-adding instance validation hooks using instance_hooks
after a save failure, and then retrying the save, you may now end up
with duplicate validations. You no longer need to re-add validation
hooks unless the object was saved successfully.
sequel-5.63.0/doc/release_notes/4.8.0.txt
= New Features
* A one_through_one association type has been added. This is similar
to the many_to_many association type in that it uses a join table,
but it returns a single record instead of an array of records.
This is designed for cases where the foreign key in the join table
that references the current table has a unique constraint, or where
you want to use an order to just pick the first matching record.
Similarly, the many_through_many plugin now also offers a
one_through_many association.
* An association_join method has been added to model datasets, for
setting up joins based on associations. This basically does the
same join that eager_graph would do, but does not make the other
changes that eager_graph makes.
Unlike eager_graph (which uses LEFT OUTER JOINs by default),
association_join uses INNER JOINs, but there are also
association_*_join methods (e.g. association_left_join) for
using different join types.
Similar to eager_graph, you can use cascading of associations or
multiple associations.
Album.association_join(:artist, :tracks)
Artist.association_left_join(:albums=>:tracks)
* Dataset#eager_graph_with_options has been added for model
datasets. It currently supports a :join_type option, for
overriding the type of join to use on a per-call basis, as well
as a :limit_strategy option. The API is similar to eager_graph,
except that the associations to eagerly load are passed in as
a single argument, and it takes an options hash.
The :limit_strategy option works similarly to the
:eager_limit_strategy option when eagerly loading. If set to
true and the database supports window functions, it will join
the current dataset to a subquery that uses a window function
to correctly restrict the join to only those objects that fall
within the association's limit/offset.
The :limit_strategy option is not on by default. It is possible
for it to perform significantly worse than the default strategy
(which uses array slicing in ruby). The :limit_strategy
significantly changes the SQL used, and can change the results
of the query if any filters/orders related to the association
are used.
It's recommended you only use the :limit_strategy option if you
are experiencing a bottleneck and you have benchmarked that it
is faster and still produces the desired results.
Artist.eager_graph_with_options(:first_10_albums,
:limit_strategy=>true)
# SELECT artists.id, artists.name,
# first_10_albums.id AS first_10_albums_id,
# first_10_albums.name AS first_10_albums_name,
# first_10_albums.artist_id,
# first_10_albums.release_date
# FROM artists
# LEFT OUTER JOIN (
# SELECT id, name, artist_id, release_date
# FROM (
# SELECT *, row_number() OVER (PARTITION BY tracks.album_id)
# AS x_sequel_row_number_x
# FROM albums
# ) AS t1 WHERE (x_sequel_row_number_x <= 10)
# ) AS first_10_albums ON (first_10_albums.artist_id = artists.id)
* Dataset#full_text_search on PostgreSQL now supports :plain and
:phrase options. :plain takes the search terms as a single
string, and searches for rows where all terms are used.
:phrase is similar to :plain, but also adds a substring search
to ensure that the string given appears verbatim in the text.
* A :graph_order association option has been added, for using a
different order when using eager_graph. This is mostly
designed for cases where :order should be qualified in other
cases, but using a qualification breaks eager_graph because the
correct qualifier is not known until runtime.
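A minimal sketch, with a hypothetical association using a
qualified :order for regular loading and an unqualified
:graph_order for eager_graph:
Artist.one_to_many :albums, :order=>Sequel.qualify(:albums, :name),
  :graph_order=>:name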
* SQL::AliasedExpression#alias has been added as an alias for #aliaz.
= Other Improvements
* Sequel will now automatically use an eager limit strategy for
*_one associations that use an :order option. For associations
that are truly one-to-one, an :order option is not needed, so it
only makes sense to have an :order option if the association
could theoretically return multiple results (in which case an
eager limit strategy is helpful).
* The queries that Sequel uses to filter by associations when
those associations have conditions are now simpler and easier
for the database to execute.
* The queries that Sequel uses for dataset associations now handle
cases where unqualified identifiers were used in the receiving
dataset that would be made ambiguous by a join.
* A limit strategy is now used when filtering by associations if
the association has a limit and the database supports window
functions. This allows Sequel to setup a correct filter in
such cases.
Artist.where(:first_10_albums=>Album[1]).all
# SELECT *
# FROM artists
# WHERE (artists.id IN (
# SELECT albums.artist_id
# FROM albums
# WHERE ((albums.artist_id IS NOT NULL) AND (albums.id IN (
# SELECT id FROM (
# SELECT albums.id, row_number() OVER
# (PARTITION BY albums.artist_id ORDER BY release_date)
# AS x_sequel_row_number_x
# FROM albums
# ) AS t1
# WHERE (x_sequel_row_number_x <= 10)
# )) AND (albums.id = 1))))
* A limit strategy is now used in the dataset_associations plugin
if the association has a limit and the database supports window
functions. This makes the resulting datasets return correct
results.
Artist.first_10_albums
# SELECT *
# FROM albums
# WHERE ((albums.artist_id IN (
# SELECT artists.id FROM artists)
# ) AND (albums.id IN (
# SELECT id FROM (
# SELECT albums.id, row_number() OVER
# (PARTITION BY albums.artist_id ORDER BY release_date)
# AS x_sequel_row_number_x
# FROM albums
# ) AS t1
# WHERE (x_sequel_row_number_x <= 10)
# )))
# ORDER BY release_date
* You can now pass symbols with embedded qualifiers or aliases,
as well as SQL::Identifier, SQL::QualifiedIdentifier, and
SQL::AliasedExpression objects as the first argument to
Dataset#graph.
* The nested_attributes plugin now automatically handles presence
validations on foreign keys when creating associated objects.
It now sets the foreign key value (or a placeholder value)
before validating such objects.
* Offsets on *_one associations are now respected when using
eager_graph.
* Eager graphing *_many associations with offsets no longer breaks
if there are no associated results.
* Database#register_array_type in the pg_array extension now works
correctly if there is no existing scalar conversion proc for
the type.
* Unique, foreign key, and not null constraint violations are now
recognized correctly on SQLite 3.8.2+.
* The odbc adapter now returns fractional seconds in timestamps.
* The odbc/mssql adapter now inputs timestamps with 3 decimal
places.
= Backwards Compatibility
* The private Model.apply_window_function_eager_limit_strategy
method has been removed.
sequel-5.63.0/doc/release_notes/4.9.0.txt
= Performance Enhancements
* Dataset::PlaceholderLiteralizer has been added as an optimization
framework. This allows you to record changes to a given dataset
using placeholder arguments, and later quickly execute the query
providing values for the placeholders. This is similar in idea
to prepared statements, except that the SQL for each query can
change depending on the values for the placeholders.
Using this optimization framework, generating the SQL for query
is about 3x faster, and since SQL generation time is a significant
portion of total time for simple queries, simple queries can
execute up to 50% faster.
There are two APIs for this optimization framework. There is a
lower level dataset API:
loader = Sequel::Dataset::PlaceholderLiteralizer.
loader(DB[:items]) do |pl, ds|
ds.where(:id=>pl.arg).exclude(:name=>pl.arg).limit(1)
end
loader.first(1, "foo")
# SELECT * FROM items WHERE ((id = 1) AND (name != 'foo')) LIMIT 1
loader.first([1, 2], %w"foo bar")
# SELECT * FROM items WHERE ((id IN (1, 2)) AND
# (name NOT IN ('foo', 'bar'))) LIMIT 1
There is also a higher level model API (Model.finder):
class Item < Sequel::Model
# Given class method that returns a dataset
def self.by_id_and_not_name(id, not_name)
where(:id=>id).exclude(:name=>not_name)
end
# Create optimized method that returns first value
finder :by_id_and_not_name
end
# Call optimized method
Album.first_by_id_and_not_name(1, 'foo')
# SELECT * FROM items WHERE ((id = 1) AND (name != 'foo')) LIMIT 1
Model.finder defaults to creating a method that returns the first
matching row, but using the :type option you can create methods
that call each, all, or get. There is also an option to choose the
method name (:name), as well as one to specify the number of
arguments to use if the method doesn't take a fixed number
(:arity).
Finally, Model.find, .first, and .first! now automatically use an
optimized finder if given a single argument. Model.[] uses an
optimized finder if given a single hash, and Model.[], .with_pk,
and .with_pk! use an optimized finder if the model has a composite
primary key. In all of these cases, these methods are about 50%
faster than before.
* The pure-ruby PostgreSQL array parser that ships with Sequel has
been replaced with a strscan-based parser. This parser avoids
O(n^2) performance for arrays with multibyte strings, and in general
is much faster. Parsing an array with a single string with 100,000
multibyte characters is about 1000x faster, and now about half the
speed of the C implementation in sequel_pg.
* Dataset#paged_each now has a :strategy=>:filter option that
dramatically improves performance, especially if the columns
being ordered by are indexed.
Unfortunately, there are enough corner cases to this approach
that it cannot be used by default. At the least, the dataset
needs to be selecting the columns it is ordering by, not aliasing
the columns it is ordering by in the SELECT clause, not have
NULLs in any of the columns being ordered by, and not itself use
a limit or offset.
If you are ordering by expressions that are not simple column
values, you can provide a :filter_value option proc that takes the
last retrieved row and array of order by expressions, and returns
an array of values in the last retrieved row for those order by
expressions.
* In the postgres adapter, Dataset#paged_each now automatically uses
a cursor for improved performance.
* In the mysql2 adapter, Dataset#paged_each now automatically uses
streaming for improved performance, if streaming is supported.
* Dataset#with_sql_{each,all,first,single_value,insert,update}
have been added. These methods take specific SQL and execute
it on the database, returning the appropriate value. They
are significantly faster than the previous approach of
with_sql(SQL).{each,all,first,single_value,insert,update},
as they don't require cloning the dataset.
= New Features
* Database#create_join_table! and #create_join_table? have been added,
for consistency with #create_table! and #create_table?.
* A :hold option has been added to Dataset#use_cursor in the postgres
adapter, which uses WITH HOLD in the query, allowing for usage of
the cursor outside the enclosing transaction. When :hold is used,
Sequel does not automatically use a transaction around the cursor
call.
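For example, with a hypothetical table:
DB[:huge_table].use_cursor(:rows_per_fetch=>100, :hold=>true).each do |row|
  p row
end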
* Dataset#where_current_of has been added to the postgres adapter,
for updating rows based on a cursor's current position. This can
be used to update a large dataset where new values depend on
some ruby method, without keeping all rows in memory.
ds = DB[:huge_table]
ds.use_cursor(:rows_per_fetch=>1).each do |row|
ds.where_current_of.update(:column=>ruby_method(row))
end
* A current_datetime_timestamp extension has been added, for
creating Time/DateTime instances that are literalized as
CURRENT_TIMESTAMP. When the dataset uses this extension, models
that use the touch and timestamps plugins will use
CURRENT_TIMESTAMP for the timestamps.
* The jdbc adapter now supports a :driver option, useful when
Sequel doesn't have direct support for the underlying driver, and
where java.sql.DriverManager.getConnection does not work
correctly due to Java class loading issues.
= Other Improvements
* Multiple corner cases in Dataset#eager_graph have been fixed.
* Calling Dataset#columns when using the eager_each plugin no
longer triggers eager loading.
* Database#column_schema_to_ruby_default is now a public method
in the schema_dumper extension.
* When validating associated objects for one_to_many and one_to_one
associations in the nested_attributes plugin, don't remove column
values if the association's foreign key is the associated model's
primary key.
* On PostgreSQL, Dataset#disable_insert_returning has been added
back. This disables the automatic use of RETURNING for INSERTs
for the dataset. This is necessary in cases where INSERT
RETURNING doesn't work, such as PostgreSQL <8.2 (or PostgreSQL
variants that forked before 8.2), or when using partitioning
with trigger functions, or conditional rules.
Note that if you use disable_insert_returning, insert will not
return the autoincremented primary key. You need to call
currval or lastval manually using the same connection to get
the value, or use nextval to get the value to use before
inserting.
* The pg_array extension now uses the correct database type when
typecasting values for smallint, oid, real, character, and varchar
arrays. Previously, Sequel did not use the correct database type
in some cases (e.g. text[] for a varchar[]), which resulted in
errors if the value was used in a filter expression.
* Additional unique constraint violations are now recognized on
SQLite.
* Check constraint violations are now recognized on SQLite >=3.8.2.
* Adapters that emulate bitwise operators now do so using an append
only design, similar to how all other queries are built in Sequel.
= Backwards Compatibility
* In some cases Sequel no longer adds superfluous parentheses when
constructing SQL strings. If you are testing for specific SQL,
this can cause test failures.
* The pg_array extension no longer recognizes the :typecast_method
option when registering an array type. The option allowed reuse
of an existing typecast method, but as that results in an incorrect
type at the database level, the option was fundamentally broken.
* The internals of the PostgreSQL array parser have changed. If you
were relying on them, you'll need to update your code.
* Dataset#complex_expression_arg_pairs private method now returns
nested expression objects instead of an already literalized string
in some cases. Custom adapters that call this method will probably
need to be changed. It's recommended that such adapters switch to
using the new Dataset#complex_expression_emulate_append method if
possible.
sequel-5.63.0/doc/release_notes/5.0.0.txt
= Major Changes
* Datasets are now frozen by default. Since Sequel's inception,
datasets have used a method-chaining API that returned modified
copies, but previously they still supported direct mutation. Now,
datasets are always frozen and cannot be mutated. This allows many
additional default optimizations related to caching, and provides
greater thread safety.
ds = DB[:table]
# Before
ds.row_proc = lambda{|h| h}
# Now
ds = ds.with_row_proc(lambda{|h| h})
* Symbol splitting to create qualified and/or aliased identifiers is
now disabled by default. While symbol splitting allowed for shorter
code, it was not obvious and caused significant issues when using
column names with embedded double or triple underscores. Sequel now
offers many ways to create qualified and/or aliased identifiers.
# Before
:table__column # "table"."column"
# Now
:table__column # "table__column"
Sequel[:table][:column] # "table"."column"
# To get back historical behavior
Sequel.split_symbols = true
* Sequel no longer allows the use of plain ruby strings as SQL code
fragments in the dataset filtering methods, as that makes it
easier to introduce SQL injection vulnerabilities. You can use
Sequel.lit to create literal strings (SQL code fragments), which
makes it easier to do security auditing of applications using
Sequel.
# Before
DB[:table].where("column = 1").all
# Now
DB[:table].where(Sequel.lit("column = 1")).all
# or better
DB[:table].where(column: 1).all
# To get back historical behavior
DB.extension :auto_literal_strings
= Backwards Compatibility
* All adapters, extensions, plugins, features, and constants
deprecated in 4.49.0 have been removed. Before upgrading to Sequel
5.0.0, upgrade to 4.49.0 and fix all deprecation warnings.
* Support for ruby 1.8.7 has been dropped, the minimum ruby version is
now 1.9.2.
* The {before,after,around}_validation hooks are now always called
when saving, even if the validate: false option is used. This
allows you to use the before_validation hook to make changes
to the model instance that are required before validation and
before saving even if not validating.
* Getting column values for newly created model instances after
insertion now happens before after_create is called, instead of
after.
* Sequel now immediately attempts to connect to the database
when a Database instance is created, in order to fail fast if the
connection parameters are invalid.
* The validates_unique method in the validation_helpers plugin
now only checks for uniqueness by default if the record is new or
one of the related columns has been modified.
* Database schema modification methods and schema generator methods
now return nil instead of some internal value.
* Many cases where Sequel used Kernel#send internally have been
switched to Kernel#public_send so they only call public methods.
* Model association hooks are now nil instead of empty arrays by
default.
* Internal uses of instance_eval with a block have been changed to
instance_exec. This allows them to be used with lambdas that
take no arguments.
* Most internal constants are now frozen, unless there is a
requirement that they be modified at runtime.
* The Model @was_new instance variable is now no longer set when
saving new model instances.
* The private Sequel::Postgres::PGArray::Parser#new_entry_buffer
method in the pg_array extension has been removed.
* Modifying Model.input_transformer_order in the input_transformer
plugin no longer has an effect.
= New Features
* Database#add_index :if_not_exists option is now supported on
PostgreSQL 9.5+.
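For example, with a hypothetical table and column:
DB.add_index :albums, :name, :if_not_exists=>true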
* SQL::Subscript#expression has been added to retrieve the
expression that is subscripted.
= Other Improvements
* Threaded connection pools no longer block while new connections
are being made. Previously, attempting to establish a new
connection blocked all connection pool activity until the new
connection was made.
* Many minor performance improvements have been made.
* The class_table_inheritance plugin now raises an error during
Model#update if a query does not modify a single row, just as
the default Model#update does.
* ConnectionPool#size is now thread-safe in both threaded
connection pools. Internal callers that already have the
connection pool mutex should switch to using #_size (a new
private method).
* Registration of new serialization formats in the serialization
plugin is now thread-safe.
* If transactional schema modifications are not supported, a
savepoint will not automatically be created when adding
indexes for new tables inside transactions. This fixes issues
when making schema changes inside transactions on MySQL.
* Attempting to create a prepared statement using a dataset that
uses a delayed evaluation now raises an error, because the
prepared statement would not respect the delayed evaluation.
* The bin/sequel -M option now uses base 10. Previously, it
used the Kernel#Integer default, which was base 8 if there was
a preceding 0.
= Deprecated Features
These deprecated features will be removed in Sequel 5.1.0.
* Model.allowed_columns in the base plugin is now deprecated. Use
the whitelist_security plugin if you want to call it.
* Model use_after_commit_rollback class and instance accessors are
now deprecated.
* Defining the Model#_before_validation method is now deprecated.
You can change to using before_validation.
* The private Model.plugin_module_defined? method is now deprecated.
sequel-5.63.0/doc/release_notes/5.1.0.txt
= Improvements
* Database#copy_into in the jdbc/postgresql adapter now works
correctly when using multibyte characters in strings.
* The alter_table add_foreign_key method is now reversible when the
:foreign_key_constraint_name option is used.
* The jdbc/h2 and jdbc/hsqldb adapters now respect the
:foreign_key_constraint_name option.
* Calling Model.freeze on an already frozen model no longer raises
an error.
* An unnecessary database query is now avoided when loading the
pg_inet extension when the pg_array extension is already loaded.
* A better exception message is now used when migrating with an
empty migration directory.
= Backwards Compatibility
* Model.allowed_columns has been removed. Use the whitelist_security
plugin if you want to call it.
* Model use_after_commit_rollback class and instance accessors have
been removed.
* Support for the Model#_before_validation method has been removed.
* The private Model.plugin_module_defined? method has been removed.
sequel-5.63.0/doc/release_notes/5.10.0.txt
= New Features
* Ruby 2.6+ endless ranges are now supported as condition specifier
values, using a >= operator for them:
DB[:t].where(c: 1...)
# SELECT * FROM t WHERE (c >= 1)
* Ruby 2.6+ endless ranges are now supported in the pg_range
extension:
DB[:t].where(id: 1).update(r: 1...)
# UPDATE t SET r = '[1,)' WHERE (id = 1)
* The :include option when creating indexes is now supported on
PostgreSQL 11, specifying additional columns to include in the index
without indexing them. This is useful to allow index only scans in
additional cases.
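  For example (a minimal sketch; the products table and columns
  are hypothetical):

    DB.add_index :products, :category_id, include: [:name, :price]
    # CREATE INDEX ... ON products (category_id) INCLUDE (name, price)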
* The :tablespace option is now supported when creating tables,
indexes, and materialized views on PostgreSQL.
* The list plugin now supports a :top option, which can be used to
specify the top of the list. The default value for the top of the
list is 1, but using this option you can make the top of the list be
0.
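  For example (a minimal sketch, assuming an Item model backed by
  a position column):

    Item.plugin :list, top: 0
    item.move_to_top
    item.position # => 0 instead of the default 1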
= Other Improvements
* In the pg_array_associations plugin, filtering by associations for
many_to_pg_array associations now works correctly on PostgreSQL 11.
Previously it did not work on PostgreSQL 11 due to new restrictions
on using set returning functions in the SELECT list.
* When setting the value of a column to the same value the column
already has, for a new model object that has not yet been persisted,
where the column is used as the foreign key for at least one
many_to_one association, do not clear any related associations from
the associations cache.
* In the pg_array extension, if there are separate conversion procs for
timetz and time types, the conversion proc for the timetz[] type now
correctly uses the conversion proc for the timetz type to convert
scalar values, instead of the conversion proc for the time type.
* Empty arrays and hashes are now correctly handled in
Dataset#{first,where_all,where_each,where_single_value} when a
cached placeholder literalizer is used.
* In the tree plugin, Model#{ancestors,descendants,self_and_siblings}
now work correctly when custom parent/children association names
are used.
* The inner loop of the postgres adapter row fetching code is now
2-3% faster.
* When using the postgres adapter with pg-0.18+, set a
type_map_for_queries for the connection to allow it to handle input
type casts for Integer, Float, TrueClass, and FalseClass values
without allocating strings.
* SQLTime.parse (and therefore Sequel.string_to_time) now respects the
SQLTime.date and Sequel.application_timezone settings.
* The jdbc/postgresql adapter now correctly parses timetz types.
* On JRuby 9.2.0.0, when handling BC timestamps without timezones in
the pg_extended_date_support extension, assume local time and not
UTC time if the database timezone is not specified and
Sequel.datetime_class is Time.
* Errors indicating that a MySQL database is in read-only mode are
now treated as disconnect errors in the mysql and mysql2 adapters,
for better behavior in failover scenarios.
* Sequel::Model datasets now support the use of IN/NOT IN operators
where the second argument for the operator (the right hand side) is
a set returning function. Previously, the Sequel::Model code
assumed the right hand side of an IN/NOT IN operator was a datasets
or array, since those are the only values where Sequel will
automatically create such an operator.
* Sequel no longer loads the strscan library in the pg_array extension
if it is not necessary because the parser from sequel_pg is used.
sequel-5.63.0/doc/release_notes/5.11.0.txt

= New Features
* Sequel now supports more window frame specification types when
using window functions. You can now provide the window frame
specification as a hash, and Sequel will format the correct
SQL. Specifically, this adds support for RANGE and GROUPS,
numeric offsets, and EXCLUDE on a database that supports it
(e.g. PostgreSQL 11+). Examples:
DB[:albums].select{function(c1).over(:partition=>c2, :order=>:c3,
:frame=>{:type=>:range, :start=>1, :end=>1})}
# SELECT function(c1) OVER (PARTITION BY c2 ORDER BY c3
# RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM albums
DB[:albums].select{function(c1).over(:partition=>c2, :order=>:c3,
:frame=>{:type=>:groups, :start=>[2, :preceding], :end=>[1, :preceding]})}
# SELECT function(c1) OVER (PARTITION BY c2 ORDER BY c3
# GROUPS BETWEEN 2 PRECEDING AND 1 PRECEDING) FROM albums
DB[:albums].select{function(c1).over(:partition=>c2, :order=>:c3,
:frame=>{:type=>:range, :start=>:preceding, :exclude=>:current})}
# SELECT function(c1) OVER (PARTITION BY c2 ORDER BY c3
# RANGE UNBOUNDED PRECEDING EXCLUDE CURRENT ROW) FROM albums
* The SQLite 3.24+ ON CONFLICT clause to INSERT is now supported.
This support is very similar to the PostgreSQL support for the
same feature, also known as UPSERT (UPDATE if the row already
exists, INSERT if it does not). This support is different than
the previous support for INSERT ON CONFLICT REPLACE (also known as
INSERT OR REPLACE), but it uses the same method name in order to
be compatible with the PostgreSQL support. The new syntax requires
passing a hash to Dataset#insert_conflict. Examples:
DB[:table].insert_conflict({}).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT DO NOTHING
DB[:table].insert_conflict(target: :a).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) DO NOTHING
DB[:table].insert_conflict(target: :a,
conflict_where: {c: true}).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) WHERE (c IS TRUE) DO NOTHING
DB[:table].insert_conflict(target: :a,
update: {b: Sequel[:excluded][:b]}).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2)
# ON CONFLICT (a) DO UPDATE SET b = excluded.b
DB[:table].insert_conflict(target: :a,
update: {b: Sequel[:excluded][:b]},
update_where: {Sequel[:table][:status_id] => 1}).insert(a: 1, b: 2)
# INSERT INTO TABLE (a, b) VALUES (1, 2) ON CONFLICT (a)
# DO UPDATE SET b = excluded.b WHERE (table.status_id = 1)
* Dataset#window for the WINDOW clause has been moved from the
PostgreSQL-specific support to core, and has been enabled on
MySQL 8+ and SQLAnywhere. This allows you to specify a shared
window specification in a query, which can be used by multiple
window functions.
= Other Improvements
* When using the static_cache plugin, Model.first when called without
a block and without arguments or with a single Integer argument now
uses the cached values instead of issuing a query.
* Using set_column_default with a nil value now correctly removes an
existing default value on MySQL when the column is NOT NULL.
* Window function support has been enabled on SQLAnywhere, since it
works correctly.
* Dumping schema for numeric/decimal columns with default values
now works correctly. This was broken starting in Sequel 5.9.0
due to changes to use BigDecimal() instead of BigDecimal.new().
* The jdbc/sqlserver adapter now works correctly on JRuby 9.2+.
* An additional check constraint violation failure message is now
recognized on SQLite.
sequel-5.63.0/doc/release_notes/5.12.0.txt

= New Features
* An eager_graph_eager plugin has been added, which allows you to
chain eager loads using separate queries to an existing dataset that
uses eager_graph. Given the following model associations:
Band.one_to_many :albums
Album.one_to_many :tracks
Let's say you wanted to return bands ordered by album name, and
eagerly load those albums, you can do that using:
Band.eager_graph(:albums).order{albums[:name]}
Let's say you also wanted to eagerly load the tracks for each album.
You could just add them to the eager_graph call:
Band.eager_graph(albums: :tracks).order{albums[:name]}
However, this bloats the result set, and you aren't ordering by the
track information, so a join is not required. The eager_graph_eager
plugin allows you to specify that the tracks be eagerly loaded in a
separate query after the eager_graph load of albums:
Band.eager_graph(:albums).
eager_graph_eager([:albums], :tracks).
order{albums[:name]}
eager_graph_eager's first argument is a dependency chain, specified
as an array of symbols. This specifies the point at which to
perform the eager load. The remaining arguments are arguments that
could be passed to Dataset#eager to specify what dependent
associations should be loaded at that point.
* A caller_logging Database extension has been added, which logs
caller information before queries, filtering out the internal
Sequel callers. Example:
DB.extension :caller_logging
DB[:table].first
# Logger:
# (0.000041s) (source: /path/to/app/foo/t.rb:12 in `get_first`)
# SELECT * FROM table LIMIT 1
You can further filter the caller lines by setting
Database#caller_logging_ignore to a regexp of additional caller
lines to ignore. This is useful if you have specific methods or
internal extensions/plugins that you would also like to ignore as
they obscure the code actually making the request.
DB.caller_logging_ignore = %r{/path/to/app/lib/plugins}
You can also format the caller before it is placed in the logger,
using caller_logging_formatter:
DB.caller_logging_formatter = lambda do |caller|
"(#{caller.sub(/\A\/path\/to\/app\//, '')})"
end
DB[:table].first
# Logger:
# (0.000041s) (foo/t.rb:12 in `get_first`) SELECT * FROM table LIMIT 1
* Database#call_procedure has been added to the postgres adapter, and
is usable on PostgreSQL 11+ for calling procedures created with
CREATE PROCEDURE.
DB.call_procedure(:foo, 1, "bar")
# CALL foo(1, 'bar')
This method will return a hash of results if the procedure returns
a result, or nil if it does not return a result.
= Other Improvements
* It is now possible to use Dataset#eager_graph in an eager load
callback for associations that use join tables. This allows you
to eager load some associations using separate queries and other
associations using joins. For example:
Band.eager(:albums=>proc{|ds| ds.eager_graph(:tracks)})
Will load the bands in one query, and load the albums and tracks
in a separate query using a join. Previously, this construction
worked only for associations that did not use join tables. It now
works for associations that use join tables, as long as existing
selected columns are not removed inside the callback.
* The tactical_eager_loading plugin now handles automatic eager
loading for associated objects that were created during the
load of a dataset that uses eager_graph. When using the plugin,
the following code will now only execute 2 queries, instead of
issuing a separate query for each album to get the tracks for
the album.
artists = Artist.eager_graph(:albums).all
artists.each do |artist|
artist.albums.each do |album|
album.tracks
end
end
* Calling Dataset#graph with a dataset with existing selections where
the column aliases cannot be determined automatically now works
correctly by using a subselect. Previously, attempting to do this
would raise an exception. This allows the following code to work:
DB[:table].select_all(:table).select_append(expr).graph(...)
* Datasets now cache the EagerGraphLoader object that is generated to
convert arrays of hashes into an object graph, so that subsequent
eager loads on the same dataset do not need to recompute the same
information. Most EagerGraphLoader internal state is now frozen to
prevent unintentional modification.
* Sequel.extension now loads files from gems. Previously, it used
Kernel.require, which does not load files from gems.
* Adapters that emulate prepared statements using literalization now
use a placeholder literalizer and should execute significantly
faster. More prepared statement internal metadata is now frozen
to prevent unintentional modification.
* Dataset#intersect, #except, and #nowait are now supported on MariaDB
10.3+.
* The constraint_validations extension now respects the
constraint_validations_table setting when adding metadata for the
constraint validations.
* In the oracle adapter, the clob prepared statement argument type is
now mapped to the OCI8::CLOB class, allowing the use of Oracle
procedures with clob output parameters.
* The Model.load_cache method in the static_cache plugin is now public.
= Backwards Compatibility
* The private Dataset#prepared_arg? method has been removed. It is no
longer necessary after the refactoring to the prepared statement
code. External adapters that currently call the method should be
updated to no longer call the method.
sequel-5.63.0/doc/release_notes/5.13.0.txt

= New Features
* A constant_sql_override Database extension has been added, allowing
for overriding the SQL used by constants such as
Sequel::CURRENT_TIMESTAMP. This can be used to force
CURRENT_TIMESTAMP to be literalized at a particular time zone:
DB.extension :constant_sql_override
DB.set_constant_sql(Sequel::CURRENT_TIMESTAMP,
"CURRENT_TIMESTAMP AT TIME ZONE 'UTC'")
* Prepared statements now support the :single_value type, which
returns the first column value in the dataset.
prep_stmt = DB[:table].select(:column).prepare(:single_value, :ps)
prep_stmt.call
# PREPARE ps AS SELECT column FROM table LIMIT 1;
# EXECUTE ps;
# => 42
= Other Improvements
* Dataset#from_self will no longer use a cached dataset if any options
are given, as that can result in incorrect behavior.
* Model.all in the static_cache plugin now accepts a block, mirroring
the API when the static_cache plugin is not used.
sequel-5.63.0/doc/release_notes/5.14.0.txt

= New Features
* The :nulls option when creating ordered expressions is now supported
on all databases that Sequel ships support for. For databases that
do not support NULLS FIRST/NULLS LAST, support is emulated.
ds.order(Sequel.asc(:name, :nulls=>:last))
# When emulated:
# ORDER BY (CASE WHEN (name IS NULL) THEN 2 ELSE 1 END), name ASC
* Model#pk_equal? has been added as a more descriptive name for
Model#===. Model#=== is now an alias of Model#pk_equal?.
* The roots and roots_dataset class methods in the tree plugin are now
also available as dataset methods.
= Other Improvements
* Inverting expressions using the ANY/SOME/ALL SQL operators now works
correctly:
# Sequel <5.14.0
Sequel.~(:a=>Sequel.function(:any, :x))
# "(a != any(x))"
# Sequel >=5.14.0
Sequel.~(:a=>Sequel.function(:any, :x))
# "NOT (a = any(x))"
Sequel has always tried to push inversion down to create SQL that is
easier to reason about. However, inversion cannot be pushed down if
an ANY/SOME/ALL SQL operator is used, because that is a different
type of operation that just happens to use the same syntax. Sequel
now avoids inversion push down for boolean operators where the
right hand side is an SQL::Function, LiteralString, or
SQL::PlaceholderLiteralString.
* When creating a boolean expression from a hash or array of pairs, if
the right hand side is an unfrozen array and string, use a frozen
copy in the expression, so that mutating the array or string
argument later does not affect the expression.
* When using the defaults_setter plugin with the :cache option, do not
cache values for columns without parseable defaults. If the default
value exists but is not parseable, caching such values could result
in incorrect behavior if the model instance is saved later.
* For models with composite primary keys, Model#=== now returns false
if any primary key value is nil, mirroring the behavior for the
scalar primary key case.
* Model datasets no longer cache SQL if they include a subquery that
cannot cache SQL.
* The SQL used for constraints in the constraint_validations
extension when the :allow_nil option is used is now clearer and
easier to understand.
* The postgres adapter no longer specifies a default port when using
the pg driver, in order to work with configurations where the
:service option is used in the :driver_options hash. The pg driver
defaults to port 5432 if no port is given, so this should not affect
backwards compatibility.
sequel-5.63.0/doc/release_notes/5.15.0.txt

= New Features
* A :qualify_tables option has been added to the
class_table_inheritance plugin, which will automatically qualify
subclass tables with the same qualifier as the superclass table
if the superclass table is qualified.
* Model#skip_validation_on_next_save! has been added, which skips all
validation on the next save to the object, including the running
of validation related hooks. This method is designed for use only
when Model#valid? is called on the object before saving, to avoid
running validations on the object twice. This method takes
precedence even over an explicit validate: true option passed to
Model#save, and as such should be used with care.
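  For example (a minimal sketch):

    if album.valid?
      # Already validated above, so skip revalidating during save
      album.skip_validation_on_next_save!
      album.save
    end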
* The postgres adapter now supports a :conn_str Database option to
use a PostgreSQL connection string (e.g. "host=foo port=2442") when
connecting. This option has preference over other connection
related options if it is present.
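  For example (a minimal sketch with hypothetical connection
  details):

    DB = Sequel.connect(adapter: 'postgres',
      conn_str: 'host=foo port=2442 dbname=app user=app_user')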
= Other Improvements
* If a foreign key for a model object is changed from a nil value to
a non-nil value, any cached associated objects related to the
foreign key are no longer removed. Such associated objects could
only be set manually, and if they have been set manually, it is
probably not a good idea to remove them automatically.
* When using the nested_attributes plugin, new *_to_many associated
objects are not validated twice when saving.
* The default table alias when using the class_table_inheritance
plugin now correctly handles qualified tables.
* A theoretical thread safety issue when assigning connections
in the threaded connection pools has been fixed.
* Renaming columns is now supported without emulation when using
SQLite 3.25+.
sequel-5.63.0/doc/release_notes/5.16.0.txt

= New Features
* Database#rollback_on_exit has been added, which allows you to
rollback transactions instead of committing them when exiting
the transaction block. Previously, the only way to rollback
a transaction from inside a transaction block was to raise
an exception. This allows you to tell Sequel to roll the
transaction back on exit, and then use return or throw to exit
the transaction block.
Database#rollback_on_exit supports savepoints, including
multiple savepoint levels, as well as canceling rollbacks:
DB.transaction do # BEGIN
DB.rollback_on_exit
end # ROLLBACK
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: true)
end # ROLLBACK TO SAVEPOINT
end # COMMIT
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: true)
end # ROLLBACK TO SAVEPOINT
end # RELEASE SAVEPOINT
end # COMMIT
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: true)
end # ROLLBACK TO SAVEPOINT
end # COMMIT
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: 2)
end # ROLLBACK TO SAVEPOINT
end # ROLLBACK TO SAVEPOINT
end # COMMIT
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: 3)
end # ROLLBACK TO SAVEPOINT
end # ROLLBACK TO SAVEPOINT
end # ROLLBACK
DB.transaction do # BEGIN
DB.rollback_on_exit
DB.rollback_on_exit(cancel: true)
end # COMMIT
* Sequel now supports window functions on SQLite 3.26.0+. SQLite
technically supports window functions on 3.25.0+, but enabling
window function support in Sequel opens up a code path that
generates queries that cause older versions of SQLite to produce a
segmentation fault. This bug in SQLite has been fixed in 3.26.0.
= Other Improvements
* Sequel::Model no longer overrides existing methods when defining
getters and setters. Historically, it only checked for existing
method definitions for methods that could be directly expressed
(e.g. not requiring send). Sequel 5 broke the check for setter
methods that could be directly expressed. This fixes cases where
model inheritance is used and the setter methods are overridden
in a parent class.
* Alter table emulation now works correctly on SQLite 3.26.0+.
* The one_to_one association setter does not modify reciprocal
associations in cases where doing so is not necessary. This can
fix some cases where the nested_attributes plugin is used.
* The class_table_inheritance plugin can now take advantage of the
schema_caching extension to prevent database queries to determine
column information when the class is created.
* The nested_attributes plugin no longer validates one_to_one
associations twice when saving.
* The class_table_inheritance plugin :qualify_tables option now
correctly qualifies subclasses of subclasses.
* SQL expressions that are subscripted are now wrapped in parentheses.
This fixes at least subscripting a function expression on
PostgreSQL:
DB[:t].select{array_agg(column).sql_subscript(1)}
# SELECT (array_agg(column))[1] FROM t
* Sequel::Migrator now uses more descriptive error messages if a
missing or empty migration directory is given.
* bin/sequel -C when converting from SQLite to another database
type will now use 64-bit integer columns in the other database when
the SQLite column type is integer, as SQLite supports storing
64-bit values in integer columns, and most other databases only
support 32-bit values in integer columns.
= Backwards Compatibility
* The mysql adapter no longer attempts to load the mysqlplus driver,
it now only attempts to load the mysql driver.
sequel-5.63.0/doc/release_notes/5.17.0.txt

= New Features
* An instance-level skip_auto_validations method has been added to
the auto_validations plugin, allowing you to skip all or specific
types of auto validations inside the block:
model_instance.skip_auto_validations(:unique) do
puts model_instance.valid?
end
* A Database :preconnect_extensions option has been added. This
option is similar to :extensions, but the extensions are loaded
before the :preconnect option is processed. This allows you to
use the server_logging extension with the :preconnect option.
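  For example (a minimal sketch):

    DB = Sequel.connect('postgres:///',
      preconnect: :concurrently,
      preconnect_extensions: [:server_logging])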
* For specifying custom table aliases when using eager_graph and
association_join, you can now use:
Sequel[:association].as(:table_alias)
in addition to:
Sequel.as(:association, :table_alias)
= Other Improvements
* The ado/mssql adapter now retrieves the number of deleted or
updated rows for a query without issuing a separate query.
* Sequel now avoids the use of Proc.new with an implicit block, as
that feature will be deprecated starting in Ruby 2.7.
sequel-5.63.0/doc/release_notes/5.18.0.txt

= New Features
* A throw_failures plugin has been added for throwing ValidationFailed
and HookFailed exceptions instead of raising them. This can improve
performance by up to 10x on JRuby and 10-15% on CRuby. However,
you would need to modify your exception handling from:
begin
# model.save
rescue Sequel::ValidationFailed => e
# handle failure
end
to:
e = catch(Sequel::ValidationFailed) do
# model.save
end
if e.is_a?(Sequel::ValidationFailed)
# handle failure
end
The throw_failures plugin will still work if you are not catching
the exception, falling back to the default behavior of raising
the exception.
* SQL::Blob.call has been added, so that SQL::Blob can be used
directly as a callable to create a new instance, resulting in
better performance in cases where a callable is needed.
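  For example (a minimal sketch):

    callable = Sequel::SQL::Blob
    callable.call("\x00\xff".b)
    # => Sequel::SQL::Blob instance, equivalent to
    # Sequel::SQL::Blob.new("\x00\xff".b)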
= Other Improvements
* Type conversion in many adapters is now faster by switching from
Proc/Method instances to using singleton call methods on plain
objects. This can improve performance of row fetching by up to
10% in some cases.
* Row fetching is slightly faster in the jdbc and sqlite adapters,
by switching from each to while.
* tzinfo 2 is now supported when using the named_timezones extension.
tzinfo 1 remains supported.
* The optimized Dataset#paged_each methods in the postgres and mysql2
adapters now support being called without a block, returning an
Enumerator in that case, to mirror the behavior of the default
Dataset#paged_each method.
* Sequel no longer uses flow-control exceptions in the
connection_expiration and connection_validator extensions,
significantly improving performance on JRuby.
* The after_initialize plugin no longer makes the argument to
Model.call optional.
= Backwards Compatibility
* Some internal but not private constants and methods previously used
for type conversion in adapters have been removed:
* JDBC::Oracle.OracleDecimal
* JDBC::Oracle.OracleClob
* JDBC::Postgres.RubyPGArray
* JDBC::Postgres.RubyPGHstore
* JDBC::SqlAnywhere.SqlAnywhereBoolean
* JDBC::SQLServer.MSSQLRubyTime
* MySQL::TYPE_TRANSLATOR
* Postgres::TYPE_TRANSLATOR
sequel-5.63.0/doc/release_notes/5.19.0.txt

= New Features
* A Database#rename_enum_value method has been added to the pg_enum
extension. It is supported on PostgreSQL 10+:
DB.rename_enum_value(:enum_type, 'old_name', 'new_name')
= Other Improvements
* The performance of row fetching and type conversion in the
sqlanywhere adapter has been improved.
* The performance of row fetching in the sqlite adapter has been
improved.
* Calling Database#drop_table now drops any constraint validations
metadata for the table if using the constraint_validations
extension. However, modifying the table using Database#alter_table
does not affect the constraint validations metadata.
* The sqlite adapter when used with ruby-sqlite3 1.4.0+ now uses
SQLite extended result codes for a more accurate determination of
specific database errors types.
* Performance for typecasting to decimal and floats has been improved
slightly.
* Performance when merging hashes has been improved slightly.
sequel-5.63.0/doc/release_notes/5.2.0.txt

= New Features
* A pg_extended_date_support extension has been added. This
extension adds support for infinite and BC dates/timestamps on
PostgreSQL.
The postgres adapter already had a convert_infinite_timestamps
setting, but it wasn't supported in the jdbc/postgresql adapter
and it didn't handle BC dates/timestamps. Setting a non-default
convert_infinite_timestamps setting in the postgres adapter will
now automatically load the extension for backwards compatibility.
The pg_extended_date_support extension by default just fixes the
handling of BC dates/timestamps. To get it to handle infinite
timestamps, you need to choose the appropriate setting for your
application:
DB.extension :pg_extended_date_support
DB.convert_infinite_timestamps = :string # or :float or :nil
This extension also enables the handling of timezone offsets
with seconds, which is not natively supported by ruby's Time
class in ruby <2.5.
= Improvements
* The jdbc/mysql adapter now handles smallint unsigned and
integer unsigned column types where the value for the column
is outside of the range of a Java short or integer.
* Sequel::Model.inherited no longer modifies an existing @dataset
instance variable if one has already been set. This fixes a
regression that was introduced in Sequel 5.0.0.
sequel-5.63.0/doc/release_notes/5.20.0.txt

= New Features
* Database#after_commit and #after_rollback transaction hook methods
now support a :savepoint option. Using the :savepoint option makes
the hooks savepoint-aware, so after_commit will only be called if
all enclosing savepoints and the transaction are committed, and
after_rollback will be called when any of the enclosing savepoints
are rolled back (which may be before transaction commit/rollback).
Examples:
x = nil
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.after_commit(savepoint: true){x = 1}
DB.after_rollback(savepoint: true){x = 2}
x # nil
end # RELEASE SAVEPOINT
x # nil
end # COMMIT
x # 1
x = nil
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.after_commit(savepoint: true){x = 1}
DB.after_rollback(savepoint: true){x = 2}
x # nil
raise Sequel::Rollback
end # ROLLBACK TO SAVEPOINT
x # 2
end # COMMIT
x # 2
x = nil
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.after_commit(savepoint: true){x = 1}
DB.after_rollback(savepoint: true){x = 2}
end # RELEASE SAVEPOINT
x # nil
raise Sequel::Rollback
end
x # 2
* The pg_auto_constraint_validations plugin now supports a
pg_auto_constraint_validation_override method for overriding
the columns and message for a specific constraint. This is
useful if the database cannot determine the columns (due
to the constraint containing a database function call), or
if you would like to customize the message per constraint.
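  For example (a minimal sketch; the constraint name, columns, and
  message are hypothetical, and the assumed signature is
  (constraint, columns, message)):

    Album.plugin :pg_auto_constraint_validations
    Album.pg_auto_constraint_validation_override(:name_length,
      [:name], "is too long")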
= Other Improvements
* The one_to_one association setter now works with models that use
joined datasets, such as child models when using the
class_table_inheritance plugin.
* Database#check_constraints on PostgreSQL now also includes CHECK
constraints where the related columns are not known. The :columns
entry in the hash will be an empty array in such cases. The
exclusion of such constraints in previous versions was not
intentional, and the documentation implied that all CHECK
constraints were returned.
* Many cases where instance_exec was previously used on model
instances have been changed so that instance methods are defined
and called instead. This avoids the creation of singleton classes
for model instances, and can significantly improve performance in
some cases. This affects all associations as well as the
following plugins:
* composition
* hook_class_methods
* validation_class_methods
Other cases where instance_exec is now avoided and a different
approach is used:
* association_dependencies plugin
* PlaceholderLiteralString#with_dataset
* The auto_validations plugin now works with child models when using
the class_table_inheritance plugin.
* Database#server_version now works correctly in the mysql2 adapter
when using the MySQL driver with MariaDB 10+.
* The float unsigned type is now recognized and supported in the
schema parser and schema_dumper extension.
sequel-5.63.0/doc/release_notes/5.21.0.txt

= New Features
* The pg_json extension now adds a Database#wrap_json_primitives
accessor. When set to true, JSON primitive values (string, number,
true, false, and null) will be wrapped by delegate Ruby objects
instead of using Ruby primitives. This allows the values to round
trip, so the following code will work even for primitive values in
json_column:
DB.extension :pg_json
DB.wrap_json_primitives = true
value = DB[:table].get(:json_column)
DB[:other_table].insert(json_column: value)
This should be enabled with care, especially in cases where false
and null JSON values are used, as the behavior will change if
the objects are used in a boolean context in Ruby, as only false
and nil in Ruby are treated as false:
# assume JSON false or null value
value = DB[:table].get(:json_column)
if value
# executed if wrap_json_primitives is true
else
# executed by default
end
When typecasting input in model objects to a JSON type, string
input will still be parsed as JSON. However, you can set the
Database#typecast_json_strings accessor to true, and then string
input will be considered as a JSON string instead of parsing the
string as JSON.
To prevent backwards compatibility issues, Sequel.pg_json/pg_jsonb
behavior has not changed. To support wrapping Ruby primitives in
the delegate objects, new Sequel.pg_json_wrap/pg_jsonb_wrap methods
have been added. These methods only handle the Ruby primitives,
they cannot be used if the existing object is already a delegate
object.
As model objects always consider a nil value as SQL NULL and do
not typecast it, if you want to explicitly set a JSON null value,
you need to wrap it explicitly:
model_object.json_column = Sequel.pg_json_wrap(nil)
= Other Improvements
* Sequel now supports the window function options :window and
:exclude, and the :frame options :type=>:groups, :start, and
:end, on SQLite 3.28.0+.
* The server_block extension now respects the :servers_hash Database
option. This makes it more similar to Sequel's default behavior.
However, that means by default, the server_block extension will
default to handling unknown shards as the default shard, instead
of raising an error for them.
* The rcte_tree plugin now disallows eager graphing of the ancestors
and descendants associations. Previously, eager graphing of these
associations generated incorrect results. It is not possible to
eager graph these associations, but normal eager loading does work.
* The ado adapter's performance has been improved by using faster
callables for type conversion and a more efficient inner loop.
* The sqlite adapter now converts a :timeout option given as a string
to an integer. This allows you to use the option inside of a
connection string.
* The mysql and mysql2 adapters now recognize an additional
DatabaseLockTimeout error.
* The jdbc/mysql adapter now works correctly when using JRuby with
Java 11.
* The ado adapter now handles numeric values when using locales that
use comma instead of period as the decimal separator.
= Backwards Compatibility
* In the pg_json extension, the following singleton methods of
Sequel::Postgres::JSONDatabaseMethods are now deprecated:
* parse_json
* db_parse_json
* db_parse_jsonb
sequel-5.63.0/doc/release_notes/5.22.0.txt

= New Features
* Sequel now supports Ruby 2.7+ startless ranges in filters:
DB[:table].where(:column=>(..10))
# SELECT * FROM table WHERE (column <= 10)
DB[:table].where(:column=>(...10))
# SELECT * FROM table WHERE (column < 10)
It also supports startless, endless ranges in filters, using a
condition that is always true:
DB[:table].where(:column=>(nil..nil))
# SELECT * FROM table WHERE (1 = 1)
* Sequel now supports startless ranges in the pg_range extension:
DB.extension :pg_range
DB[:table].insert(:column=>(..10))
# INSERT INTO "table" ("column") VALUES ('[,10]') RETURNING "id"
DB[:table].insert(:column=>(...10))
# INSERT INTO "table" ("column") VALUES ('[,10)') RETURNING "id"
DB[:table].insert(:column=>(nil..nil))
# INSERT INTO "table" ("column") VALUES ('[,]') RETURNING "id"
* Sequel now supports a :materialized option in Dataset#with on
PostgreSQL 12+, to control the inlining of common table expressions:
DB[:t].with(:t, DB[:t2], :materialized=>false)
# WITH "t" AS NOT MATERIALIZED (SELECT * FROM "t2")
# SELECT * FROM "t"
DB[:t].with(:t, DB[:t2], :materialized=>true)
# WITH "t" AS MATERIALIZED (SELECT * FROM "t2")
# SELECT * FROM "t"
= Other Improvements
* Database#primary_key_sequence now works for tables without serial
sequences on PostgreSQL 12+.
* Dataset#multi_insert and #import with return: :primary_key option
on Microsoft SQL Server now work correctly if the dataset uses
a row_proc (e.g. for model datasets).
sequel-5.63.0/doc/release_notes/5.23.0.txt

= New Features
* An insert_conflict plugin has been added for automatically handling
constraint conflicts when saving new model instances. It is
supported on PostgreSQL 9.5+ and SQLite 3.24.0+.
Album.new(name: 'Foo', copies_sold: 1000).
insert_conflict(
target: :name,
update: {copies_sold: Sequel[:excluded][:copies_sold]}
).
save
* On Microsoft SQL Server, the Database :ansi option has been added,
which sets the following ANSI related options:
* ANSI_NULLS
* ANSI_PADDING
* ANSI_WARNINGS
* ANSI_NULL_DFLT_ON
* QUOTED_IDENTIFIER
* CONCAT_NULL_YIELDS_NULL
= Other Improvements
* Sequel.datetime_class = Time is now supported when using the
named_timezones extension. For backwards compatibility, the
named_timezones extension still sets
Sequel.datetime_class = DateTime. When using Ruby 2.6+, the
Time instances have the timezone set on them using Ruby 2.6+'s
timezone support, but basic support works correctly in earlier
versions of Ruby.
* On Microsoft SQL Server, Sequel now handles parsing schema for
tables in another database on the same server or in a database
on a linked server.
* The pg_json extension now correctly handles subclasses of core
classes when wrapping objects. This stopped working in Sequel
5.21.0, when support for wrapping JSON primitives was added.
* Sequel now works around a couple bugs in jdbc-sqlite 3.27.2.1,
allowing schema parsing and foreign key parsing to work.
* Dataset#execute* private methods now respect an explicitly given
:server option, fixing Dataset#paged_each in the postgres adapter
when using sharding.
* Timezone offsets are now handled correctly when typecasting an array
or hash to datetime when Sequel.datetime_class = Time.
* Sequel now avoids errors when parsing schema when using the mock
SQLite adapter.
* A minor thread-safety issue has been fixed in the named_timezones
extension.
sequel-5.63.0/doc/release_notes/5.24.0.txt

= New Features
* A :cache_file plugin option has been added to the
pg_auto_constraint_validations plugin. This option specifies
a file to use to cache the metadata the plugin uses, so the
plugin does not need to run 5 queries per model at startup to
load the metadata. This can dramatically improve startup time
when using the plugin with a large number of models.
To create the metadata file, load the plugin into Sequel::Model
(or whatever class you are using as the base class for your
model classes) with the :cache_file option, and after loading
all of the subclasses of that class, run:
Sequel::Model.dump_pg_auto_constraint_validations_cache
As when using the schema_caching and index_caching extensions,
it is up to the user to ensure that the cached metadata matches
the current database schema. Sequel does no checking of this,
as checking would take more time, and the point of this plugin
is to improve startup performance.
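  For example (a minimal sketch with a hypothetical cache file
  path):

    Sequel::Model.plugin :pg_auto_constraint_validations,
      cache_file: 'pg_auto_constraint_validations.cache'
    # ... require all model files ...
    Sequel::Model.dump_pg_auto_constraint_validations_cache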
* A static_cache_cache plugin has been added. This plugin allows
for caching rows for models using the static_cache plugin. This
prevents the need to issue a query at model creation time to
get the rows. This plugin should be loaded into Sequel::Model
(or whatever class you are using as the base class for your
model classes) before loading the models using the static_cache
plugin. To create the metadata file, after all subclasses of
that class have been loaded, run:
Sequel::Model.dump_static_cache_cache
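  For example (a minimal sketch, assuming the plugin accepts the
  cache file path as an argument):

    Sequel::Model.plugin :static_cache_cache, 'static_cache.cache'
    # ... require all model files ...
    Sequel::Model.dump_static_cache_cache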
* :unique_deferrable and :primary_key_deferrable column
options are now supported on PostgreSQL 9+ and Oracle. This
allows you to created deferrable unique and primary key
column constraints. You could already create deferrable
table constraints using the :deferrable option to the primary_key
and unique methods.
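  For example (a minimal sketch):

    DB.create_table(:items) do
      primary_key :id
      String :name, unique: true, unique_deferrable: true
    end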
* A :generated_always_as column option is now supported on
PostgreSQL 12+, for creating generated columns.
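  For example (a minimal sketch):

    DB.create_table(:line_items) do
      primary_key :id
      Integer :quantity, null: false
      Integer :unit_price, null: false
      Integer :total, generated_always_as: Sequel[:quantity] * :unit_price
    end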
* A Database#skip_logging? private method has been added. This
is designed for use in extensions, to force log timing even
when no loggers are configured.
= Other Improvements
* Sequel no longer sets the :host option to localhost by default
in the mysql2 adapter. This prevents Sequel from overriding
a host specified in the defaults_file.
* All database array types are converted to Ruby arrays in the
jdbc adapter. Previously, this was only done in the
jdbc/postgresql subadapter.
sequel-5.63.0/doc/release_notes/5.25.0.txt

= New Features
* An association_multi_add_remove plugin has been added. This plugin
adds a shortcut for adding or removing multiple associated objects
in a single method call:
Artist.plugin :association_multi_add_remove
Artist.many_to_one :albums
Artist[1].add_albums([Album[2], Album[3]])
Artist[1].remove_albums([Album[4], Album[5]])
It also offers a setter method, which will add and remove associated
objects as necessary:
Artist[1].albums = [Album[3], Album[4]]
= Other Improvements
* The sharding plugin now integrates with the server_block extension.
This makes it so if you retrieve a model instance inside a
with_server block, saving the model instance will save it back to
the shard from which it was retrieved.
* Setting a default for a column on Microsoft SQL Server now works
correctly if the column already has a default.
* Sequel::SQL::NumericMethods#coerce no longer raises NoMethodError
if the super method is not defined. This fixes some cases when
comparing Date/DateTime instances to Sequel objects.
* The csv_serializer plugin now avoids keyword argument separation
issues on Ruby 2.7+.
sequel-5.63.0/doc/release_notes/5.26.0.txt

= New Features
* Support for SQL/JSON path expressions has been added to the
pg_json_ops extension. These are supported in PostgreSQL 12+.
Examples:
j = Sequel.pg_json_op(:json_column)
j.path_exists('$.foo') # (jsonb_column @? '$.foo')
j.path_match('$.foo') # (jsonb_column @@ '$.foo')
j.path_exists!('$.foo') # jsonb_path_exists(jsonb_column, '$.foo')
j.path_match!('$.foo') # jsonb_path_match(jsonb_column, '$.foo')
j.path_query('$.foo') # jsonb_path_query(jsonb_column, '$.foo')
j.path_query_array('$.foo') # jsonb_path_query_array(jsonb_column, '$.foo')
j.path_query_first('$.foo') # jsonb_path_query_first(jsonb_column, '$.foo')
* The nested_attributes method in the nested_attributes plugin now
supports a :require_modification option, which can override the
default require_modification setting for the nested objects. This
can be useful to avoid errors if multiple requests are submitted
simultaneously to delete the same nested row.
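  For example (a minimal sketch):

    Album.plugin :nested_attributes
    Album.one_to_many :tracks
    Album.nested_attributes :tracks, destroy: true,
      require_modification: false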
= Other Improvements
* The dirty plugin now works correctly with the typecast_on_load
plugin.
* Sequel::Postgres::PGRange#hash has been added to the pg_range
extension, allowing PGRange instances to be usable as hash keys.
* Table aliases are now supported for single table INSERT
statements on PostgreSQL 9.5+, which can make some insert_conflict
usage easier.
* Two more foreign key constraint violation types are now recognized
on MySQL 8.0.13+.
sequel-5.63.0/doc/release_notes/5.27.0.txt

= New Features
* Sequel::DEFAULT has been added a constant for the DEFAULT expression,
useful in inserts and especially updates:
DB[:a].where(:id=>1).update(:b=>Sequel::DEFAULT)
# UPDATE "a" SET "b" = DEFAULT WHERE "id" = 1
* SQL::Function#filter for filtered aggregate functions is now
supported on all databases. On databases not supporting it natively
(all except PostgreSQL 9.4+ and SQLite 3.30+), a CASE statement is
used to emulate the support.
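  For example (a minimal sketch, assuming an albums table):

    DB[:albums].select{sum(:copies_sold).filter(publisher_id: 1).as(:p1_sales)}
    # SELECT sum(copies_sold) FILTER (WHERE (publisher_id = 1))
    # AS p1_sales FROM albums
    # (emulated with sum(CASE WHEN ... END) elsewhere)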
= Other Improvements
* NULLS FIRST/LAST is now used without emulation on SQLite 3.30+.
* The pg_enum extension now works correctly on PostgreSQL 8.3-9.0.
* Postgres::ArrayOp#join in the pg_array_ops extension now works
correctly on PostgreSQL <9.1.
sequel-5.63.0/doc/release_notes/5.28.0.txt

= New Features
* An any_not_empty extension has been added, for making Dataset#any?
without a block behave the same as !empty?. This can result in a
much faster database query.
* An exclude_or_null extension has been added, adding a
Dataset#exclude_or_null method that returns rows where the given
expression is false or NULL. This extension is supported on
PostgreSQL, SQLite, MySQL, H2, and HSQLDB.
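  A minimal sketch of both extensions:

    DB.extension :any_not_empty
    DB[:albums].any? # fast existence query instead of fetching rows

    DB.extension :exclude_or_null
    DB[:albums].exclude_or_null(:in_print)
    # rows where in_print is false or NULL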
= Other Improvements
* When using the jdbc/postgresql adapter, calling with_fetch_size
on a dataset will emit a warning. This is because the driver
will ignore the setting.
sequel-5.63.0/doc/release_notes/5.29.0.txt

= New Features
* An empty_failure_backtraces plugin has been added for using empty
backtraces for ValidationFailed and HookFailed exceptions. In many
cases, these exceptions are automatically handled (e.g. web form
submission handling to display appropriate error pages), and using
empty backtraces is 10-15x faster on JRuby 9.2.7.0+.
* Dataset#json_serializer_opts has been added to the json_serializer
plugin. This allows setting default options on a per-Dataset basis
for all Dataset#to_json calls.
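  For example (a minimal sketch):

    Album.plugin :json_serializer
    ds = Album.dataset.json_serializer_opts(only: [:id, :name])
    ds.to_json # serializes only id and name by default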
= Other Improvements
* Another disconnect error is now recognized in the tinytds adapter.
* Using Sequel with the CRuby master branch (what will be Ruby 3)
now works by supporting a second argument for
Dataset#initialize_clone.
* Sequel now avoids a warning in verbose mode when using the postgres
adapter, a recent version of ruby-pg, and bound variables.
sequel-5.63.0/doc/release_notes/5.3.0.txt

= New Features
* An :extensions Database option is now supported, which will load the
named extensions into the Database before any connections are
initiated:
DB = Sequel.connect('mock:///', :extensions=>[:error_sql, :synchronize_sql])
DB = Sequel.connect('mock:///?extensions=error_sql,synchronize_sql')
* A :connect_sqls Database option is now supported, which will issue
the given queries on all new connections:
DB = Sequel.connect('postgres:///', :connect_sqls=>[
'SET random_page_cost = 1.0',
"SET default_tablespace = 'foo'"
])
* DatasetModule#reverse has been added for simpler use of descending
orders:
class Foo < Sequel::Model
dataset_module do
reverse :newest_first, :created_at
end
end
Foo.newest_first.first(10)
* A synchronize_sql extension has been added. This extension checks
out a connection around SQL string creation, and is useful in the
cases where escaping values in the query requires a connection and
a large number of values need to be escaped.
* The following features are now supported on MariaDB 10.2+:
* Common table expressions.
* Window functions.
* Dropping CHECK constraints. Older versions of MariaDB/MySQL
ignored CHECK constraints that were added, and Sequel did not
attempt to filter them out, so Sequel did not require changes to
add CHECK constraints. MariaDB 10.2 CHECK constraints work
correctly with Sequel's constraint_validations extension/plugin.
* Raising CHECK constraint violations as
Sequel::CheckConstraintViolation instances.
* Recognizing curdate() as Sequel::CURRENT_DATE when used as the
default value for a date column.
* Date::Infinity values are now supported in the
pg_extended_date_support extension:
DB.convert_infinite_timestamps = :date
This returns infinite dates/timestamps as Date::Infinity instances,
and literalizes Date::Infinity instances correctly.
= Improvements
* Database#reset_primary_key_sequence now works correctly on
PostgreSQL 10.
* If a commit or rollback raises an exception when using the postgres
adapter, Sequel will check the connection's current transaction
status and only send another rollback if the connection is currently
inside a transaction. This fixes a warning that is issued in most
cases if a commit or rollback fails.
* The jdbc/postgresql adapter now forces JDBC PreparedStatement
instances created by Dataset#call to never be prepared server side,
working around a caching issue in the jdbc-postgres driver in
versions greater than 9.4.1200.
* Database#indexes will no longer return indexes which are in the
process of being dropped on PostgreSQL 9.3+. Additionally,
Database#indexes will now return indexes that have indcheckxmin
set. The previous behavior of removing indexes with indcheckxmin
set was more likely to cause false negatives than to correctly
remove indexes that were not yet valid.
* Common table expressions are no longer hoisted from subqueries on
SQLite. They are still hoisted from queries used in
UNION/INSERT/EXCEPT, since SQLite does not support common table
expressions at that level.
* On Microsoft SQL Server, using an INSERT query with a subquery that
uses a common table expression now hoists the common table
expression from subquery level to main query level, allowing such
queries to work.
* An additional disconnect error is now recognized in the oracle
adapter.
* bin/sequel now adds a Database logger before the initial
connection is made, allowing you to see any connection setup
statements issued to the database.
= Backwards Compatibility
* Calling a filtering method with no argument and a virtual row
block that returns nil on a dataset with no existing filter is
deprecated in this version and will emit a warning. The behavior
in this version remains the same, where the dataset is not
modified. The behavior will change in Sequel 5.4.0 so that a
WHERE NULL filter will be added in that case, instead of the
filter being ignored, so that the behavior is similar to calling
the filtering method with a nil argument.
# Sequel 5.3.0
DB[:a].where{nil}
# SELECT * FROM a
# Sequel 5.4.0
DB[:a].where{nil}
# SELECT * FROM a WHERE NULL
* Support for PostgreSQL <8.1 has been dropped from Database#indexes.
Sequel's PostgreSQL support requires >=8.2 for Dataset#insert to
work, so it doesn't make sense to support earlier versions in other
cases.
sequel-5.63.0/doc/release_notes/5.30.0.txt

= New Features
* Sequel now supports generated columns on SQLite 3.31+ using the
:generated_always_as and :generated_type options. Example:
DB.create_table(:table) do
primary_key :id
Numeric :amount, null: false
Numeric :tax, null: false
Numeric :total, generated_always_as: (Sequel[:amount] + :tax)
end
= Other Improvements
* The Database#transaction :before_retry option is now called before
retrying the transaction even when the :num_retries option is set
to nil.
* The gem no longer ships with specs and older release notes, reducing
the gem size by over 40%.
sequel-5.63.0/doc/release_notes/5.31.0.txt

= New Features
* A forbid_lazy_load plugin has been added to forbid the lazy loading
of model associations if the current object was retrieved with
other objects. This plugin helps detect N+1 query issues, and
raises an error if a lazy load is detected in such cases:
Album.plugin :forbid_lazy_load
Album.one_to_many :tracks
Album.each do |album|
album.tracks
# Could be N+1, raises Sequel::Plugins::ForbidLazyLoad::Error
end
Album.first.tracks
# Cannot be N+1, no error raised
The forbid_lazy_load plugin is designed to be loaded into the base
model class (generally Sequel::Model), and can be loaded only in
test mode, or only in certain test mode configurations, so that it
does not have any production performance impact.
Note that an alternative approach that Sequel has supported for many
years is the tactical_eager_loading plugin, which automatically
eager loads when an N+1 query issue is detected.
* An association_lazy_eager_option plugin has been added which supports
the :eager option for the association method. If the association has
not been loaded, this eagerly loads the associations specified by the
:eager option when loading the association. If the association has
already been loaded, this option is ignored, with the assumption that
whatever loaded the association already used the correct eager
loading. Example:
Album.plugin :association_lazy_eager_option
Album.one_to_many :tracks
Track.many_to_one :artist
album = Album.first
album.tracks(:eager=>:artist)
# Loads tracks for album, then artist for each track (2 queries)
album.tracks(:eager=>:artist)
# No query issued as association is cached
You could previously have similar behavior for uncached associations
by passing a block to the association method and calling eager on
the yielded dataset. However, that would ignore any cached
association, causing redundant loading of the association in such
cases.
* On PostgreSQL 10+, creating partitioned tables and partitions of
other tables is now supported.
To create a partitioned table, use the :partition_by option:
DB.create_table(:table1, partition_by: :date_column,
partition_type: :range) do
Integer :id
Date :date_column
end
DB.create_table(:table2, partition_by: :string_column,
partition_type: :list) do
Integer :id
String :string_column
end
DB.create_table(:table3, partition_by: :int_column,
partition_type: :hash) do
Integer :id
Integer :int_column
end
To add partitions of other tables, use the :partition_of option.
This option will use a custom DSL specific to partitions of other
tables.
For range partitioning, you can use the from and to methods to
specify the inclusive beginning and exclusive ending of the range
of the partition. You can call the minvalue and maxvalue methods
to get the minimum and maximum values for the column(s) in the
range, useful as arguments to from and to:
DB.create_table(:table1a, partition_of: :table1) do
from minvalue
to 0
end
DB.create_table(:table1b, partition_of: :table1) do
from 0
to 100
end
DB.create_table(:table1c, partition_of: :table1) do
from 100
to maxvalue
end
For list partitioning, you use the values_in method. You can also
use the default method to mark a partition as the default partition:
DB.create_table(:table2a, partition_of: :table2) do
values_in 1, 2, 3
end
DB.create_table(:table2b, partition_of: :table2) do
values_in 4, 5, 6
end
DB.create_table(:table2c, partition_of: :table2) do
default
end
For hash partitioning, you use the modulus and remainder methods:
DB.create_table(:table3a, partition_of: :table3) do
modulus 3
remainder 0
end
DB.create_table(:table3b, partition_of: :table3) do
modulus 3
remainder 1
end
DB.create_table(:table3c, partition_of: :table3) do
modulus 3
remainder 2
end
* On PostgreSQL 12+ and SQLite 3.31+, column schema hashes now have
a :generated entry for whether the column is a generated column.
* The schema_dumper extension now dumps generated columns correctly
when using the :same_db option on PostgreSQL 12+.
* A skip_saving_columns plugin has been added. This allows skipping
saving of specific columns for the model. By default, it skips
saving of generated columns, but you can customize the columns
that it skips:
Album.plugin :skip_saving_columns
Album.skip_saving_columns = [:some_column]
= Other Improvements
* The alter_table drop_constraint :primary_key option on SQLite now
works correctly for non-integer primary keys.
* When an error is raised due to an irreversible migration, the error
message now includes the file containing the migration for easier
debugging.
sequel-5.63.0/doc/release_notes/5.32.0.txt

= New Features
* A fiber_concurrency extension has been added, for using
Fiber.current instead of Thread.current when checking out a
connection. This allows separate fibers of the same thread
to use separate connections. In addition to allowing direct use
of fibers, this also allows concurrent use of multiple enumerators
that use database connections in the same thread.
When using this extension, you must be careful and ensure that you
are not using more concurrent fibers than your connection pool size.
Otherwise, all fibers will block while one fiber waits until a
connection is available. It is possible this issue will be
addressed when Ruby implements a fiber scheduler (currently
being discussed for inclusion in Ruby 3).
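  For example (a minimal sketch; Enumerator runs each enumerator's
  block in its own fiber, so each gets its own connection):

    DB.extension :fiber_concurrency
    enum1 = DB[:albums].to_enum(:paged_each)
    enum2 = DB[:artists].to_enum(:paged_each)
    enum1.next # first row from albums, on enum1's connection
    enum2.next # first row from artists, on a second connection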
* A run_transaction_hooks Database extension has been added,
allowing for running the transaction hooks before commit/rollback,
which can be helpful for testing the hooks when using transactional
testing.
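  For example (a minimal sketch, assuming the extension adds a
  Database#run_after_commit_hooks method):

    DB.extension :run_transaction_hooks
    hook_ran = false
    DB.transaction(rollback: :always) do
      DB.after_commit{hook_ran = true}
      DB.run_after_commit_hooks # run the hook now, pre-rollback
    end
    hook_ran # => true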
= Other Improvements
* Database#create_table? now works correctly with the :partition_of
option on PostgreSQL.
* The timestamp(N) with time zone type is now recognized by the
schema parser.
* Singleton methods of the Sequel module have now been moved into a
Sequel::SequelMethods module. This allows you to extend Sequel
with a module that overrides the methods and call super to get
the default behavior.
* The pg_inet extension no longer defines inet/cidr conversion procs
if sequel_pg 1.13.0+ is in use. This is because sequel_pg 1.13.0+
will respect the conversion procs and defining them makes things
slower. sequel_pg 1.13.0+ handles the same conversion by default
without needing a conversion proc.
* Method visibility issues in the model, plugin, extension, and adapter
code have been fixed. Most cases fixed were private methods being
accidentally made public when they were overridden.
During this change, Model#_insert_values was changed from public to
private, since it was originally intended to be private.
sequel-5.63.0/doc/release_notes/5.33.0.txt

= New Features
* Custom join types are now supported on a per-association basis when
using eager_graph/association_join. This builds on the previous
support for custom aliases, using Sequel::SQL::AliasedExpression:
class Artist < Sequel::Model; end
class Album < Sequel::Model; end
class Track < Sequel::Model; end
Artist.one_to_many :albums
Album.one_to_many :tracks
Artist.eager_graph(
Sequel[:albums].as(:a, join_type: :inner) =>
Sequel[:tracks].as(:t, join_type: :left)
)
* A Database#current_timestamp_utc accessor has been added on SQLite.
Setting this to true will keep CURRENT_TIMESTAMP, CURRENT_TIME, and
CURRENT_DATE in UTC instead of converting them to localtime.
= Other Improvements
* The smallserial PostgreSQL type is now recognized and Sequel will
not try to mark smallserial columns as identity columns.
sequel-5.63.0/doc/release_notes/5.34.0.txt 0000664 0000000 0000000 00000003030 14342141206 0017761 0 ustar 00root root 0000000 0000000 = New Features
* The association_pks plugin now creates *_pks_dataset methods for
each association. These are similar to the existing *_pks getter
methods, but they return a dataset of the keys instead of the keys
themselves.
* The association_pks plugin now supports a :cache_pks association
option, which will cache calls to the *_pks getter method. The
default behavior remains that the *_pks getter method only returns
cached values if the *_pks= setter method has been used to set the
values.
* The *_pks getter methods supported by the association_pks plugin
now support a :refresh option to ignore any cached values, similar
to how the association getter methods work.
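A combined sketch of these features (the Album/tags association
is hypothetical):
  Album.plugin :association_pks
  Album.many_to_many :tags, cache_pks: true
  album.tag_pks_dataset        # dataset returning the associated keys
  album.tag_pks                # keys cached due to :cache_pks
  album.tag_pks(refresh: true) # ignore cached values and requery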
= Other Improvements
* If trying to disconnect a server that doesn't exist when using a
sharded connection pool, a Sequel::Error is now raised. Previously,
the sharded threaded pool raised a NoMethodError and the sharded
single connection pool did not raise an error.
* If using the :savepoint option when savepoints are not supported,
a Sequel::InvalidOperation exception is now raised, instead of a
NoMethodError.
* Calling Dataset#eager_graph with no arguments now returns the
dataset.
* If not connected to the database, the single connection pool will
not yield any connections to Database#pool.all_connections.
* Forcing a :ruby eager limit strategy for an association without a
limit or offset now works correctly.
* Multiple unnecessary conditionals have been removed.
* Sequel core and model code now have 100% branch coverage.
sequel-5.63.0/doc/release_notes/5.35.0.txt 0000664 0000000 0000000 00000004101 14342141206 0017762 0 ustar 00root root 0000000 0000000 = New Features
* An instance_specific_default plugin has been added for setting the
default for the :instance_specific association option, or
warning/raising in cases where it is not specified. This allows
you to easily find associations that would be considered instance
specific by default, and mark them as not instance specific for
better performance.
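For example, to default associations without an explicit
:instance_specific option to not instance specific (a sketch;
the plugin also accepts settings such as :warn):
  Sequel::Model.plugin :instance_specific_default, false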
= Other Improvements
* Setting the :instance_specific association option to false now
works correctly if the association uses a block. Associations
that set the :dataset option are now always considered instance
specific, even if the :instance_specific option is explicitly
passed.
* The validation_class_methods plugin now considers all :if,
:allow_missing, :allow_nil, and :allow_blank options. Previously,
it only considered the first of those options that was set.
* Model.finalize_associations no longer breaks if you have
instance-specific associations.
* Model.plugin now warns if you load the plugin with arguments or a
block if the plugin does not accept arguments or a block. This is
because a future change to Sequel could break the call.
* When emulating unsupported alter table operations on SQLite, Sequel
now copies composite unique constraints unless the alter table
operation is the dropping of a unique constraint.
* Sequel now recognizes an additional disconnect error in the oracle
adapter.
* In the run_transaction_hooks extension, calling
run_after_{commit,rollback}_hooks now raises the correct exception
class.
* In the pg_range extension, conversion procs for the tsrange[] and
tstzrange[] types are not added unless the Database uses the
pg_array extension.
* Multiple unnecessary conditionals in plugins and extensions have
been removed.
* Sequel plugin and extension code now have 100% branch coverage.
* Sequel now avoids a statement not reached verbose warning in
Dataset#clone.
= Backwards Compatibility
* The output of Dataset#to_dot in the to_dot extension has changed
slightly, including hash entries with nil keys. These entries
were previously ignored.
sequel-5.63.0/doc/release_notes/5.36.0.txt 0000664 0000000 0000000 00000003764 14342141206 0020001 0 ustar 00root root 0000000 0000000 = New Features
* Dataset#with_ties has been added on PostgreSQL 13+ and Microsoft
SQL Server, which will have a limited dataset also return all
rows with the same order values as the final row.
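For example (the albums table and plays column are hypothetical):
  DB[:albums].order(:plays).limit(5).with_ties.all
  # Returns at least 5 rows, plus any additional rows tied with
  # the 5th row on plays.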
* In the pg_json_ops extension, the following methods have been
added to Postgres::JSONBOp, all of which require PostgreSQL 13+:
* #set_lax
* #path_exists_tz!
* #path_match_tz!
* #path_query_tz
* #path_query_array_tz
* #path_query_first_tz
* On Oracle, the Database#view_exists? method now accepts a
:current_schema option to limit the views returned to the
current schema, instead of all non-system schemas.
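For example:
  DB.view_exists?(:some_view, current_schema: true)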
= Other Improvements
* Sequel will now pass keyword arguments through in the following
cases:
* When loading plugins (Model.plugin)
* Class methods automatically defined for methods defined in a
Model.dataset_module block
* Methods defined by Plugins.def_dataset_method
* Database methods called inside migrations
* Methods called via an association proxy when using the
association_proxies plugin.
* Dataset methods called inside a Dataset#query block when using
the query extension.
Previously, keywords were not handled in these cases, which would
cause deprecation warnings in Ruby 2.7 and ArgumentErrors in Ruby
3.0. Note that Sequel itself does not use keyword arguments at
all, so all of these changes only affect cases where external
methods are defined that accept keywords, and Sequel methods are
called with keywords that end up being delegated to the external
methods.
* The odbc adapter will now stream result sets instead of loading
the entire result set in memory and then iterating over it.
* Sequel now recognizes another disconnect error in the mysql and
mysql2 adapters.
= Backwards Compatibility
* Due to the odbc adapter change to use streaming, issuing queries
inside a Dataset#each block will no longer work unless a different
shard or thread is used. The behavior of such code is considered
undefined on all Sequel adapters.
sequel-5.63.0/doc/release_notes/5.37.0.txt 0000664 0000000 0000000 00000002324 14342141206 0017771 0 ustar 00root root 0000000 0000000 = New Features
* Model#column_previously_was and #column_previously_changed? have
been added to the dirty plugin, for getting the previous values
of the column before saving and for whether there were changes
before saving.
Model#column_previously_changed? accepts :from and :to options
to allow you to more easily determine if the value changed from
and/or to specific values.
This information was previously obtainable via
Model#previous_changes, but these new methods offer a friendlier
interface.
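A sketch of the new methods (the column values are hypothetical):
  Album.plugin :dirty
  album.name = 'New Name'
  album.save
  album.column_previously_was(:name)
  # => 'Old Name'
  album.column_previously_changed?(:name)
  # => true
  album.column_previously_changed?(:name, from: 'Old Name', to: 'New Name')
  # => true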
* Postgres::PGRow::{Array,Hash}Row#op has been added to the
pg_row_ops extension if the pg_row extension is loaded. This
is similar to how the pg_array_ops, pg_hstore_ops, and
pg_json_ops extensions add an #op method to their objects. This makes it
easier to perform row operations on literal rows.
= Other Improvements
* The schema_dumper extension now supports more unsigned numeric
types, such as "decimal(7,2) unsigned" and "real unsigned".
* IntegerMigrator now raises a Migrator::Error if attempting to
migrate down when there are migration files missing and needed for
the down migration. Previously, IntegerMigrator would not raise an
exception and would make no database changes in this case.
sequel-5.63.0/doc/release_notes/5.38.0.txt 0000664 0000000 0000000 00000002100 14342141206 0017762 0 ustar 00root root 0000000 0000000 = New Features
* The jdbc/mysql adapter now supports the newer
com.mysql.cj.jdbc.Driver driver. The adapter will still attempt to
load the older com.mysql.jdbc.Driver if the com.mysql.cj.jdbc.Driver
is not found.
= Other Improvements
* When testing a connection after creating a new Database instance
raises an exception, the Database instance is removed from
Sequel::DATABASES.
* The single_table_inheritance and prepared_statements plugins now
work correctly if loaded into the same class.
* Database connect and disconnect errors are no longer swallowed when
calling Database#create_or_replace_view, Database#server_version
on PostgreSQL, or Database#create_table* on Oracle.
= Backwards Compatibility
* Previously, instantiating a new Database instance directly using
Sequel::Database.new did not test the connection by default. That
was instead handled by Sequel::Database.connect. The test
connection now happens inside Database#initialize. This should only
affect backwards compatibility for code that is calling
Sequel::Database.new directly.
sequel-5.63.0/doc/release_notes/5.39.0.txt 0000664 0000000 0000000 00000001176 14342141206 0017777 0 ustar 00root root 0000000 0000000 = New Features
* On Microsoft SQL Server, the :clustered option is now supported
for primary key and unique constraints. You can use a true value
for CLUSTERED and a false value for NONCLUSTERED.
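A sketch of the option (table and column names are hypothetical):
  DB.create_table(:albums) do
    primary_key :id, clustered: false # NONCLUSTERED primary key
    String :name
    unique :name, clustered: true     # CLUSTERED unique constraint
  end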
= Other Improvements
* Partitioned tables are now included in the result of
Database#tables on PostgreSQL.
* alter_table set_column_allow_null no longer drops the size of
binary columns on Microsoft SQL Server.
* In the tree plugin, the roots_dataset method now works correctly
with queries using joins by qualifying the parent column.
* A fork safety guide has been added, discussing fork safety issues
when using Sequel.
sequel-5.63.0/doc/release_notes/5.4.0.txt 0000664 0000000 0000000 00000006342 14342141206 0017707 0 ustar 00root root 0000000 0000000 = New Features
* An index_caching extension has been added, which makes
Database#indexes use a cache similar to Database#schema, and also
offers methods for saving and loading the cache from a file, similar
to the schema_caching extension.
This can speed up model loading in certain cases when the
auto_validations plugin is used.
* A datetime_parse_to_time extension has been added, which parses
strings without timezone offsets using DateTime.parse instead of
Time.parse. This can fix problems when the string being parsed
represents a time not valid in the local timezone due to daylight
savings time shifts. Time.parse silently shifts such times by 1
hour instead of raising an exception, resulting in incorrect
behavior in that case.
It only makes sense to use this extension when the times in the
database are stored in UTC but not returned with timezone
information, the timezone for the Database instance
(or Sequel.database_timezone) is set to :utc (not the default),
and Time is used as the datetime_class (the default).
* A pg_timestamptz extension has been added for switching the default
generic timestamp type from timestamp to timestamptz.
* Sequel.date_{add,sub} in the date_arithmetic extension now supports
a :cast option for setting the cast type. This value defaults to
Time for backwards compatibility, which uses the default generic
timestamp type for the database.
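For example, to cast the result to timestamptz instead of the
default generic timestamp type (the column name is hypothetical):
  Sequel.extension :date_arithmetic
  Sequel.date_add(:created_at, {years: 1}, cast: :timestamptz)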
* The class_table_inheritance plugin now supports an
:ignore_subclass_columns option which takes an array of column
symbols to ignore in subclasses. This allows you to use
the plugin when your table inheritance hierarchy includes
non-primary key columns with the same name in different tables.
= Improvements
* Dataset#insert_select now returns false instead of nil if it runs
an INSERT statement but does not return a value on Microsoft SQL
Server or PostgreSQL. This can happen on both databases if triggers
are used.
Model#save now checks for a false value returned by
Dataset#insert_select, and does not issue another INSERT statement
in that case.
* Database#indexes now correctly handles SQL::Identifier arguments on
SQLite, Microsoft SQL Server, SQLAnywhere, and DB2.
* Dataset#to_json in the json_serializer plugin and Dataset#to_xml
in the xml_serializer plugin now both handle datasets that use
eager_graph.
* Dataset#nullify now caches the dataset it returns, for better
performance if it is called more than once on the same dataset.
* Database#synchronize is now optimized on ruby 2.5+ and is about
10% faster by relying on the new lazy proc allocation feature.
= Backwards Compatibility
* Fractional second timestamps are now enabled on DB2. If you are
connecting to a DB2 database that does not support fractional
seconds, you should add the following code (where DB is your
Sequel::Database instance):
DB.extend_datasets do
def supports_timestamp_usecs?
false
end
end
* Calling a filtering method with no argument and a virtual row
block that returns nil on a dataset with no existing filter now
adds a WHERE NULL filter, to match the behavior if given a nil
argument. Previously, a deprecation warning was issued and a
dataset with no filter was returned.
sequel-5.63.0/doc/release_notes/5.40.0.txt 0000664 0000000 0000000 00000002740 14342141206 0017765 0 ustar 00root root 0000000 0000000 = New Features
* On SQLite 3.33.0+, the UPDATE FROM syntax is now supported. This
allows you to update one table based on a join to another table.
The SQLite syntax is based on the PostgreSQL syntax, and the
Sequel API is the same for both. You need to pass multiple tables
to Dataset#from. The first table is the table to update, and the
remaining tables are used to construct the UPDATE FROM clause:
DB[:a, :b].where{{a[:c]=>b[:d]}}.update(:e=>'f')
# UPDATE a SET e = 'f' FROM b WHERE (a.c = b.d)
Unlike PostgreSQL, SQLite does not support the deletion of joined
datasets. Related to this, the following methods for testing
database support for modifying joined datasets have been added:
* supports_updating_joins?
* supports_deleting_joins?
= Other Improvements
* The pg_interval and date_arithmetic extensions now support
ActiveSupport 6.1.
* Sequel no longer issues method redefinition warnings in verbose
mode. As Ruby 3 has dropped uninitialized instance variable
warnings, Sequel is now verbose warning free on Ruby 3.
= Backwards Compatibility
* Trying to truncate or insert into a joined dataset now correctly
raises an exception even if the joined dataset supports updates.
* The private Dataset#check_modification_allowed! method is now
deprecated, and users (custom adapters) should now switch to one
of the more specific methods introduced in this version:
* check_insert_allowed!
* check_update_allowed!
* check_delete_allowed!
sequel-5.63.0/doc/release_notes/5.41.0.txt 0000664 0000000 0000000 00000002004 14342141206 0017757 0 ustar 00root root 0000000 0000000 = New Features
* The validation methods added by the validation_helpers plugin now
support the :skip_invalid option, which will not add a validation
error on a column if it already has a validation error. This can
be useful if you want to avoid having duplicate errors.
* The auto_validations plugin now supports a :skip_invalid plugin
option, which will pass the :skip_invalid option when calling
validation methods.
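A sketch of both options:
  # With auto_validations:
  Album.plugin :auto_validations, skip_invalid: true
  # With validation_helpers directly:
  class Album < Sequel::Model
    plugin :validation_helpers
    def validate
      super
      validates_presence :name
      # Skipped if :name already has a presence error:
      validates_min_length 3, :name, skip_invalid: true
    end
  end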
= Other Improvements
* The :adder, :remover, and :clearer association options now
support keyword arguments in Ruby 2.7+.
* In the pg_interval extension, Sequel now uses the same number of
seconds per month and seconds per year as active_support. It
originally used the same number, but active_support changed the
values in active_support 5.1. Sequel now uses the active_support
values if they are available.
* When adding a String column on PostgreSQL, an explicit text: true
option now takes precedence over an explicit :size option, as it
does in Sequel's default behavior.
sequel-5.63.0/doc/release_notes/5.42.0.txt 0000664 0000000 0000000 00000013116 14342141206 0017766 0 ustar 00root root 0000000 0000000 = New Features
* An async_thread_pool Database extension has been added, which
executes queries and processes results using a separate thread
pool. This allows you to do things like:
foos = DB[:foos].async.all
bars = DB[:bars].async.select_map(:name)
foo_bars = DB[:foo_bars].async.each{|x| p x}
and have the three method calls (all, select_map, and each)
execute concurrently. On Ruby implementations without a global
VM lock, such as JRuby, it will allow for parallel execution of
the method calls. On CRuby, the main benefit will be for cases
where query execution takes a long time or there is significant
latency between the application and the database.
When you call a method on foos, bars, or foo_bars, if the thread
pool hasn't finished processing the method, the calling code will
block until the method call has finished.
By default, for consistency, calling code will not preempt the
async thread pool. For example, if you do:
DB[:foos].async.all.size
The calling code will always wait for the async thread pool to
run the all method, and then the calling code will call size on
the result. This ensures that async queries will not use the
same connection as the calling thread, even if the calling thread
has a connection checked out.
In some cases, such as when the async thread pool is very busy,
preemption is desired for performance reasons. If you set the
:preempt_async_thread Database option before loading the
async_thread_pool extension, preemption will be allowed. With
preemption allowed, if the async thread pool has not started the
processing of the method at the time the calling code needs the
results of the method, the calling code will preempt the async
thread pool, and run the method on the current thread.
By default, the async thread pool uses the same number of threads as
the Database object's :max_connections attribute (the default for
that is 4). You can modify the number of async threads by setting
the :num_async_threads Database option before loading the Database
async_thread_pool extension.
Most Dataset methods that execute queries on the database and return
results will operate asynchronously if the dataset is set to be
asynchronous via the Dataset#async method. This includes most
methods available due to the inclusion in Enumerable, even if not
defined by Dataset itself.
There are multiple caveats when using the async_thread_pool
extension:
* Asynchronous behavior is harder to understand and harder to
debug. It would be wise to only use this support in cases where
it provides a significant performance benefit.
* Dataset methods executed asynchronously will use a separate
database connection than the calling thread, so they will not
respect transactions in the calling thread, or other cases where
the calling thread checks out a connection directly using
Database#synchronize. They will also not respect the use of
Database#with_server (from the server_block extension) in the
calling thread.
* Dataset methods executed asynchronously should never ignore their
return value. Code such as:
DB[:table].async.insert(1)
is problematic because without storing the return value, you
have no way to block until the insert has been completed.
* The returned object for Dataset methods executed asynchronously is
a proxy object (promise). So you should never do:
row = DB[:table].async.first
# ...
if row
end
# or:
bool = DB[:table].async.get(:boolean_column)
# ...
if bool
end
because the if branches will always be taken as row and bool will
never be nil or false. If you want to get the underlying value,
call itself on the proxy object (or __value if using Ruby <2.2).
For the same reason, you should not use the proxy objects directly
in case expressions or as arguments to Class#===. Use itself or
__value in those cases.
* Dataset methods executed asynchronously that include blocks have the
block executed asynchronously as well, assuming that the method
calls the block. Because these blocks are executed in a separate
thread, you cannot use control flow modifiers such as break or
return in them.
* An async_thread_pool model plugin has been added. This requires the
async_thread_pool extension has been loaded into the model's Database
object, and allows you to call Model.async instead of
Model.dataset.async. It also adds async support to the destroy,
with_pk, and with_pk! model dataset methods.
* Model#to_json_data has been added to the json_serializer plugin, for
returning a hash of data that can be converted to JSON, instead of
a JSON string.
* A :reject_nil option has been added to the nested_attributes method
in the nested_attributes plugin. This will ignore calls to the
nested attributes setter method where nil is passed as the setter
method argument.
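For example (the Artist/albums association is hypothetical):
  Artist.plugin :nested_attributes
  Artist.one_to_many :albums
  Artist.nested_attributes :albums, reject_nil: true
  artist.albums_attributes = nil # the nil value is ignored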
= Other Improvements
* Model#freeze now works in cases where model validation modifies the
object beyond adding errors.
* Model#freeze in the composition, serialization, and
serialization_modification_detection plugins now works in cases
where validation would end up loading the composed or
serialized values.
* Database#extension now avoids a possible thread safety issue that
could result in the extension being loaded into the Database twice.
* The ado adapter now supports overriding the timestamp conversion
proc. Previously, unlike other conversion procs, the timestamp
conversion proc was hard coded and could not be overridden.
sequel-5.63.0/doc/release_notes/5.43.0.txt 0000664 0000000 0000000 00000007357 14342141206 0020001 0 ustar 00root root 0000000 0000000 = New Features
* A column_encryption plugin has been added to support encrypting the
content of individual columns in a table.
Column values are encrypted with AES-256-GCM using a per-value
cipher key derived from a key provided in the configuration using
HMAC-SHA256.
If you would like to support encryption of columns in more than one
model, you should probably load the plugin into the parent class of
your models and specify the keys:
Sequel::Model.plugin :column_encryption do |enc|
enc.key 0, ENV["SEQUEL_COLUMN_ENCRYPTION_KEY"]
end
This specifies a single master encryption key. Unless you are
actively rotating keys, it is best to use a single master key.
In the above call, 0 is the id of the key, and
ENV["SEQUEL_COLUMN_ENCRYPTION_KEY"] is the content of the key, which
must be a string with exactly 32 bytes. As indicated, this key
should not be hardcoded or otherwise committed to the source control
repository.
For models that need encrypted columns, you load the plugin again,
but specify the columns to encrypt:
ConfidentialModel.plugin :column_encryption do |enc|
enc.column :encrypted_column_name
enc.column :searchable_column_name, searchable: true
enc.column :ci_searchable_column_name, searchable: :case_insensitive
end
With this, all three specified columns (encrypted_column_name,
searchable_column_name, and ci_searchable_column_name) will be
marked as encrypted columns. When you run the following code:
ConfidentialModel.create(
encrypted_column_name: 'These',
searchable_column_name: 'will be',
ci_searchable_column_name: 'Encrypted'
)
It will save encrypted versions to the database.
encrypted_column_name will not be searchable, searchable_column_name
will be searchable with an exact match, and
ci_searchable_column_name will be searchable with a case insensitive
match.
To search searchable encrypted columns, use with_encrypted_value.
This example code will return the model instance created in the code
example in the previous section:
ConfidentialModel.
with_encrypted_value(:searchable_column_name, "will be").
with_encrypted_value(:ci_searchable_column_name, "encrypted").
first
To rotate encryption keys, add a new key above the existing key,
with a new key ID:
Sequel::Model.plugin :column_encryption do |enc|
enc.key 1, ENV["SEQUEL_COLUMN_ENCRYPTION_KEY"]
enc.key 0, ENV["SEQUEL_OLD_COLUMN_ENCRYPTION_KEY"]
end
Newly encrypted data will then use the new key. Records encrypted
with the older key will still be decrypted correctly.
To force reencryption for existing records that are using the older
key, you can use the needing_reencryption dataset method and the
reencrypt instance method. For a small number of records, you can
probably do:
ConfidentialModel.needing_reencryption.all(&:reencrypt)
With more than a small number of records, you'll want to do this in
batches. It's possible you could use an approach such as:
ds = ConfidentialModel.needing_reencryption.limit(100)
true until ds.all(&:reencrypt).empty?
After all values have been reencrypted for all models, and no models
use the older encryption key, you can remove it from the
configuration:
Sequel::Model.plugin :column_encryption do |enc|
enc.key 1, ENV["SEQUEL_COLUMN_ENCRYPTION_KEY"]
end
The column_encryption plugin supports encrypting serialized data,
as well as enforcing uniqueness of searchable encrypted columns
(in the absence of key rotation). By design, it does not support
compression, mixing encrypted and unencrypted data in the same
column, or support arbitrary encryption ciphers. See the plugin
documentation for more details.
sequel-5.63.0/doc/release_notes/5.44.0.txt 0000664 0000000 0000000 00000002643 14342141206 0017773 0 ustar 00root root 0000000 0000000 = New Features
* A concurrent_eager_loading plugin has been added. This plugin
builds on top of the async_thread_pool Database extension and
allows eager loading multiple associations concurrently in
separate threads. With this plugin, you can mark datasets for
concurrent eager loading using eager_load_concurrently:
Album.eager_load_concurrently.eager(:artist, :genre, :tracks).all
Datasets that are marked for concurrent eager loading will use
concurrent eager loading if they are eager loading more than one
association. If you would like to make concurrent eager loading
the default, you can load the plugin with the :always option.
All of the association types that ship with Sequel now support
concurrent eager loading when using this plugin. For custom eager
loaders using the :eager_loader association option, please see the
documentation for the plugin for how to enable custom eager loading
for them.
= Other Improvements
* The date_arithmetic extension now handles ActiveSupport::Duration
values with weeks, as well as :weeks as a key in a hash value. Weeks
are converted into 7 days internally.
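For example (the column name is hypothetical):
  Sequel.extension :date_arithmetic
  Sequel.date_add(:created_at, weeks: 2) # treated as 14 days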
* The shared SQLite adapter now emulates the dropping of non-composite
unique constraints. Non-composite unique constraints are now
treated similarly to composite unique constraints, in that dropping
any unique constraints on a table will drop all unique constraints
on that table.
sequel-5.63.0/doc/release_notes/5.45.0.txt 0000664 0000000 0000000 00000002335 14342141206 0017772 0 ustar 00root root 0000000 0000000 = New Features
* An auto_validations_constraint_validations_presence_message plugin
has been added that provides integration for the auto_validations
and constraint_validations plugins in the following conditions:
* The column has a NOT NULL constraint
* The column has a presence constraint validation with both
the :message and :allow_nil options used.
In this case, when saving a nil value in the column, the plugin
will make it so the more specific message from the presence
constraint validation is used, instead of the generic message
from auto_validations.
= Other Improvements
* On SQLite 3.35.0+, Sequel now uses ALTER TABLE DROP COLUMN for
dropping columns, instead of emulating the dropped column by
recreating the table.
* The Dataset#with :materialized option is now supported on SQLite
3.35.0+ for specifying whether common table expressions should be
materialized.
* The odbc adapter now correctly handles boolean columns with NULL
values. Previously, such values were returned as false instead
of nil.
= Backwards Compatibility
* The change to use ALTER TABLE DROP COLUMN on SQLite 3.35.0+ can
cause backwards compatibility issues if SQLite 3.35.0+ does
not allow dropping the column.
sequel-5.63.0/doc/release_notes/5.46.0.txt 0000664 0000000 0000000 00000010414 14342141206 0017770 0 ustar 00root root 0000000 0000000 = New Features
* An unused_associations plugin has been added, which allows you to
determine which associations and association methods are not used.
You can use this to avoid defining the unused associations and
association methods, which can save memory.
This plugin is supported on Ruby 2.5+, and uses method coverage to
determine if the plugin's methods are called. Because Sequel::Model
adds association methods to an anonymous module included in the
class, directly using the method coverage data to determine which
associations are used is challenging.
This plugin is mostly designed for reporting. You can have a
test suite that runs with method coverage enabled, and use the
coverage information to get data on unused associations:
# Calls Coverage.result
cov_data = Sequel::Model.update_associations_coverage
unused_associations_data = Sequel::Model.update_unused_associations_data(coverage_data: cov_data)
Sequel::Model.unused_associations(unused_associations_data: unused_associations_data)
# => [["Class1", "assoc1"], ...]
unused_associations returns an array of two element arrays, where
the first element is the class name and the second element is the
association name. The returned values will be associations where
all of the association methods are not used.
In addition to determining which associations are not used, you can
also use this to determine if you are defining association methods
that are not used:
Sequel::Model.unused_association_options(unused_associations_data: unused_associations_data)
# => [["Class2", "assoc2", {:read_only=>true}], ...]
unused_association_options is similar to unused_associations, but
returns an array of three element arrays, where the third element
is a hash of association options that should be used to avoid
defining the unused association methods. It's common in Sequel to
define associations and only use them for reading data and not for
modifications, and you can use this to easily see which associations
are only used for reading data.
As the determination of whether associations are used is based on
method coverage, this will report as unused any associations that are
used but where the association methods are not called. These cases
are rare, but can happen if you have libraries that use the
association reflection metadata without calling the association
methods, or use the association only in combination with another
plugin such as dataset_associations. You can set the :is_used
association option to explicitly mark an association as used, and
have this plugin avoid reporting it as unused.
In addition to just reporting on unused associations, you can also
directly use the unused associations metadata to automatically avoid
defining unused associations or unused associations methods. You
can set a :file option when loading the plugin:
Sequel::Model.plugin :unused_associations, file: 'unused_associations.json'
Then run the method coverage testing. This will save the unused
associations metadata to the file. Then you can use this metadata
automatically by also setting the :modify_associations option:
Sequel::Model.plugin :unused_associations, file: 'unused_associations.json',
modify_associations: true
With the :modify_associations option, unused associations are
skipped instead of being defined, and the options returned by
unused_association_options are automatically used. Note that using
the :modify_associations option is risky unless you have complete
coverage and do not have cases where the associations are used
without calling methods.
It is common to have multiple test suites where you need to combine
coverage. The plugin supports this by using a :coverage_file option:
Sequel::Model.plugin :unused_associations, coverage_file: 'unused_associations_coverage.json'
In this case, you would run update_associations_coverage after each
test suite, and update_unused_associations_data only after all test
suites have been run.
* Passing nil as the value of the :setter, :adder, :remover, or
:clearer association options will cause the related method to not be
defined, instead of using the default value. This allows you to
only define the methods you will actually be using.
sequel-5.63.0/doc/release_notes/5.47.0.txt 0000664 0000000 0000000 00000004736 14342141206 0020003 0 ustar 00root root 0000000 0000000 = New Features
* Sequel now supports using separate queries for each table for both
lazy and eager loading of the following associations:
* many_to_many
* one_through_one
* many_through_many # many_through_many plugin
* one_through_many # many_through_many plugin
For many_to_many/one_through_one, you specify the :join_table_db
association option, which should be a Sequel::Database instance
containing the join table. It is possible for the current table,
join table, and associated table all to be in separate databases:
JOIN_TABLE_DB = Sequel.connect('...')
Album.many_to_many :artists, join_table_db: JOIN_TABLE_DB
For many_through_many/one_through_many, you can use the :db option
in each join table specification. All join tables can be in
separate databases:
JTDB1 = Sequel.connect('...')
JTDB2 = Sequel.connect('...')
# Tracks on all albums this artist appears on
Artist.many_through_many :album_tracks, [
{table: :albums_artists, left: :artist_id, right: :album_id, db: JTDB1},
{table: :artists, left: :id, right: :id, db: JTDB2}
],
class: :Track, right_primary_key: :album_id
* The :allow_eager_graph association option has been added. Setting
this option to false will disallow eager loading via #eager_graph.
This is useful if you can eager load the association via #eager,
but not with #eager_graph.
* The :allow_filtering_by association option has been added. Setting
this option to false will disallow the use of filtering by
associations for the association.
* Dataset#returning is now supported on SQLite 3.35.0+. To work around
bugs in the SQLite implementation, identifiers used in the RETURNING
clause are automatically aliased. Additionally, prepared statements
that use the RETURNING clause on SQLite seem to have issues, so the
prepared_statements plugin does not automatically use prepared
statements on SQLite for queries that use the RETURNING clause.
* Database#rename_tables has been added on MySQL to support renaming
multiple tables in the same query.
= Other Improvements
* The unused_associations plugin now tracks access to the association
reflection for associations, so it will no longer show an
association as completely unused if something is accessing the
association reflection for it. This eliminates most of the false
positives, where the plugin would show an association as unused
even though something was using it without calling the association
methods.
sequel-5.63.0/doc/release_notes/5.48.0.txt 0000664 0000000 0000000 00000001020 14342141206 0017763 0 ustar 00root root 0000000 0000000 = New Features
* A Sequel::Database#like_without_collate accessor has been added on
Microsoft SQL Server, which avoids using the COLLATE clause for
LIKE expressions. This can speed up query performance significantly.
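For example:
  DB.like_without_collate = true
  DB[:albums].where(Sequel.like(:name, 'A%')).all
  # LIKE expression emitted without a COLLATE clause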
* A private Sequel::Model::Errors#full_message method has been added
to make it easier to support internationalization for Sequel::Model
error messages.
= Other Improvements
* The association reflection tracking in the unused_associations
plugin now works correctly when combining coverage runs.
sequel-5.63.0/doc/release_notes/5.49.0.txt 0000664 0000000 0000000 00000004773 14342141206 0020006 0 ustar 00root root 0000000 0000000 = New Features
* Model#validates_no_null_byte has been added to the
validation_helpers plugin. It checks that the value being validated does
not contain an ASCII NUL ('\0') byte. Some databases will return an
error if a string contains a NUL byte.
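A sketch of direct usage with validation_helpers:
  class Album < Sequel::Model
    plugin :validation_helpers
    def validate
      super
      validates_no_null_byte :name
    end
  end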
The auto_validations plugin will now automatically add no_null_byte
validations for all string columns in the model's table. This will
change exceptions raised by NUL bytes from database errors to
validation failures.
If you are using auto_validations and would like to have a table
accept NUL bytes in string columns, use the following code inside
the model:
skip_auto_validations(:no_null_byte)
* JSONB subscripts are now supported on PostgreSQL 14+ when using the
pg_json_ops extension. You can use JSONB subscripts to more easily
update part of a JSONB column:
DB[:table].update(Sequel.pg_jsonb_op(:column)['key'] => 'value')
UPDATE "table" SET "column"['key'] = 'value'
* hstore subscripts are now supported on PostgreSQL 14+ when using the
pg_hstore_ops extension. You can use hstore subscripts to more
easily update part of an hstore column:
DB[:table].update(Sequel.hstore_op(:column)['key'] => 'value')
UPDATE "table" SET "column"['key'] = 'value'
* Sequel now supports table aliases for JOIN USING columns on
PostgreSQL 14+. These allow you to reference the USING columns in
the query using a qualified identifier. To use this support, pass an
SQL::AliasedExpression as the expression to join on:
DB[:t1].join(:t2, Sequel.as([:c1, :c2], :alias))
# SELECT * FROM "t1" INNER JOIN "t2" USING ("c1", "c2") AS "alias"
* Database#create_trigger on PostgreSQL now supports a :replace option
for CREATE OR REPLACE TRIGGER (supported in PostgreSQL 14+).
* SQL::Expression#sequel_ast_transform has been added to support
AST transforms of custom expression classes.
= Other Improvements
* Sequel now supports calling PostgreSQL procedures without arguments
when using Database#call_procedure. Previously, attempts to call
procedures without arguments would call the procedure with a
single NULL argument.
* Sequel now uses defined?(yield) instead of block_given? internally
for better performance on CRuby. defined?(yield) is faster as it is
built into the VM, while block_given? is a regular method and has
the overhead of calling a regular method. Note that defined?(yield)
is not implemented correctly on JRuby before 9.0.0.0, so this
release of Sequel drops support for JRuby versions before 9.0.0.0.
sequel-5.63.0/doc/release_notes/5.5.0.txt 0000664 0000000 0000000 00000004437 14342141206 0017713 0 ustar 00root root 0000000 0000000 = New Features
* The defaults_setter plugin now supports a :cache option, which
will cache default values in the model object's values hash:
Model.plugin :defaults_setter
o = Model.new
o.column # => 1 # default value
o.values # => {}
Model.plugin :defaults_setter, cache: true
o = Model.new
o.column # => 1 # default value
o.values # => {:column => 1}
* The pg_array extension now sets a :callable_default schema entry
for recognized empty array defaults.
* The pg_hstore extension now sets a :callable_default schema entry
for recognized empty hstore defaults.
* The pg_json extension now sets a :callable_default schema entry for
recognized empty json/jsonb array/hash defaults.
* The pg_inet extension now sets a :ruby_default schema entry for
recognized inet/cidr defaults.
* The pg_range extension now sets a :ruby_default schema entry for
recognized range defaults.
* The defaults_setter plugin will now give preference to a
:callable_default schema entry over a :ruby_default schema entry.
Combined with the other changes listed above, this makes default
values recognized by the pg_array, pg_hstore, and pg_json extensions
work well if the defaults_setter :cache option is also used.
= Other Improvements
* The modification_detection plugin no longer breaks column change
detection for new objects.
* Database#copy_table in the postgres adapter now handles errors that
occur when processing rows. Previously, an exception could be
raised on the next query in that case.
* The results of the changed_columns method are now cached in many
places internally where they are called in a loop. This results
in better performance, especially if the modification_detection or
serialization_modification_detection plugins are used.
= Backwards Compatibility
* The pg_interval extension now sets a :ruby_default schema entry for
recognized interval defaults to the same value Sequel would return
if the default value was returned. Previously, Sequel would use a
string in the :ruby_schema schema value.
* String values in hashes returned by Database#schema are now frozen
to prevent possible thread-safety issues and issues with
unintentional modification of a shared string. The hashes
themselves are not frozen and can still be modified.
sequel-5.63.0/doc/release_notes/5.50.0.txt 0000664 0000000 0000000 00000006525 14342141206 0017773 0 ustar 00root root 0000000 0000000 = New Features
* A pg_multirange extension has been added with support for PostgreSQL
14+ multirange types. Multirange types are similar to an array of
ranges, where a value is in the multirange if it is in any of the
ranges contained in the multirange. Multiranges are useful when you
need to check against multiple ranges that do not overlap.
You can create multiranges using Sequel.pg_multirange, passing
an array of ranges and a multirange type:
DB.extension :pg_multirange
multirange = Sequel.pg_multirange(array_of_date_ranges, :datemultirange)
Sequel.pg_multirange returns a PGMultiRange, which operates as a
delegate to an array of PGRange objects. Behavior of the object
is similar to an array, except that cover? is supported, which will
test if any of the included ranges covers the argument:
multirange.cover?(Date.today)
Like the pg_range extension, this also supports registering custom
multirange types, and using multirange types as bound variables.
The pg_range_ops extension now supports both ranges and multiranges,
with a few new methods added to Postgres::RangeOp for converting
between them:
* range_merge
* multirange
* unnest
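A sketch of these conversion methods (the column names are
hypothetical):
  Sequel.extension :pg_range_ops
  Sequel.pg_range_op(:mr_column).range_merge # range_merge(mr_column)
  Sequel.pg_range_op(:mr_column).unnest      # unnest(mr_column)
  Sequel.pg_range_op(:r_column).multirange   # multirange(r_column)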
* An sql_log_normalizer extension has been added for normalizing
logged SQL, replacing numbers and strings inside the SQL string
with question marks. This is useful for analytics, and for
keeping sensitive data out of query logs.
DB[:table].first(a: 1, b: 'something')
# Without sql_log_normalizer extension, logged SQL is:
# SELECT * FROM "table" WHERE (("a" = 1) AND ("b" = 'something')) LIMIT 1
DB.extension :sql_log_normalizer
DB[:table].first(a: 1, b: 'something')
# With sql_log_normalizer extension, logged SQL is:
# SELECT * FROM "table" WHERE (("a" = ?) AND ("b" = ?)) LIMIT ?
This extension scans the logged SQL for numbers and strings,
attempting to support the database's rules for string quoting. This
means it should work with SQL that Sequel didn't itself create.
However, there are corner cases that the extension doesn't handle,
such as the use of apostrophes inside quoted identifiers, and
potentially other cases of database specific SQL where the normal
string quoting rules are changed, such as the use of escape strings
on PostgreSQL (E'escape string').
* A :before_preconnect Database option has been added. This is useful
for configuring extensions added via :preconnect_extensions before
the connection takes place.
= Other Improvements
* Dataset#columns! now uses a LIMIT 0 query instead of a LIMIT 1 query
by default. This can improve performance in cases where the row
returned would be large. Some databases do not support a LIMIT 0
query, and some adapters that ship with Sequel have been updated to
continue using LIMIT 1. Custom adapters should be updated to use
LIMIT 1 if the database does not support LIMIT 0.
* The lazy_attributes plugin no longer modifies the database schema.
Previously, it could modify the database schema indirectly,
resulting in the loss of typecasting for models that were not
based on a single table or view, such as usage with the
class_table_inheritance plugin.
* Model#freeze in the composition, serialization, and
serialization_modification_detection plugins now returns self. In
addition to being more correct, this fixes usage of these plugins
with the static_cache plugin.
sequel-5.63.0/doc/release_notes/5.51.0.txt 0000664 0000000 0000000 00000003274 14342141206 0017772 0 ustar 00root root 0000000 0000000 = New Features
* On PostgreSQL 14+, Dataset#with_recursive now supports :search and
:cycle options for result ordering and cycle detection. These use
the SEARCH and CYCLE clauses added in PostgreSQL 14:
DB[:t].with_recursive(:t,
DB[:i1].where(parent_id: nil),
DB[:i1].join(:t, id: :parent_id).select_all(:i1),
search: {by: :id, type: :breadth},
cycle: {columns: :id, cycle_value: 1, noncycle_value: 2})
# WITH RECURSIVE t AS (
# SELECT * FROM i1 WHERE (parent_id IS NULL)
# UNION ALL
# (SELECT i1.* FROM i1 INNER JOIN t ON (t.id = i1.parent_id))
# )
# SEARCH BREADTH FIRST BY id SET ordercol
# CYCLE id SET is_cycle TO 1 DEFAULT 2 USING path
* On MySQL, column schema hashes now contain an :extra entry, which
contains the Extra string returned in MySQL's DESCRIBE results
for the column.
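For example:
  DB.schema(:albums).each do |column, info|
    info[:extra] # e.g. "auto_increment" for an auto-increment column
  end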
= Other Improvements
* When eager loading via the tactical_eager_loading plugin, objects
that already have an association loaded will not have it reloaded
unless the :eager_reload option is given.
* When cloning an association and using a different :class option
than the cloned association, the :class option given when cloning
will now take precedence over the :class option for the cloned
association.
* When using the mock postgres adapter, the adapter defaults to
supporting PostgreSQL 14 (previously, it defaulted to supporting
PostgreSQL 9.5).
* Sequel now avoids a method redefined warning in the lazy attributes
plugin in verbose warnings mode.
= Other
* Sequel's primary discussion forum is now GitHub Discussions. The
sequel-talk Google Group is still available for users who would
prefer to use that instead.
sequel-5.63.0/doc/release_notes/5.52.0.txt 0000664 0000000 0000000 00000005747 14342141206 0020002 0 ustar 00root root 0000000 0000000 = New Features
* When the sql_comments Database extension is used,
Database#with_comments is now added, which can be used for including
comments for all queries executed inside a given block. This can
be useful if you want to analyze database query logs, and want to
group all related queries:
DB.with_comments(model: Album, action: :all) do
DB[:albums].all
# SELECT * FROM albums -- model:Album,action:all
end
* An sql_comments plugin has been added, which will automatically
add SQL comments for all queries generated by model class, instance
and dataset methods:
Album.plugin :sql_comments
album = Album[1]
# SELECT * FROM albums WHERE (id = 1) LIMIT 1
# -- model:Album,method_type:class,method:[]
album.update(:name=>'A')
# UPDATE albums SET name = 'A' WHERE (id = 1)
# -- model:Album,method_type:instance,method:update
Album.where(id: 1).delete
# DELETE FROM albums WHERE (id = 1)
# -- model:Album,method_type:dataset,method:delete
This plugin requires you have loaded the sql_comments Database
extension into the related Database before use.
* A date_parse_input_handler extension has been added to support
custom handling of input to date parsing methods. Among other
things, you can use this to limit the length of strings that
will be parsed, which can prevent ArgumentErrors in newer Ruby
versions:
Sequel.extension :date_parse_input_handler
Sequel.date_parse_input_handler do |string|
string.b[0, 128]
end
= Other Improvements
* On Ruby 3.1, the core_refinements extension now avoids the
deprecated Refinement#include, switching to
Refinement#import_methods.
* On Ruby 3.1, the subclasses plugin will use Ruby's native support
for Class#subclasses.
* The subclasses plugin has renamed descendents to descendants and
freeze_descendents to freeze_descendants. The previous method
names are still available as aliases.
* The :ruby_default schema entry for datetime/timestamp columns now
respects Sequel.datetime_class. Previously, the value for the
:ruby_default schema entry would always be a DateTime value for
such columns.
* The pg_interval extension now works with ActiveSupport 7.0.
* The shared postgres adapter now respects
Database#default_string_column_size for setting the size of string
columns that don't use text as the database type.
* Database#supports_check_constraints? now returns true on MySQL
8.0.19+. This fixes drop_constraint in certain cases when combining
the constraint dropping with other changes in the same alter_table
block.
* The mysql adapter now supports the ruby-mysql 3 API (ruby-mysql
is a pure-ruby MySQL driver).
* The mysql adapter no longer uses the connection's server_version
method if it is defined, as the method does not return the
correct value when using the ruby-mysql driver with MariaDB.
* Comments added by the sql_comments extension no longer modify
cached SQL for a dataset.
= Other
* This is Sequel's 250th release!
sequel-5.63.0/doc/release_notes/5.53.0.txt 0000664 0000000 0000000 00000001755 14342141206 0017776 0 ustar 00root root 0000000 0000000 = Improvements
* The jdbc/h2 subadapter now supports H2 version 2.0. It continues to
support H2 versions 1.3 and 1.4.
* The mysql2 adapter's prepared statement support now reuses existing
native prepared statements, instead of only binding variables on
newly prepared statements. This was the intended behavior
previously, and should result in increased performance in cases
where preparing a query takes significant time.
* The subclasses plugin now ignores an existing Class#subclasses
method if it is defined in Ruby. This fixes cases where usage of
ActiveSupport would break the subclasses plugin.
* Database#call_sproc in the jdbc adapter will now always close the
prepared call it creates. Before, if there was an exception raised
when setting the arguments for the prepared call, the prepared call
would not be closed.
* A more appropriate error is now issued if you try to use the
column_encryption plugin to encrypt a column without setting up an
encryption key.
sequel-5.63.0/doc/release_notes/5.54.0.txt 0000664 0000000 0000000 00000002247 14342141206 0017774 0 ustar 00root root 0000000 0000000 = New Feature
* An enum plugin has been added. This plugin allows you to create
model-level enums, giving names to underlying values of a column.
For example:
Album.plugin :enum
Album.enum :status_id, good: 1, bad: 2
Adds Album#good! and Album#bad! for changing the status_id to 1 or
2 respectively. It adds Album#good? and Album#bad? for checking
whether the status_id is 1 or 2 respectively. It overrides
Album#status_id to return :good or :bad instead of 1 or 2,
respectively, and overrides Album#status_id= to accept :good or
:bad instead of 1 or 2 respectively.
Additionally, it adds good and bad dataset methods for filtering
the model's dataset to records where status_id is 1 or 2
respectively. It also adds not_good and not_bad dataset methods
for filtering the model's dataset to records where status_id is not
1 or not 2 respectively.
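A sketch of the generated methods:
  album = Album[1]
  album.good!       # changes status_id to 1
  album.good?       # => true
  album.status_id   # => :good
  Album.good.all    # albums where status_id is 1
  Album.not_bad.all # albums where status_id is not 2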
You can use :prefix and :suffix options when calling enum to
add a prefix or suffix to the method names created. You can
set the :override_accessors option to false to not override
the accessor methods for the column, and set the :dataset_methods
option to false to not add dataset methods.
sequel-5.63.0/doc/release_notes/5.55.0.txt 0000664 0000000 0000000 00000001544 14342141206 0017774 0 ustar 00root root 0000000 0000000 = New Features
* An auto_restrict_eager_graph plugin has been added for automatically
disallowing the use of eager_graph with associations using blocks but
lacking graph_* options. This can prevent potentially invalid usage,
as the restrictions added by the block are not used by eager_graph.
* The sqlite adapter now supports the :setup_regexp_function
Database option. This option will define a REGEXP function in the
database that will allow regexp support in queries, such as:
DB[:table].where(column: /(some|pattern)/)
Note that this creates a Ruby Regexp object per column value tested,
so it isn't the most optimal approach.
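A connection sketch (the database file name is hypothetical):
  DB = Sequel.connect('sqlite://albums.db', setup_regexp_function: true)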
= Other Improvements
* Calling dataset aggregate methods such as #max on a model dataset now
works correctly. Previously, it could fail if called enough times to
optimize using a placeholder literalizer.
sequel-5.63.0/doc/release_notes/5.56.0.txt 0000664 0000000 0000000 00000003740 14342141206 0017775 0 ustar 00root root 0000000 0000000 = New Features
* On SQLite, Database#create_table now supports a :strict option to
use the STRICT keyword when creating the table. When this option
is used, SQLite will enforce the types for each column. When using
this option, you are limited to using the following column types:
int, integer, real, text, blob, and any (any allows for dynamic
types).
* An sqlite_json_ops extension has been added, providing DSL support
for JSON functions and operators supported in SQLite 3.38.0. Usage
is similar to the pg_json_ops extension. First, you create an
appropriate object:
j = Sequel.sqlite_json_op(:json_column)
# or:
j = Sequel[:json_column].sqlite_json_op
Then, you call methods on that object to create expressions for the
JSON functions and operators:
j[1] # (json_column -> 1)
j.get_text(1) # (json_column ->> 1)
j.extract('$.a') # json_extract(json_column, '$.a')
j.array_length # json_array_length(json_column)
j.type # json_type(json_column)
j.valid # json_valid(json_column)
j.json # json(json_column)
j.insert('$.a', 1) # json_insert(json_column, '$.a', 1)
j.set('$.a', 1) # json_set(json_column, '$.a', 1)
j.replace('$.a', 1) # json_replace(json_column, '$.a', 1)
j.remove('$.a') # json_remove(json_column, '$.a')
j.patch('{"a":2}') # json_patch(json_column, '{"a":2}')
j.each # json_each(json_column)
j.tree # json_tree(json_column)
= Other Improvements
* The alter_table add_column and add_foreign_key methods now support
the :index option to create an index on the added column, for
compatibility with the :index option on the create_table column and
foreign_key methods.
* The schema_dumper extension now treats the "INTEGER" type the same
as the "integer" type. This fixes some behavior when using SQLite
3.37.0+.
* Sequel's website has a much improved visual design.
sequel-5.63.0/doc/release_notes/5.57.0.txt 0000664 0000000 0000000 00000001737 14342141206 0020002 0 ustar 00root root 0000000 0000000 = New Features
* An is_distinct_from extension has been added with support for the
SQL IS DISTINCT FROM operator. This operator is similar to the
not equals operator, except in terms of NULL handling. It returns
true if only one side is NULL, and false if both sides are NULL.
You can call is_distinct_from on Sequel itself or on Sequel objects:
Sequel.is_distinct_from(:column_a, :column_b)
Sequel[:column_a].is_distinct_from(:column_b)
# (column_a IS DISTINCT FROM column_b)
On databases not supporting IS DISTINCT FROM, support is emulated
using a CASE statement.
* Column definitions on MySQL can use the :on_update_current_timestamp
option for ON UPDATE CURRENT_TIMESTAMP, which creates a column that
will automatically have its value set to CURRENT_TIMESTAMP on every
update.
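A sketch of the option in a table definition (the table and
column names are hypothetical):
  DB.create_table(:posts) do
    primary_key :id
    column :updated_at, 'timestamp', on_update_current_timestamp: true
  end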
* Database#create_function on PostgreSQL now supports a :parallel
option to set the thread safety of the function. The value should
be :safe, :unsafe, or :restricted.
sequel-5.63.0/doc/release_notes/5.58.0.txt 0000664 0000000 0000000 00000002256 14342141206 0020000 0 ustar 00root root 0000000 0000000 = New Features
* Dataset#merge and related #merge_* methods have been added for the
MERGE statement. MERGE is supported on PostgreSQL 15+, Oracle,
Microsoft SQL Server, DB2, H2, HSQLDB, and Derby. You can use MERGE
to insert, update, and/or delete in a single query. You call
the #merge_* methods to setup the MERGE statement, and #merge to
execute it on the database:
ds = DB[:m1].
merge_using(:m2, i1: :i2).
merge_insert(i1: :i2, a: Sequel[:b]+11).
merge_delete{a > 30}.
merge_update(i1: Sequel[:i1]+:i2+10, a: Sequel[:a]+:b+20)
ds.merge
# MERGE INTO m1 USING m2 ON (i1 = i2)
# WHEN NOT MATCHED THEN INSERT (i1, a) VALUES (i2, (b + 11))
# WHEN MATCHED AND (a > 30) THEN DELETE
# WHEN MATCHED THEN UPDATE SET i1 = (i1 + i2 + 10), a = (a + b + 20)
On PostgreSQL, the following additional MERGE related methods are
available:
* #merge_do_nothing_when_matched
* #merge_do_nothing_when_not_matched
* A :disable_split_materialized Database option is now supported on
MySQL. This disables split_materialized support in the optimizer,
working around a bug in MariaDB 10.5+ that causes failures in
Sequel's association tests.
sequel-5.63.0/doc/release_notes/5.59.0.txt 0000664 0000000 0000000 00000006460 14342141206 0020002 0 ustar 00root root 0000000 0000000 = New Features
* A require_valid_schema plugin has been added, for checking that
model classes have schema parsed as expected. By default, model
classes are not required to have valid schema, because it is
allowed to have model classes based on arbitrary datasets (such
as those using joins or set-returning functions), and it is not
possible to determine the schema for arbitrary datasets.
Sequel swallows non-connection errors when trying to parse schema
for a model's dataset, but if schema parsing fails when you would
expect it to succeed, it results in a model where typecasting does
not work as expected.
The require_valid_schema plugin will raise an error when setting
the dataset for a model if schema parsing fails and the dataset
uses a simple table where you would expect schema parsing to
succeed. You can also provide an argument of :warn when loading
the plugin, to warn instead of raising an error.
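Loading the plugin in both modes:

  Sequel::Model.plugin :require_valid_schema        # raise if schema parsing fails
  Sequel::Model.plugin :require_valid_schema, :warn # warn instead of raising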
This plugin may not work correctly in all cases for all adapters,
especially external adapters. Adapters are not required to support
schema parsing. Even if supported, adapters may not support
parsing schema for qualified tables, or parsing schema for views.
You should consider this plugin as a possible safety net. Users
are encouraged to try using it and report any unexpected breakage,
as that may help improve schema parsing in adapters that Sequel
ships.
* is_json and is_not_json methods have been added to the pg_json_ops
extension, for the IS [NOT] JSON operator supported in PostgreSQL
15+.
* Index creation methods on PostgreSQL 15+ now support a
:nulls_distinct option, for NULLS [NOT] DISTINCT. This allows you
to create unique indexes where NULL values are not considered
distinct.
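A sketch using a hypothetical table and column:

  DB.add_index :items, :sku, unique: true, nulls_distinct: false
  # CREATE UNIQUE INDEX ... ON "items" ("sku") NULLS NOT DISTINCT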
* View creation methods on PostgreSQL 15+ now support a
:security_invoker option to create a view where access is
determined by the permissions of the role that is accessing the
view, instead of the role that created the view.
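For example (view and table names are hypothetical):

  DB.create_view(:recent_items,
    DB[:items].where{created_at > Date.today - 7},
    security_invoker: true)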
= Other Improvements
* The :allow_eager association option is now set to false by default
for associations explicitly marked as :instance_specific, if the
:eager_loader association is not given.
* The postgres adapter now supports the sequel-postgres-pr driver.
The sequel-postgres-pr driver is a slimmed down fork of the
postgres-pr driver designed specifically for use by Sequel.
* Model code that explicitly does not swallow connection errors
will also now not swallow disconnect errors. This can fix issues
where model classes are being loaded at runtime, and the query to
get the columns/schema for the model uses a connection that has
been disconnected.
* Model classes created from aliased expressions and literal
strings no longer use the simple_table optimization, as there
are cases where doing so is not safe.
= Backwards Compatibility
* The change to not swallow disconnect errors when not swallowing
connection errors can result in exceptions being raised which
weren't raised previously. In most cases, this will alert you
to issues in your application that should be fixed, but it
can potentially result in regressions if you were OK with
the errors being swallowed. If this does result in regressions
in your application, please file an issue and we can probably
add a setting controlling this feature.
sequel-5.63.0/doc/release_notes/5.6.0.txt 0000664 0000000 0000000 00000002370 14342141206 0017706 0 ustar 00root root 0000000 0000000 = Improvements
* Running migrations using one of the included migrators on separate
Database objects in separate threads simultaneously is now
supported. Previously, the migrators were not thread-safe.
* On Ruby 2.5+, :db_type entries in the schema hashes are now deduped
for a slight memory savings when using many columns with the same
database type.
* The schema_caching extension now freezes string values in the
resulting hashes, just as the default schema parsing code started
doing in 5.5.0.
* The schema_caching extension now supports the :callable_default
schema values used by the pg_json, pg_array, and pg_hstore
extensions, by removing the entry before caching and resetting it
after restoring the cache.
* Identifier mangling rules are now respected when renaming columns on
Microsoft SQL Server.
= Backwards Compatibility
* The migrator internals were modified in order to support
thread-safety. The private Migrator#remove_migration_classes
method has been removed, and #load_migration_file now returns the
migration object/class instead of populating Migration.descendants.
Migration.descendants is now only used for temporary storage, and
will no longer contain all migration objects/classes used by the
migrator.
sequel-5.63.0/doc/release_notes/5.60.0.txt 0000664 0000000 0000000 00000001531 14342141206 0017764 0 ustar 00root root 0000000 0000000 = New Features
* The date_arithmetic extension now supports arbitrary expressions
as interval values on PostgreSQL 9.4+. Previously, only integers
were supported for the interval values.
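A sketch, assuming a hypothetical grace_days integer column used as the
interval value:

  DB.extension :date_arithmetic
  DB[:items].select(Sequel.date_add(:created_at, days: :grace_days))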
= Other Improvements
* Most Kernel#respond_to? calls have been converted to equivalent
defined? calls for better performance. defined? is a keyword
and is about 50% faster for the same behavior.
* The is_distinct_from extension now supports the IS DISTINCT FROM
syntax natively on SQLite 3.39+, instead of emulating it.
* HAVING without GROUP BY is now supported on SQLite 3.39+.
* Coverage testing has been significantly expanded. Previously,
the core, model, plugin, and extension code had 100% line/branch
coverage. 100% line/branch coverage has been added for the
core extensions, bin/sequel, and the postgres adapter with the
pg driver.
sequel-5.63.0/doc/release_notes/5.61.0.txt 0000664 0000000 0000000 00000003311 14342141206 0017763 0 ustar 00root root 0000000 0000000 = Improvements
* When typecasting strings to other types, Sequel::Database will now
by default not typecast strings that are much longer than expected
for the underlying type. Depending on the underlying type, there
is a limit of either 100 or 1000 bytes on the input string. This
avoids potential performance issues when trying to convert
arbitrary sized user input to specific types.
* The respond_to? to defined? change made in 5.60.0 was reverted in
5.60.1 as it broke cases on Ruby < 3 where the object had an unused
refinement that added the method.
* When typecasting strings to integer, strings such as -0xa are now
treated as negative hexadecimal strings, similar to how 0xa is
treated as a positive hexadecimal string.
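For example, assuming default typecasting settings:

  DB.typecast_value(:integer, '0xa')  # => 10
  DB.typecast_value(:integer, '-0xa') # => -10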
* Database#foreign_key_list now returns results for partitioned
tables on PostgreSQL 11+.
* Timestamps before the date of calendar reform are now handled
correctly by the pg_extended_date_support extension when using
Ruby 3.2 preview 2+.
= Backwards Compatibility
* The change to not typecast strings that are too long can break
backwards compatibility for applications that expect typecasting
for input beyond Sequel's limits. You can disable the string
bytesize checking by setting:
DB.check_string_typecast_bytesize = false
or by passing the check_string_typecast_bytesize: false option when
creating the Database instance.
* Code to workaround a bug in JRuby 9.2.0.0 has been removed from the
pg_extended_date_support extension. Users of the extension should
upgrade to a newer JRuby version.
* The is_json and is_not_json methods have been removed from the
pg_json_ops extension, as the underlying support was removed in
PostgreSQL 15 beta 4.
sequel-5.63.0/doc/release_notes/5.62.0.txt 0000664 0000000 0000000 00000012613 14342141206 0017771 0 ustar 00root root 0000000 0000000 = New Features
* The pg_auto_parameterize extension for automatically using bound
variables when using postgres adapter with the pg driver has been
added back to Sequel. This extension was originally added in Sequel
3.34.0, but was removed in 4.0.0 due to the many corner cases it
had. Almost all of the corner cases have now been fixed, and
the extension is now recommended for production use. Compared
to the original version in Sequel 3, the reintroduced version
of the extension includes the following changes:
* Handles integers used in LIMIT/ORDER
* Respects explicit CASTs
* Tries to convert column IN (int, ...) into column = ANY($) with an
array parameter
* Uses the same parameter for the same object used more than once in
a query
* Uses parameters when inserting multiple rows via Dataset#import
* Supports automatically parameterizing all of the
PostgreSQL-specific types that Sequel ships support for in pg_*
extensions (though some values of those types may not support
automatic parameterization).
* Supports skipping auto parameterization for specific values.
Automatic parameterization is generally slower than Sequel's default
behavior, since some optimizations Sequel uses by default do not
currently support automatic parameterization.
Applications may need changes to work correctly with the
pg_auto_parameterize extension, such as the addition of explicit
casts. Please read the extension documentation for more details.
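A minimal sketch of enabling the extension; the exact SQL and placeholder
numbering shown are illustrative:

  DB.extension :pg_auto_parameterize
  DB[:table].where(id: 1).first
  # SELECT * FROM "table" WHERE ("id" = $1) LIMIT $2 -- [1, 1]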
* Integer column schema entries now include :min_value and :max_value
entries on most databases, indicating the minimum and maximum values
supported for the column.
The validation_helpers plugin now has validates_max_value and
validates_min_value for testing the column value is not greater
than the given maximum value and not less than the given minimum
value, respectively.
The auto_validations plugin now automatically uses the :min_value
and :max_value column schema entries with the new validation_helpers
methods to validate that the column values for integer columns are
in the allowed range.
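A sketch of the validation_helpers methods, assuming they take the boundary
value as the first argument like validates_max_length (model and column
names are hypothetical):

  class Album < Sequel::Model
    plugin :validation_helpers

    def validate
      super
      validates_min_value 0, :copies_sold
      validates_max_value 2_147_483_647, :copies_sold
    end
  end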
* A primary_key_lookup_check_values plugin has been added for
automatically typecasting and checking the primary key values are
in the allowed range (given by :min_value and :max_value column
schema entries) during lookup. If typecasting fails or the value
is outside the allowed range, the primary key lookup will return
nil without issuing a query (or will raise a NoMatchingRow error
if using with_pk!).
Note that this can change behavior in some cases if you are
passing filter conditions during lookup instead of passing primary
key values. The plugin tries to support most common filter
conditions, but there are still cases that will break.
* Sequel now supports shard-specific :after_connect and :connect_sqls
Database options, allowing you to customize behavior for specific
shards:
DB = Sequel.connect('url', servers: {
:shard1 => {host: '...', after_connect: proc{|conn|}},
:shard2 => {host: '...', connect_sqls: ['...']},
})
Note that these shard-specific options will not be respected if you
are calling after_connect= or connect_sqls= on the Database's
connection pool.
= Other Improvements
* A Sequel::Postgres::IntegerOutsideBigintRange exception will now be
raised if trying to literalize an integer outside PostgreSQL bigint
range, to avoid PostgreSQL treating the integer as a numeric type
and not respecting indexes on the related column.
A pg_extended_integer_support extension has been added for
customizing the behavior when literalizing an integer outside
PostgreSQL bigint range, either quoting it or getting the
historical behavior of using it directly in the query.
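A sketch of using the extension; the strategy accessor name and values shown
here are assumptions, so check the extension documentation:

  DB.extension :pg_extended_integer_support
  DB.integer_outside_bigint_range_strategy = :quote # or :raw for the historical behavior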
* Dataset#import and #multi_insert no longer use transactions when
they only run a single query.
* Fractional seconds in timestamps are now respected in the
named_timezones extension.
* Using hstore[] types as bound variables now works on PostgreSQL.
* Using BC dates and timestamps in bound variables now works on
PostgreSQL.
* A corner case has been fixed in eager loading where the window
function eager limit strategy would be used without removing the
row_number entries from the result.
* The shared postgres adapter now caches reflection datasets, speeding
up Database#indexes and similar methods.
* The mock postgres adapter now assumes PostgreSQL 15 instead of
PostgreSQL 14 by default.
= Backwards Compatibility
* If you are using Ruby integers outside PostgreSQL bigint range when
dealing with PostgreSQL numeric column values, this version may
not be compatible. It is recommended you explicitly convert the
Ruby integers to BigDecimal objects if you are using them for
numeric column values. You can also use the
pg_extended_integer_support extension introduced in this version.
= Workaround for Older Versions
* If you cannot upgrade to Sequel 5.62.0, but still want to avoid the
problems that come from using literal large integers on PostgreSQL,
you can use the following code, where DB is your Sequel::Database
object:
DB.extend_datasets do
def literal_integer(v)
if v > 9223372036854775807 || v < -9223372036854775808
raise Sequel::InvalidValue, "PostgreSQL int too large: #{v}"
end
super
end
end
This workaround should work all the way back to Sequel 3.29.0,
released in November 2011.
sequel-5.63.0/doc/release_notes/5.63.0.txt 0000664 0000000 0000000 00000003032 14342141206 0017765 0 ustar 00root root 0000000 0000000 = New Features
* On Ruby 3.2, the pool_class: :timed_queue Database option can now
be used to use an alternative connection pool that stores
connections in a queue, and uses the new Queue#pop :timeout option
in Ruby 3.2 to implement the pool timeout. This new connection
pool is simpler than the default connection pool. It is not yet
the default connection pool on Ruby 3.2, but it may become the
default in a later version. Users of Ruby 3.2 are encouraged to
try out the pool_class: :timed_queue Database option and provide
feedback on how it works in their application.
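For example (connection URL is hypothetical):

  DB = Sequel.connect('postgres://localhost/mydb', pool_class: :timed_queue)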
= Other Improvements
* The tactical_eager_loading plugin now works in combination with the
single_table_inheritance and class_table_inheritance plugins, when
loading an association only defined in a specific subclass.
Previously, eager loading would be skipped in such a case. Now,
an eager load will be attempted for all instances supporting the
association.
* The validate_associated plugin now avoids database type errors for
non-integer association keys. In cases where the associated object
doesn't have a value for the associated key, and the current object
does not have a key value that can be set in the associated object,
validation errors in the associated object related to the associated
key will be ignored.
* Thread-keyed connection pool hashes now use compare_by_identity for
better performance.
* The JRuby workaround in the named_timezones extension is no longer
used on JRuby 9.3.9.0+, as JRuby fixed the related bug.
sequel-5.63.0/doc/release_notes/5.7.0.txt 0000664 0000000 0000000 00000011225 14342141206 0017706 0 ustar 00root root 0000000 0000000 = New Features
* An integer64 extension has been added, which treats the Integer
class as a generic 64-bit integer type. Sequel's default behavior
for Integer is to use the integer type, which on most databases
is a 32-bit type.
This affects all internal use of the Integer class as a generic
database type, so that methods like primary_key and foreign_key
also default to using a 64-bit integer type when using this
extension.
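A sketch (table and column names are hypothetical):

  DB.extension :integer64
  DB.create_table(:events) do
    primary_key :id # 64-bit auto-incrementing key
    Integer :hits   # bigint instead of integer
  end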
* When using PostgreSQL 10+, you can use the :identity option when
creating columns to create identity columns:
DB.create_table(:table){Integer :id, identity: true}
# CREATE TABLE "table" ("id" integer GENERATED BY DEFAULT AS IDENTITY)
If you want to disallow using a user provided value when inserting,
or updating you can use a value of :always:
DB.create_table(:table){Integer :id, identity: :always}
# CREATE TABLE "table" ("id" integer GENERATED ALWAYS AS IDENTITY)
* Database#convert_serial_to_identity has been added on PostgreSQL 10.2+.
This method can convert existing serial columns to identity columns
in most cases, but it currently requires superuser permissions as it
modifies the system tables directly.
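A sketch; the :column option shown for picking the serial column is an
assumption:

  DB.convert_serial_to_identity(:albums)
  DB.convert_serial_to_identity(:albums, column: :id)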
* Dataset#overriding_system_value and #overriding_user_value are
now supported on PostgreSQL to work with identity columns. You can
use #overriding_system_value to force the use of a user provided
value for identity columns that are GENERATED ALWAYS, and you can
use #overriding_user_value to ignore any user value for identity
columns and always use the next entry in the sequence.
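For example (table name is hypothetical):

  DB[:table].overriding_system_value.insert(id: 100)
  # uses 100 even for a GENERATED ALWAYS identity column

  DB[:table].overriding_user_value.insert(id: 100)
  # ignores 100 and uses the next sequence value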
= Other Improvements
* On PostgreSQL 10.2+, identity columns are now used instead of serial
columns as the default for auto incrementing primary keys:
DB.create_table(:table){primary_key :id}
# Sequel 5.7.0+ and PostgreSQL 10.2+
# CREATE TABLE "table" ("id" integer
# GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY)
# Older Sequel version or older PostgreSQL version
# CREATE TABLE "table" ("id" serial PRIMARY KEY)
Identity columns fix many issues that serial columns have, in
addition to being the SQL standard way to support auto incrementing
columns.
* PostgreSQL identity columns are now correctly recognized and the
:auto_increment schema entry is now populated for them.
* Dataset#with_sql_{all,each,first,single_value} now use a cached
dataset to avoid clobbering the current dataset's columns.
Previously, the clobbering of the current dataset's columns was
documented and the method warned against using SQL with
different columns. These methods are now safe to use in such
cases, but will not have the same performance advantages if the
current dataset is not cached.
* On ruby 2.1+, Sequel now uses Process::CLOCK_MONOTONIC when
performing elapsed time calculations so that it is not affected by
modifications to the system's time.
* In the postgres adapter, prepared statement errors related to
changing types are now treated as disconnect errors. While they
are not technically disconnect errors, treating them as such
will in general reduce the total number of exceptions generated
from 1 per affected statement per connection to 1 per
connection.
* In the pg_array_associations plugin, the array_type for
pg_array_to_many and many_to_pg_array association reflections is
now always the scalar type for the array (e.g. integer). Previously,
the array type (e.g. integer[]) was used in some cases. This didn't
previously result in issues as PostgreSQL considers integer[][] the
same type as integer[].
* In the pg_array_associations plugin, the many_to_pg_array
association remove_all_* method now uses the appropriate cast to
work for non-integer array types such as bigint[].
* Database#server_version on PostgreSQL 10.1+ now works correctly
when the connection does not support the server_version method.
Now the server_version_num database setting is always used to
ensure consistent behavior across adapters.
* In the jdbc/oracle adapter, temporary clobs are now manually
freed to prevent a memory leak, in line with the Oracle JDBC
driver recommendations.
* The Sequel <4 release notes and changelog are no longer shipped
with the gem, decreasing the size of the gem by 20%.
= Backwards Compatibility
* The switch to using identity columns instead of serial columns
by default on PostgreSQL 10.2+ may break backwards compatibility
in some situations, such as code that relies on what are generally
considered bugs in serial columns, such as CREATE TABLE LIKE
using the same sequence for the column in both the existing table
and the new table, or that dropping the default value for the
column does not drop the related sequence.
sequel-5.63.0/doc/release_notes/5.8.0.txt 0000664 0000000 0000000 00000014741 14342141206 0017715 0 ustar 00root root 0000000 0000000 = New Features
* A pg_auto_constraint_validations plugin has been added, which
automatically converts many constraint violations raised as
exceptions to ValidationFailed exceptions when saving a model
instance.
The following constraint violation types are recognized and
supported:
* NOT NULL
* CHECK
* UNIQUE (except expression/functional indexes)
* FOREIGN KEY (both referencing and referenced by)
In the cases where the plugin cannot determine an appropriate
validation failure for the constraint violation, it just
reraises the original exception.
This plugin is not intended as a replacement for other validations,
it is intended as a last resort. The purpose of validations is to
provide nice error messages for the user, and the error messages
generated by this plugin are fairly generic. The error messages can
be customized using the :messages plugin option, but there is only a
single message used per constraint type.
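A minimal sketch, assuming a hypothetical Album model whose name column has
a NOT NULL constraint:

  Album.plugin :pg_auto_constraint_validations

  album = Album.new(name: nil)
  album.save
  # raises Sequel::ValidationFailed with a generic message,
  # instead of a NOT NULL constraint violation exception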
* Database#check_constraints has been added on PostgreSQL. This
returns metadata related to each check constraint on a table:
DB.create_table(:foo) do
Integer :i
Integer :j
constraint(:ic, Sequel[:i] > 2)
constraint(:jc, Sequel[:j] > 2)
constraint(:ijc, Sequel[:i] - Sequel[:j] > 2)
end
DB.check_constraints(:foo)
# => {
# :ic=>{:definition=>"CHECK ((i > 2))", :columns=>[:i]},
# :jc=>{:definition=>"CHECK ((j > 2))", :columns=>[:j]},
# :ijc=>{:definition=>"CHECK (((i - j) > 2))", :columns=>[:i, :j]}
# }
* Database#foreign_key_list now supports a :reverse option on
PostgreSQL, which returns foreign keys referencing the given table,
instead of foreign keys in the given table referencing other
tables:
DB.create_table!(:a) do
primary_key :id
Integer :i
Integer :j
foreign_key :a_id, :a, :foreign_key_constraint_name=>:a_a
unique [:i, :j]
end
DB.create_table!(:b) do
foreign_key :a_id, :a, :foreign_key_constraint_name=>:a_a
Integer :c
Integer :d
foreign_key [:c, :d], :a, :key=>[:j, :i], :name=>:a_c_d
end
DB.foreign_key_list(:a, :reverse=>true)
# => [
# {:name=>:a_a, :columns=>[:a_id], :key=>[:id], :on_update=>:no_action,
# :on_delete=>:no_action, :deferrable=>false, :table=>:a, :schema=>:public},
# {:name=>:a_a, :columns=>[:a_id], :key=>[:id], :on_update=>:no_action,
# :on_delete=>:no_action, :deferrable=>false, :table=>:b, :schema=>:public},
# {:name=>:a_c_d, :columns=>[:c, :d], :key=>[:j, :i], :on_update=>:no_action,
# :on_delete=>:no_action, :deferrable=>false, :table=>:b, :schema=>:public}
# ]
* Dataset#nowait has been added, which will make the query fail
with a Sequel::DatabaseLockTimeout exception if it encounters
a locked row, overriding the default database behavior that
would wait until the lock was released. This method is supported
on PostgreSQL, Microsoft SQL Server, Oracle, and MySQL 8+.
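For example (table name is hypothetical):

  DB.transaction do
    DB[:accounts].for_update.nowait.where(id: 1).first
    # raises Sequel::DatabaseLockTimeout if another
    # transaction holds a lock on the row
  end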
* Database#indexes now supports an :include_partial option on
PostgreSQL, which will include partial indexes in the output (Sequel
by default excludes partial indexes).
* Common table expressions and window functions are now supported when
using MySQL 8+.
* Dataset#skip_locked is now supported on MySQL 8+.
* The connection_expiration extension now supports a
Database#connection_expiration_random_delay attribute, which is used
to randomize the expiration times, avoiding the thundering herd
problem.
* The pg_enum extension now supports a rename_enum method for renaming
existing enum types.
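A sketch, assuming rename_enum takes the current and new type names:

  DB.extension :pg_enum
  DB.rename_enum(:mood, :feeling)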
* Database#error_info on PostgreSQL now returns much more metadata
regarding the error.
= Other Improvements
* The dataset returned by the following dataset methods is cached,
which can improve performance significantly in certain cases:
* #distinct (without arguments or block)
* #from_self (without options)
* #lateral
* #qualify (without argument)
* #returning (without arguments)
* #select_all (without arguments)
* If the primary_key serial: true, type: :serial, or type: :bigserial
options are given on PostgreSQL 10.2+, a serial primary key is used
instead of an identity primary key. This change was included in
Sequel 5.7.1.
* The :search_path Database option is now supported as a shard
option on PostgreSQL, so different shards can use different
search paths.
* The correct column order in Database#foreign_key_list on MySQL is
now forced, fixing issues on MySQL 8+.
* When using case sensitive regexp matches on MySQL 8+, Sequel now
uses the REGEXP_LIKE function instead of the REGEXP BINARY
operator, to work around what appears to be a bug in MySQL 8+
related to the change in MySQL's regexp engine.
* On MySQL 5.7+, the :extended option to Dataset#explain is now
ignored, since the :extended option's behavior in previous
MySQL versions is now the default behavior.
* The MySQL HY000 generic SQL state error code is now ignored
in the mysql2 adapter, so it falls back to using the more
accurate backup error mapping in that case.
* The pg_enum extension's schema modification methods now work
correctly if the Database instance is frozen.
* The tactical_eager_loading plugin now respects the :allow_eager
association option, and will not attempt to eagerly load
associations when :allow_eager is false.
* Using multiple add_constraint calls and a set_column_null call in
the same alter_table block on SQLite now works correctly. Note
that if you are planning on ever modifying existing tables beyond
adding columns, you should probably choose a database that natively
supports such modification (SQLite does not).
* Hashes returned by Database#foreign_key_list on PostgreSQL now
include a :schema entry, unless the support has been enabled
to make the :table entry be a qualified identifier.
* Dataset#supports_cte?(:insert) no longer returns true on
SQLAnywhere. SQLAnywhere only supports common table
expressions for INSERT ... SELECT, not for all INSERT
statements. INSERT ... WITH ... SELECT is already
supported in Sequel using:
DB[:t1].insert(DB[:t2].with(:t3, DB[:t3]))
* Model#_valid? is no longer made a public method in the
error_splitter plugin.
= Backwards Compatibility
* Calling the filter method on a proxy object returned by the
association_proxies plugin now warns on ruby <2.6. This is
because starting in ruby 2.6, the behavior will change and the
method will be called on the array of associated objects
instead of on the dataset, as Enumerable#filter is being added
in ruby 2.6.
sequel-5.63.0/doc/release_notes/5.9.0.txt 0000664 0000000 0000000 00000010123 14342141206 0017704 0 ustar 00root root 0000000 0000000 = New Features
* An escaped_like extension has been added, for the creation of
LIKE/ILIKE expressions with placeholders in patterns without
access to a dataset. This adds escaped_like and escaped_ilike
methods to the same Sequel expression objects that support like
and ilike. These methods take two arguments, the first being
the pattern, with ? placeholders, and the second being the
placeholder value (which can be an array for multiple
placeholders):
Sequel.extension :escaped_like
DB[:table].where{string_column.escaped_like('?%', user_input)}
# user_input is 'foo':
# SELECT * FROM table WHERE string_column LIKE 'foo%'
# user_input is '%foo':
# SELECT * FROM table WHERE string_column LIKE '\%foo%'
* Generated columns on MySQL 5.7+ and MariaDB 5.2+ are now supported
using the :generated_always_as option when creating the column.
The :generated_type option can also be used to specify the type of
generated column (virtual or stored). Examples:
DB.add_column :t, :c, String, generated_always_as: Sequel[:a]+'b'
# ALTER TABLE `t` ADD COLUMN `c` varchar(255)
# GENERATED ALWAYS AS (CONCAT(`a`, 'b'))
DB.add_column :t, :c, String, generated_always_as: Sequel[:a]+'b',
  generated_type: :virtual
# ALTER TABLE `t` ADD COLUMN `c` varchar(255)
# GENERATED ALWAYS AS (CONCAT(`a`, 'b')) VIRTUAL
DB.add_column :t, :c, String, generated_always_as: Sequel[:a]+'b',
  generated_type: :stored
# ALTER TABLE `t` ADD COLUMN `c` varchar(255)
# GENERATED ALWAYS AS (CONCAT(`a`, 'b')) STORED
* Sequel::Model.has_dataset? has been added for checking whether the
model class has an associated dataset. This will generally be true
for most model classes, but will be false for abstract model
classes (such as Sequel::Model itself).
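For example (Album is a hypothetical model class):

  Sequel::Model.has_dataset? # => false
  Album.has_dataset?         # => true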
* Sequel::VERSION_NUMBER has been added for easier future version
comparisons. The version number for 5.9.0 is 50090.
= Other Improvements
* When disconnecting connections in the threaded connection pools,
the disconnection is performed without holding the connection
pool mutex, since disconnection may block.
* The sharded threaded connection pool no longer deadlocks when
disconnecting connections if the connection_validator or
connection_expiration extension is used.
* If a thread dies and does not check a connection back into the
connection pool, Sequel now disconnects the connection when it
detects the dead thread, instead of assuming the connection is
safe to be reused.
* When using eager_graph with cascaded associations, a unique
object is now used instead of a shared object in cases where
using a shared object may cause further cascaded associated
objects to be duplicated.
* On PostgreSQL, the ESCAPE modifier to the LIKE/ILIKE operators is
no longer used, since the default ESCAPE value is the one Sequel
uses. This change was made in order to allow the LIKE/ILIKE
operators to work with the ANY function, as PostgreSQL does not
support the use of the ESCAPE modifier in such cases.
* A hash argument passed to Model.nested_attributes in the
nested_attributes plugin is now no longer modified.
* Internal data structures for eager and eager_graph datasets are now
frozen to avoid unintentional modification.
* Nondeterministic behavior in Database#foreign_key_list with the
:reverse option on PostgreSQL is now avoided by using an
unambiguous order.
* Performance has been improved slightly by avoiding unnecessary
hash allocations.
* Performance has been improved slightly by using while instead
of Kernel#loop.
* BigDecimal() is now used instead of BigDecimal.new(), as the
latter has been deprecated.
* The jdbc adapter now avoids referencing ::NativeException on JRuby
9.2+, since JRuby has deprecated it. It is still used on older
versions of JRuby, since some JRuby 1.7 code may still require it.
* Sequel now works around multiple Date/Time conversion bugs in
JRuby 9.2.0.0 for BC dates in the pg_extended_date_support
extension. These bugs have already been fixed in JRuby, and
the workarounds will be removed after the release of JRuby
9.2.1.0.
sequel-5.63.0/doc/schema_modification.rdoc 0000664 0000000 0000000 00000054110 14342141206 0020504 0 ustar 00root root 0000000 0000000 = Schema modification methods
Here's a brief description of the most common schema modification methods:
== +create_table+
+create_table+ is the most common schema modification method, and it's used for adding new tables
to the database. You provide it with the name of the table as a symbol, as well as a block:
create_table(:artists) do
primary_key :id
String :name
end
Note that if you want a primary key for the table, you need to specify it; Sequel does not create one
by default.
=== Column types
Most method calls inside the create_table block will create columns, since +method_missing+ calls +column+.
Columns are generally created by specifying the column type as the method
name, followed by the column name symbol to use, and after that any options that should be used.
If the method is a ruby class name that Sequel recognizes, Sequel will transform it into the appropriate
type for the given database. So while you specified +String+, Sequel will actually use +varchar+ or
+text+ depending on the underlying database. Here's a list of all ruby classes that Sequel will
convert to database types:
create_table(:columns_types) do # common database type used
Integer :a0 # integer
String :a1 # varchar(255)
String :a2, size: 50 # varchar(50)
String :a3, fixed: true # char(255)
String :a4, fixed: true, size: 50 # char(50)
String :a5, text: true # text
File :b # blob
Fixnum :c # integer
Bignum :d # bigint
Float :e # double precision
BigDecimal :f # numeric
BigDecimal :f2, size: 10 # numeric(10)
BigDecimal :f3, size: [10, 2] # numeric(10, 2)
Date :g # date
DateTime :h # timestamp
Time :i # timestamp
Time :i2, only_time: true # time
Numeric :j # numeric
TrueClass :k # boolean
FalseClass :l # boolean
end
Note that in addition to the ruby class name, Sequel also pays attention to the column options when
determining which database type to use. Also note that for boolean columns, you can use either
TrueClass or FalseClass, they are treated the same way (ruby doesn't have a Boolean class).
Also note that this conversion is only done if you use a supported ruby class name. In all other
cases, Sequel uses the type specified verbatim:
create_table(:columns_types) do # database type used
string :a1 # string
datetime :a2 # datetime
blob :a3 # blob
inet :a4 # inet
end
In addition to specifying the types as methods, you can use the +column+ method and specify the types
as the second argument, either as ruby classes, symbols, or strings:
create_table(:columns_types) do # database type used
column :a1, :string # string
column :a2, String # varchar(255)
column :a3, 'string' # string
column :a4, :datetime # datetime
column :a5, DateTime # timestamp
column :a6, 'timestamp(6)' # timestamp(6)
end
If you use a ruby class as the type, Sequel will try to guess the appropriate type name for the
database you are using. If a symbol or string is used as the type, it is used verbatim as the type
name in SQL, with the exception of :Bignum. Using the symbol :Bignum as a type will use the
appropriate 64-bit integer type for the database you are using.
=== Column options
When using the type name as method, the third argument is an options hash, and when using the +column+
method, the fourth argument is the options hash. The following options are supported:
:default :: The default value for the column.
:index :: Create an index on this column. If given a hash, use the hash as the
options for the index.
:null :: Mark the column as allowing NULL values (if true),
or not allowing NULL values (if false). If unspecified, will default
to whatever the database default is (usually true).
:primary_key :: Mark this column as the primary key. This is used instead of the
primary key method if you want a non-autoincrementing primary key.
:primary_key_constraint_name :: The name to give the primary key constraint.
:type :: Overrides the type given as the method name or a separate argument.
Not usually used by +column+ itself, but often by other methods such
as +primary_key+ or +foreign_key+.
:unique :: Mark the column as unique, generally has the same effect as
creating a unique index on the column.
:unique_constraint_name :: The name to give the unique constraint.
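For example, combining several of these options (table and column names are
hypothetical):

  create_table(:artists) do
    primary_key :id
    String :name, null: false, unique: true
    Integer :plays, default: 0, index: true
  end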
=== Other methods
In addition to the +column+ method and other methods that create columns, there are other methods that can be used:
==== +primary_key+
You've seen this one used already. It's used to create an autoincrementing integer primary key column.
create_table(:a0){primary_key :id}
If you want an autoincrementing 64-bit integer:
create_table(:a0){primary_key :id, type: :Bignum}
If you want to create a primary key column that doesn't use an autoincrementing integer, you should
not use this method. Instead, you should use the :primary_key option to the +column+ method or type
method:
create_table(:a1){Integer :id, primary_key: true} # Non autoincrementing integer primary key
create_table(:a2){String :name, primary_key: true} # varchar(255) primary key
If you want to create a composite primary key, you should call the +primary_key+ method with an
array of column symbols. You can provide a specific name to use for the primary key constraint
via the :name option:
create_table(:items) do
Integer :group_id
Integer :position
primary_key [:group_id, :position], name: :items_pk
end
If provided with an array, +primary_key+ does not create a column, it just sets up the primary key constraint.
==== +foreign_key+
+foreign_key+ is used to create a foreign key column that references a column in another table (or the same table).
It takes the column name as the first argument, the table it references as the second argument, and an options hash
as its third argument. A simple example is:
create_table(:albums) do
primary_key :id
foreign_key :artist_id, :artists
String :name
end
+foreign_key+ accepts the same options as +column+. For example, to have a unique foreign key with varchar(16) type:
foreign_key :column_name, :table, unique: true, type: 'varchar(16)'
+foreign_key+ also accepts some specific options:
:deferrable :: Makes the foreign key constraint checks deferrable, so they aren't checked
until the end of the transaction.
:foreign_key_constraint_name :: The name to give the foreign key constraint.
:key :: The column in the associated table
that this column references. Unnecessary if this column
references the primary key of the associated table, at least
on most databases.
:on_delete :: Specify the behavior of this foreign key column when the row with the primary key
it references is deleted, can be :restrict, :cascade, :set_null, or :set_default.
You can also use a string, which is used literally.
:on_update :: Specify the behavior of this foreign key column when the row with the primary key
it references modifies the value of the primary key. Takes the same options as
:on_delete.
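For example, a foreign key column whose rows are deleted along with the
referenced row (table and column names are hypothetical):

  create_table(:albums) do
    primary_key :id
    foreign_key :artist_id, :artists, on_delete: :cascade
  end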
Like +primary_key+, if you provide +foreign_key+ with an array of symbols, it will not create a
column, but create a foreign key constraint:
create_table(:artists) do
String :name
String :location
primary_key [:name, :location]
end
create_table(:albums) do
String :artist_name
String :artist_location
String :name
foreign_key [:artist_name, :artist_location], :artists
end
When using an array of symbols, you can also provide a :name option to name the constraint:
create_table(:albums) do
String :artist_name
String :artist_location
String :name
foreign_key [:artist_name, :artist_location], :artists, name: 'albums_artist_name_location_fkey'
end
If you want to add a foreign key for a single column with a named constraint, you must use
the array form with a single symbol:
create_table(:albums) do
primary_key :id
Integer :artist_id
String :name
foreign_key [:artist_id], :artists, name: 'albums_artist_id_fkey'
end
==== +index+
+index+ creates indexes on the table. For single columns, calling index is the same as using the
:index option when creating the column:
create_table(:a){Integer :id, index: true}
# Same as:
create_table(:a) do
Integer :id
index :id
end
create_table(:a){Integer :id, index: {unique: true}}
# Same as:
create_table(:a) do
Integer :id
index :id, unique: true
end
Similar to the +primary_key+ and +foreign_key+ methods, calling +index+ with an array of symbols
will create a multiple column index:
create_table(:albums) do
primary_key :id
foreign_key :artist_id, :artists
Integer :position
index [:artist_id, :position]
end
The +index+ method also accepts some options:
:name :: The name of the index (generated based on the table and column names if not provided).
:type :: The type of index to use (only supported by some databases)
:unique :: Make the index unique, so duplicate values are not allowed.
:where :: Create a partial index (only supported by some databases)
==== +unique+
The +unique+ method creates a unique constraint on the table. A unique constraint generally
operates identically to a unique index, so the following three +create_table+ blocks are
pretty much identical:
create_table(:a){Integer :a, unique: true}
create_table(:a) do
Integer :a
index :a, unique: true
end
create_table(:a) do
Integer :a
unique :a
end
Just like +index+, +unique+ can set up a multiple column unique constraint, where the
combination of the columns must be unique:
create_table(:a) do
Integer :a
Integer :b
unique [:a, :b]
end
==== +full_text_index+ and +spatial_index+
Both of these create specialized index types supported by some databases. They
both take the same options as +index+.
==== +constraint+
+constraint+ creates a named table constraint:
create_table(:artists) do
primary_key :id
String :name
constraint(:name_min_length){char_length(name) > 2}
end
Instead of using a block, you can use arguments that will be handled similarly
to Dataset#where:
create_table(:artists) do
primary_key :id
String :name
constraint(:name_length_range, Sequel.function(:char_length, :name)=>3..50)
end
==== +check+
+check+ operates just like +constraint+, except that it doesn't take a name
and it creates an unnamed constraint:
create_table(:artists) do
primary_key :id
String :name
check{char_length(name) > 2}
end
It's recommended that you use the +constraint+ method and provide a name for the
constraint, as that makes it easier to drop the constraint later if necessary.
== +create_join_table+
+create_join_table+ is a shortcut that you can use to create simple many-to-many join tables:
create_join_table(artist_id: :artists, album_id: :albums)
which expands to:
create_table(:albums_artists) do
foreign_key :album_id, :albums
foreign_key :artist_id, :artists
primary_key [:album_id, :artist_id]
index [:artist_id, :album_id]
end
== create_table :as
To create a table from the result of a SELECT query, instead of passing a block
to +create_table+, provide a dataset to the :as option:
create_table(:older_items, as: DB[:items].where{updated_at < Date.today << 6})
== +alter_table+
+alter_table+ is used to alter existing tables, changing their columns, indexes,
or constraints. It it used just like +create_table+, accepting a block which
is instance_evaled, and providing its own methods:
=== +add_column+
One of the most common methods, +add_column+ is used to add a column to the table.
Its API is similar to that of +create_table+'s +column+ method, where the first
argument is the column name, the second is the type, and the third is an options
hash:
alter_table(:albums) do
add_column :copies_sold, Integer, default: 0
end
=== +drop_column+
As you may expect, +drop_column+ takes a column name and drops the column. It's
often used in the +down+ block of a migration to drop a column added in an +up+ block:
alter_table(:albums) do
drop_column :copies_sold
end
=== +rename_column+
+rename_column+ is used to rename a column. It takes the old column name as the first
argument, and the new column name as the second argument:
alter_table(:albums) do
rename_column :copies_sold, :total_sales
end
=== +add_primary_key+
If you forgot to include a primary key on the table, and want to add one later, you
can use +add_primary_key+. A common use of this is to make many_to_many association
join tables into real models:
alter_table(:albums_artists) do
add_primary_key :id
end
Just like +create_table+'s +primary_key+ method, if you provide an array of symbols,
Sequel will not add a column, but will add a composite primary key constraint:
alter_table(:albums_artists) do
add_primary_key [:album_id, :artist_id]
end
It is possible to specify a name for the primary key constraint via the :name option:
alter_table(:albums_artists) do
add_primary_key [:album_id, :artist_id], name: :albums_artists_pkey
end
If you just want to take an existing single column and make it a primary key, call
+add_primary_key+ with an array with a single symbol:
alter_table(:artists) do
add_primary_key [:id]
end
=== +add_foreign_key+
+add_foreign_key+ can be used to add a new foreign key column or constraint to a table.
Like +add_primary_key+, if you provide it with a symbol as the first argument, it
creates a new column:
alter_table(:albums) do
add_foreign_key :artist_id, :artists
end
If you want to add a new foreign key constraint to an existing column, you provide an
array with a single element:
alter_table(:albums) do
add_foreign_key [:artist_id], :artists
end
It's encouraged to provide a name when adding the constraint, via the :foreign_key_constraint_name
option if adding the column and the constraint:
alter_table(:albums) do
add_foreign_key :artist_id, :artists, foreign_key_constraint_name: :albums_artist_id_fkey
end
or via the :name option if just adding the constraint:
alter_table(:albums) do
add_foreign_key [:artist_id], :artists, name: :albums_artist_id_fkey
end
To set up a multiple column foreign key constraint, use an array with multiple column symbols:
alter_table(:albums) do
add_foreign_key [:artist_name, :artist_location], :artists, name: :albums_artist_name_location_fkey
end
=== +drop_foreign_key+
+drop_foreign_key+ is used to drop foreign keys from tables. If you provide a symbol as
the first argument, it drops both the foreign key constraint and the column:
alter_table(:albums) do
drop_foreign_key :artist_id
end
If you want to just drop the foreign key constraint without dropping the column, use
an array. It's encouraged to use the :name option to provide the constraint name to
drop, though on some databases Sequel may be able to find the name through introspection:
alter_table(:albums) do
drop_foreign_key [:artist_id], name: :albums_artist_id_fkey
end
An array is also used to drop a composite foreign key constraint:
alter_table(:albums) do
drop_foreign_key [:artist_name, :artist_location], name: :albums_artist_name_location_fkey
end
If you do not provide a :name option and Sequel is not able to determine the name
to use, it will probably raise a Sequel::Error exception.
=== +add_index+
+add_index+ works just like +create_table+'s +index+ method, creating a new index on
the table:
alter_table(:albums) do
add_index :artist_id
end
It accepts the same options as +create_table+'s +index+ method, and you can set up
a multiple column index using an array:
alter_table(:albums_artists) do
add_index [:album_id, :artist_id], unique: true
end
=== +drop_index+
As you may expect, +drop_index+ drops an existing index:
alter_table(:albums) do
drop_index :artist_id
end
Just like +drop_column+, it is often used in the +down+ block of a migration.
To drop an index with a specific name, use the :name option:
alter_table(:albums) do
drop_index :artist_id, name: :artists_id_index
end
=== +add_full_text_index+, +add_spatial_index+
Corresponding to +create_table+'s +full_text_index+ and +spatial_index+ methods,
these two methods create new indexes on the table.
=== +add_constraint+
This adds a named constraint to the table, similar to +create_table+'s +constraint+
method:
alter_table(:albums) do
add_constraint(:name_min_length){char_length(name) > 2}
end
There is no method to add an unnamed constraint, but you can pass +nil+ as the first
argument of +add_constraint+ to do so. However, it's not recommended to do that
as it is more difficult to drop such a constraint.
=== +add_unique_constraint+
This adds a unique constraint to the table, similar to +create_table+'s +unique+
method. This usually has the same effect as adding a unique index.
alter_table(:albums) do
add_unique_constraint [:artist_id, :name]
end
You can also specify a name via the :name option when adding the constraint:
alter_table(:albums) do
add_unique_constraint [:artist_id, :name], name: :albums_artist_id_name_ukey
end
=== +drop_constraint+
This method drops an existing named constraint:
alter_table(:albums) do
drop_constraint(:name_min_length)
end
There is no database independent method to drop an unnamed constraint. Generally, the
database will give it a name automatically, and you will have to figure out what it is.
For that reason, you should not add unnamed constraints that you ever might need to remove.
On some databases, you must specify the type of constraint via a :type option:
alter_table(:albums) do
drop_constraint(:albums_pk, type: :primary_key)
drop_constraint(:albums_fk, type: :foreign_key)
drop_constraint(:albums_uk, type: :unique)
end
=== +set_column_default+
This modifies the default value of a column:
alter_table(:albums) do
set_column_default :copies_sold, 0
end
To remove a default value for a column, use +nil+ as the value:
alter_table(:albums) do
set_column_default :copies_sold, nil
end
=== +set_column_type+
This modifies a column's type. Most databases will attempt to convert existing values in
the columns to the new type:
alter_table(:albums) do
set_column_type :copies_sold, :Bignum
end
You can specify the type as a string or symbol, in which case it is used verbatim, or as a supported
ruby class or the :Bignum symbol, in which case it gets converted to an appropriate database type.
=== +set_column_allow_null+
This allows you to set the column as allowing NULL values:
alter_table(:albums) do
set_column_allow_null :artist_id
end
=== +set_column_not_null+
This allows you to set the column as not allowing NULL values:
alter_table(:albums) do
set_column_not_null :artist_id
end
== Other +Database+ schema modification methods
Sequel::Database has many schema modification instance methods,
most of which are shortcuts to the same methods in +alter_table+. The
following +Database+ instance methods just call +alter_table+ with a
block that calls the method with the same name inside the +alter_table+
block with all arguments after the first argument (which is used as
the table name):
* +add_column+
* +drop_column+
* +rename_column+
* +add_index+
* +drop_index+
* +set_column_default+
* +set_column_type+
For example, the following two method calls do the same thing:
alter_table(:artists){add_column :copies_sold, Integer}
add_column :artists, :copies_sold, Integer
There are some other schema modification methods that have no +alter_table+
counterpart:
=== +drop_table+
+drop_table+ takes multiple arguments and treats all arguments as a
table name to drop:
drop_table(:albums_artists, :albums, :artists)
Note that when dropping tables, you may need to drop them in a specific order
if you are using foreign keys and the database is enforcing referential
integrity. In general, you need to drop the tables containing the foreign
keys before the tables containing the primary keys they reference.
=== drop_table?
drop_table? is similar to drop_table, except that it only drops
the table if the table already exists. On some databases, it uses
IF EXISTS, on others it does a separate query to check for
existence.
=== +rename_table+
You can rename an existing table using +rename_table+. Like +rename_column+,
the first argument is the current name, and the second is the new name:
rename_table(:artist, :artists)
=== create_table!
create_table! drops the table if it exists
before attempting to create it, so:
create_table!(:artists) do
primary_key :id
end
is the same as:
drop_table?(:artists)
create_table(:artists) do
primary_key :id
end
=== create_table?
create_table? only creates the table if it does
not already exist, so:
create_table?(:artists) do
primary_key :id
end
is the same as:
unless table_exists?(:artists)
create_table(:artists) do
primary_key :id
end
end
=== +create_view+ and +create_or_replace_view+
These can be used to create views. The difference between them is that
+create_or_replace_view+ will unconditionally replace an existing view of
the same name, while +create_view+ will probably raise an error. Both methods
take the name as the first argument, and either a string or a dataset as the
second argument:
create_view(:gold_albums, DB[:albums].where{copies_sold > 500000})
create_or_replace_view(:gold_albums, "SELECT * FROM albums WHERE copies_sold > 500000")
=== +drop_view+
+drop_view+ drops existing views. Just like +drop_table+, it can accept multiple
arguments:
drop_view(:gold_albums, :platinum_albums)
sequel-5.63.0/doc/security.rdoc 0000664 0000000 0000000 00000040623 14342141206 0016372 0 ustar 00root root 0000000 0000000 = Security Considerations with Sequel
When using Sequel, there are some security areas you should be aware of:
* Code Execution
* SQL Injection
* Denial of Service
* Mass Assignment
* General Parameter Handling
== Code Execution
The most serious security vulnerability you can have in any library is
a code execution vulnerability. Sequel should not be vulnerable to this,
as it never calls eval on a string that is derived from user input.
However, some Sequel methods used for creating methods via metaprogramming
could conceivably be abused to do so:
* Sequel::Dataset.def_sql_method
* Sequel::JDBC.load_driver
* Sequel::Plugins.def_dataset_methods
* Sequel::Dataset.prepared_statements_module (private)
* Sequel::SQL::Expression.to_s_method (private)
As long as you don't call those with user input, you should not be
vulnerable to code execution.
== SQL Injection
The primary security concern in SQL database libraries is SQL injection.
Because Sequel promotes using ruby objects for SQL concepts instead
of raw SQL, it is less likely to be vulnerable to SQL injection.
However, because Sequel still makes it easy to use raw SQL, misuse of the
library can result in SQL injection in your application.
There are basically two kinds of possible SQL injections in Sequel:
* SQL code injections
* SQL identifier injections
=== SQL Code Injections
==== Full SQL Strings
Some Sequel methods are designed to execute raw SQL strings, including:
* Sequel::Database#execute
* Sequel::Database#execute_ddl
* Sequel::Database#execute_dui
* Sequel::Database#execute_insert
* Sequel::Database#run
* Sequel::Database#<<
* Sequel::Dataset#fetch_rows
* Sequel::Dataset#with_sql_all
* Sequel::Dataset#with_sql_delete
* Sequel::Dataset#with_sql_each
* Sequel::Dataset#with_sql_first
* Sequel::Dataset#with_sql_insert
* Sequel::Dataset#with_sql_single_value
* Sequel::Dataset#with_sql_update
Here are some examples of use:
DB.execute 'SQL'
DB.execute_ddl 'SQL'
DB.execute_dui 'SQL'
DB.execute_insert 'SQL'
DB.run 'SQL'
DB << 'SQL'
DB.fetch_rows('SQL'){|row| }
DB.dataset.with_sql_all('SQL')
DB.dataset.with_sql_delete('SQL')
DB.dataset.with_sql_each('SQL'){|row| }
DB.dataset.with_sql_first('SQL')
DB.dataset.with_sql_insert('SQL')
DB.dataset.with_sql_single_value('SQL')
DB.dataset.with_sql_update('SQL')
If you pass a string to these methods that is derived from user input, you open
yourself up to SQL injection. These methods are not designed to work at all
with user input. If you must call them with user input, you should escape the
user input manually via Sequel::Database#literal. Example:
DB.run "SOME SQL #{DB.literal(params[:user].to_s)}"
==== Full SQL Strings, With Possible Placeholders
Other Sequel methods are designed to support execution of raw SQL strings that may contain placeholders:
* Sequel::Database#[]
* Sequel::Database#fetch
* Sequel::Dataset#with_sql
Here are some examples of use:
DB['SQL'].all
DB.fetch('SQL').all
DB.dataset.with_sql('SQL').all
With these methods you should use placeholders, in which case Sequel automatically escapes the input:
DB['SELECT * FROM foo WHERE bar = ?', params[:user].to_s]
==== Manually Created Literal Strings
Sequel generally treats ruby strings as SQL strings (escaping them correctly), and
not as raw SQL. However, you can convert a ruby string to a literal string, and
Sequel will then treat it as raw SQL. This is typically done through
Sequel.lit[rdoc-ref:Sequel::SQL::Builders#lit].
Sequel.lit('a')
Using Sequel.lit[rdoc-ref:Sequel::SQL::Builders#lit] to turn a ruby string into a literal string results
in SQL injection if the string is derived from user input. With both of these
methods, the strings can contain placeholders, which you can use to safely include
user input inside a literal string:
Sequel.lit('a = ?', params[:user_id].to_s)
Even though they have similar names, note that Sequel::Database#literal operates very differently from
String#lit or Sequel.lit[rdoc-ref:Sequel::SQL::Builders#lit].
Sequel::Database#literal is for taking any supported object,
and getting an SQL representation of that object, while
String#lit or Sequel.lit[rdoc-ref:Sequel::SQL::Builders#lit] are for treating
a ruby string as raw SQL. For example:
DB.literal(Date.today) # "'2013-03-22'"
DB.literal('a') # "'a'"
DB.literal(Sequel.lit('a')) # "a"
DB.literal(a: 'a') # "(\"a\" = 'a')"
DB.literal(a: Sequel.lit('a')) # "(\"a\" = a)"
==== SQL Filter Fragments
Starting in Sequel 5, Sequel does not automatically convert plain strings to
literal strings in typical code. Instead, you can use Sequel.lit to
create literal strings:
Sequel.lit("name > 'A'")
To safely include user input as part of an SQL filter fragment, use Sequel.lit
with placeholders:
DB[:table].where(Sequel.lit("name > ?", params[:id].to_s)) # Safe
Be careful to never call Sequel.lit where the first argument is derived from
user input.
There are a few uncommon cases where Sequel will still convert
plain strings to literal strings.
==== SQL Fragment passed to Dataset#lock_style and Model#lock!
The Sequel::Dataset#lock_style and Sequel::Model#lock! methods also treat
an input string as SQL code. These methods should not be called with user input.
DB[:table].lock_style(params[:id]) # SQL injection!
Album.first.lock!(params[:id]) # SQL injection!
==== SQL Type Names
In general, most places where Sequel needs to use an SQL type that should
be specified by the user, it allows you to use a ruby string, and that
string is used verbatim as the SQL type. You should not use user input
for type strings.
DB[:table].select(Sequel.cast(:a, params[:id])) # SQL injection!
==== SQL Function Names
In most cases, Sequel does not quote SQL function names. You should not use
user input for function names.
DB[:table].select(Sequel.function(params[:id])) # SQL injection!
==== SQL Window Frames
For backwards compatibility, Sequel supports regular strings in the
window function :frame option, which will be treated as a literal string:
DB[:table].select{fun(arg).over(frame: 'SQL Here')}
You should make sure the frame argument is not derived from user input,
or switch to using a hash as the :frame option value.
==== auto_literal_strings extension
If the auto_literal_strings extension is used for backwards compatibility,
then Sequel will treat plain strings as literal strings if they are used
as the first argument to a filtering method. This can lead to SQL
injection:
DB[:table].where("name > #{params[:id].to_s}")
# SQL injection when using auto_literal_strings extension
If you are using the auto_literal_strings extension, you need to be very careful,
as the following methods will treat a plain string given as the first argument
as a literal string:
* Sequel::Dataset#where
* Sequel::Dataset#having
* Sequel::Dataset#filter
* Sequel::Dataset#exclude
* Sequel::Dataset#exclude_having
* Sequel::Dataset#or
* Sequel::Dataset#first
* Sequel::Dataset#last
* Sequel::Dataset#[]
Even stuff that looks like it may be safe isn't:
DB[:table].first(params[:num_rows])
# SQL injection when using auto_literal_strings extension
The Model.find[rdoc-ref:Sequel::Model::ClassMethods#find] and
Model.find_or_create[rdoc-ref:Sequel::Model::ClassMethods#find_or_create]
class methods will also treat string arguments as literal strings if the
auto_literal_strings extension is used:
Album.find(params[:id])
# SQL injection when using auto_literal_strings extension
Similar to the filter methods, the auto_literal_strings extension
also makes Sequel::Dataset#update treat a string argument as raw SQL:
DB[:table].update("column = 1")
So you should not do:
DB[:table].update(params[:changes])
# SQL injection when using auto_literal_strings extension
or:
DB[:table].update("column = #{params[:value].to_s}")
# SQL injection when using auto_literal_strings extension
Instead, you should do:
DB[:table].update(column: params[:value].to_s) # Safe
Because using the auto_literal_strings extension makes SQL injection
so much easier, it is recommended not to use it, and instead
to use Sequel.lit with placeholders.
=== SQL Identifier Injections
Usually, Sequel treats ruby symbols as SQL identifiers, and ruby
strings as SQL strings. However, there are some parts of Sequel
that treat ruby strings as SQL identifiers if an SQL string would
not make sense in the same context.
For example, Sequel::Database#from and Sequel::Dataset#from will treat a string as
a table name:
DB.from('t') # SELECT * FROM "t"
Another place where Sequel treats ruby strings as identifiers are
the Sequel::Dataset#insert and Sequel::Dataset#update methods:
DB[:t].update('b'=>1) # UPDATE "t" SET "b" = 1
DB[:t].insert('b'=>1) # INSERT INTO "t" ("b") VALUES (1)
Note how the identifier is still quoted in these cases. Sequel quotes identifiers by default
on most databases. However, it does not quote identifiers by default on DB2.
On those databases using an identifier derived from user input can lead to SQL injection.
Similarly, if you turn off identifier quoting manually on other databases, you open yourself
up to SQL injection if you use identifiers derived from user input.
When Sequel quotes identifiers, using an identifier derived from user input does not lead to
SQL injection, since the identifiers are also escaped when quoting.
Exceptions to this are Oracle (can't escape ") and Microsoft Access
(can't escape ]).
In general, even if it doesn't lead to SQL injection, you should avoid using identifiers
derived from user input unless absolutely necessary.
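If you must accept an identifier from user input (for example, a user-selected sort column), one sketch is to map the input to a fixed set of known symbols (the SORT_COLUMNS hash here is hypothetical):
SORT_COLUMNS = {'name'=>:name, 'copies_sold'=>:copies_sold}.freeze
DB[:albums].order(SORT_COLUMNS.fetch(params[:sort], :name)) # Safe: only known symbols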
Sequel also allows you to create identifiers using
Sequel.identifier[rdoc-ref:Sequel::SQL::Builders#identifier] for plain identifiers,
Sequel.qualify[rdoc-ref:Sequel::SQL::Builders#qualify] and
Sequel::SQL::Identifier#[][rdoc-ref:Sequel::SQL::QualifyingMethods#[]] for qualified identifiers, and
Sequel.as[rdoc-ref:Sequel::SQL::Builders#as] for aliased expressions. So if you
pass any of those values derived from user input, you are dealing with the same scenario.
Note that the issues with SQL identifiers do not just apply to places where
strings are used as identifiers, they also apply to all places where Sequel
uses symbols as identifiers. However, if you are creating symbols from user input,
you at least have a denial of service vulnerability in ruby <2.2, and possibly a
more serious vulnerability.
Note that many Database schema modification methods (e.g. create_table, add_column)
also allow for SQL identifier injections, and possibly also SQL code injections.
These methods should never be called with user input.
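For example, assuming +params+ contains user input:
DB.add_column(:albums, params[:column], String) # SQL identifier injection!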
== Denial of Service
Sequel converts some strings to symbols. Because symbols in ruby <2.2 are not
garbage collected, if the strings that are converted to symbols are
derived from user input, you have a denial of service vulnerability due to
memory exhaustion.
The strings that Sequel converts to symbols are generally not derived
from user input, so Sequel in general is not vulnerable to this. However,
users should be aware of the cases in which Sequel creates symbols, so
they do not introduce a vulnerability into their application.
=== Column Names/Aliases
Sequel returns SQL result sets as an array of hashes with symbol keys. The
keys are derived from the name that the database server gives the column. These
names are generally static. For example:
SELECT column FROM table
The database will generally use "column" as the name in the result set.
If you use an alias:
SELECT column AS alias FROM table
The database will generally use "alias" as the name in the result set. So
if you allow the user to control the alias name:
DB[:table].select(Sequel[:column].as(params[:alias]))
Then you can have a denial of service vulnerability. In general, such a vulnerability
is unlikely, because you are probably indexing into the returned hash(es) by name,
and if an alias was used and you didn't expect it, your application wouldn't work.
=== Database Connection Options
All database connection options are converted to symbols. For a
connection URL, the keys are generally fixed, but the scheme is turned
into a symbol and the query option keys are used as connection option
keys, so they are converted to symbols as well. For example:
postgres://host/database?option1=foo&option2=bar
Will result in :postgres, :option1, and :option2 symbols being created.
Certain option values are also converted to symbols. In the general case,
the :sql_log_level option value is converted, and some adapters treat
additional options similarly.
This is not generally a risk unless you are allowing the user to control
the connection URLs or are connecting to arbitrary databases at runtime.
== Mass Assignment
Mass assignment is the practice of passing a hash of columns and values
to a single method, and having multiple column values for a given object set
based on the content of the hash.
The security issue here is that mass assignment may allow the user to
set columns that you didn't intend to allow.
The Model#set[rdoc-ref:Sequel::Model::InstanceMethods#set] and Model#update[rdoc-ref:Sequel::Model::InstanceMethods#update] methods do mass
assignment. The default configuration of Sequel::Model allows all model
columns except for the primary key column(s) to be set via mass assignment.
Example:
album = Album.new
album.set(params[:album]) # Mass Assignment
Both Model.new[rdoc-ref:Sequel::Model::InstanceMethods::new] and Model.create[rdoc-ref:Sequel::Model::ClassMethods#create]
call Model#set[rdoc-ref:Sequel::Model::InstanceMethods#set] internally, so
they also allow mass assignment:
Album.new(params[:album]) # Mass Assignment
Album.create(params[:album]) # Mass Assignment
When the argument is derived from user input, instead of these methods, it is encouraged to either use
Model#set_fields[rdoc-ref:Sequel::Model::InstanceMethods#set_fields] or
Model#update_fields[rdoc-ref:Sequel::Model::InstanceMethods#update_fields],
which allow you to specify which fields to allow on a per-call basis. This
pretty much eliminates the chance that the user will be able to set a column
you did not intend to allow:
album.set_fields(params[:album], [:name, :copies_sold])
album.update_fields(params[:album], [:name, :copies_sold])
These two methods iterate over the second argument (+:name+ and +:copies_sold+ in
this example) instead of iterating over the entries in the first argument
(params[:album] in this example).
If you want to override the columns that Model#set[rdoc-ref:Sequel::Model::InstanceMethods#set]
allows by default during mass assignment, you can use the whitelist_security plugin, then call
the set_allowed_columns class method.
Album.plugin :whitelist_security
Album.set_allowed_columns(:name, :copies_sold)
Album.create(params[:album]) # Only name and copies_sold set
Being explicit on a per-call basis using the set_fields and update_fields methods is recommended
instead of using the whitelist_security plugin and setting a global whitelist.
For more details on the mass assignment methods, see the {Mass Assignment Guide}[rdoc-ref:doc/mass_assignment.rdoc].
== General Parameter Handling
This issue isn't necessarily specific to Sequel, but it is a good general practice.
If you are using values derived from user input, it is best to be explicit about
their type. For example:
Album.where(id: params[:id])
is probably a bad idea. Assuming you are using a web framework, params[:id] could
be a string, an array, a hash, nil, or potentially something else.
Assuming that +id+ is an integer field, you probably want to do:
Album.where(id: params[:id].to_i)
If you are looking something up by name, you should try to enforce the value to be
a string:
Album.where(name: params[:name].to_s)
If you are trying to use an IN clause with a list of id values based on input provided
on a web form:
Album.where(id: params[:ids].to_a.map(&:to_i))
Basically, be as explicit as possible. While there aren't any known security issues
in Sequel when you do:
Album.where(id: params[:id])
it allows the attacker to choose any of the following queries:
id IS NULL # nil
id = '1' # '1'
id IN ('1', '2', '3') # ['1', '2', '3']
id = ('a' = 'b') # {'a'=>'b'}
id = ('a' IN ('a', 'b') AND 'c' = '') # {'a'=>['a', 'b'], 'c'=>''}
While none of those allow for SQL injection, it's possible that they
might have an issue in your application. For example, a long array
or deeply nested hash might cause the database to have to do a lot of
work that could be avoided.
In general, it's best to let the attacker control as little as possible,
and explicitly specifying types helps a great deal there.
sequel-5.63.0/doc/sharding.rdoc 0000664 0000000 0000000 00000026763 14342141206 0016333 0 ustar 00root root 0000000 0000000 = Primary/Replica Configurations and Database Sharding
Sequel has support for primary/replica configurations (writable primary
database with read-only replica databases), as well as database sharding (where you can
pick a server to use for a given dataset). Support for both
features is database independent, and should work for all database adapters
that ship with Sequel.
== The :servers Database option
Sharding and read_only support are both enabled via the :servers database
option. Using the :servers database option makes Sequel use a connection pool
class that supports sharding, and the minimum required to enable sharding
support is to use the empty hash:
DB=Sequel.connect('postgres://primary_server/database', servers: {})
In most cases, you are probably not going to want to use an empty hash. Keys in the server hash are
not restricted to a particular type, but the general recommendation is to use a symbol
unless you have special requirements. Values in the server hash should be
either hashes or procs that return hashes. These hashes are merged into
the Database object's default options hash to get the connection options
for the shard, so you don't need to override all options, just the ones
that need to be modified. For example, if you are using the same user,
password, and database name and just the host is changing, you only need
a :host entry in each shard's hash.
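For example (a sketch with hypothetical host names), if only the host differs between shards, each shard's hash needs only a :host entry:
DB=Sequel.connect('postgres://user:password@primary_server/database',
servers: {read_only: {host: 'replica_server'}, shard1: {host: 'shard1_server'}})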
Note that all servers should have the same schema for all
tables you are accessing, unless you really know what you are doing.
== Primary and Replica Database Configurations
=== Single Primary, Single Replica
To use a single, read-only replica that handles SELECT queries, the following
is the simplest configuration:
DB=Sequel.connect('postgres://primary_server/database',
servers: {read_only: {host: 'replica_server'}})
This will use the replica_server for SELECT queries and primary_server for
other queries.
If you want to ensure your queries are going to a specific database, you
can force this for a given query by using the .server method and passing
the symbol name defined in the connect options. For example:
# Force the SELECT to run on the primary server
DB[:users].server(:default).all
# Force the DELETE to run on the read-only replica
DB[:users].server(:read_only).delete
=== Single Primary, Multiple Replicas
Let's say you have 4 replica servers with names replica_server0,
replica_server1, replica_server2, and replica_server3.
num_read_only = 4
read_only_host = rand(num_read_only)
read_only_proc = proc do |db|
{host: "replica_server#{(read_only_host+=1) % num_read_only}"}
end
DB=Sequel.connect('postgres://primary_server/database',
servers: {read_only: read_only_proc})
This will use one of the replica servers for SELECT queries and use the
primary server for other queries. It's also possible to pick a random host
instead of using the round robin approach presented above, but that can result
in less optimal resource usage.
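For comparison, a random-selection proc (a sketch; simpler, but it does not guarantee even distribution) would look like:
read_only_proc = proc do |db|
{host: "replica_server#{rand(num_read_only)}"}
end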
=== Multiple Primary, Multiple Replicas
This involves the same basic idea as the multiple replicas, single primary, but
it shows that the primary database is named :default. So for 4 primary servers and
4 replica servers:
num_read_only = 4
read_only_host = rand(num_read_only)
read_only_proc = proc do |db|
{host: "replica_server#{(read_only_host+=1) % num_read_only}"}
end
num_default = 4
default_host = rand(num_default)
default_proc = proc do |db|
{host: "primary_server#{(default_host+=1) % num_default}"}
end
DB=Sequel.connect('postgres://primary_server/database',
servers: {default: default_proc, read_only: read_only_proc})
== Sharding
There is specific support in Sequel for handling primary/replica database
combinations, with the only necessary setup being the database configuration.
However, since sharding is always going to be implementation dependent, Sequel
supplies the basic infrastructure, but you have to tell it which server to use
for each dataset. Let's assume a simple scenario, a distributed rainbow
table for SHA-1 hashes, sharding based on the first hex character (for a total
of 16 shards). First, you need to configure the database:
servers = {}
(('0'..'9').to_a + ('a'..'f').to_a).each do |hex|
servers[hex.to_sym] = {host: "hash_host_#{hex}"}
end
DB=Sequel.connect('postgres://hash_host/hashes', servers: servers)
This configures 17 servers, the 16 shard servers (/hash_host_[0-9a-f]/), and 1
default server which will be used if no shard is specified ("hash_host"). If
you want the default server to be one of the shard servers (e.g. hash_host_a),
it's easiest to do:
DB=Sequel.connect('postgres://hash_host_a/hashes', servers: servers)
That will still set up a second pool of connections for the default server,
since it considers the default server and shard servers independent. Note that
if you always set the shard on a dataset before using it in queries, it will
not attempt to connect to the default server. Sequel may use the default
server in queries it generates itself, such as to get column names or table
schemas, so you should always have a default server that works.
To set the shard for a given query, you use the Dataset#server method:
DB[:hashes].server(:a).where(hash: /31337/)
That will return all matching rows on the hash_host_a shard that have a hash
column that contains 31337.
Rainbow tables are generally used to find specific hashes, so to save some
work, you might want to add a method to the dataset that automatically sets
the shard to use. This is fairly easy using a Sequel::Model:
class Rainbow < Sequel::Model(:hashes)
dataset_module do
def plaintext_for_hash(hash)
raise(ArgumentError, 'Invalid SHA-1 Hash') unless /\A[0-9a-f]{40}\z/.match(hash)
server(hash[0...1].to_sym).where(hash: hash).get(:plaintext)
end
end
end
Rainbow.plaintext_for_hash("e580726d31f6e1ad216ffd87279e536d1f74e606")
=== :servers_hash Option
The connection pool can be further controlled to change how it handles attempts
to access shards that haven't been configured. The default is
to assume the :default shard. However, you can specify a
different shard using the :servers_hash option when connecting
to the database:
DB = Sequel.connect('postgres://...', servers_hash: Hash.new(:some_shard))
You can also use this feature to raise an exception if an
unconfigured shard is used:
DB = Sequel.connect('postgres://...', servers_hash: Hash.new{raise 'foo'})
If you specify a :servers_hash option to raise an exception for unconfigured
shards, you should also explicitly specify a :read_only entry in your :servers option
for the case where a shard is not specified. In most cases it is sufficient
to make the :read_only entry the same as the :default shard:
servers = {read_only: {}}
(('0'..'9').to_a + ('a'..'f').to_a).each do |hex|
servers[hex.to_sym] = {host: "hash_host_#{hex}"}
end
DB=Sequel.connect('postgres://hash_host/hashes', servers: servers,
servers_hash: Hash.new{raise "Invalid Server"})
=== Sharding Plugin
Sequel comes with a sharding plugin that makes it easy to use sharding with model objects.
It makes sure that objects retrieved from a specific shard are always saved back to that
shard, allows you to create objects on specific shards, and even makes sure associations
work well with shards. You just need to remember to set the model to use the plugin:
class Rainbow < Sequel::Model(:hashes)
plugin :sharding
end
Rainbow.server(:a).first(id: 1).update(plaintext: 'VGM')
If all of your models are sharded, you can set all models to use the plugin via:
Sequel::Model.plugin :sharding
=== server_block Extension
By default, you must specify the server/shard you want to use for every dataset/action,
or Sequel will use the default shard. If you have a group of queries that should use the
same shard, it can get a bit redundant to specify the same shard for all of them.
The server_block extension adds a Database#with_server method that scopes all database
access inside the block to the given shard by default:
DB.extension :server_block
DB.with_server(:a) do
# this SELECT query uses the "a" shard
if r = Rainbow.first(hash: /31337/)
r.count += 1
# this UPDATE query also uses the "a" shard
r.save
end
end
The server_block extension doesn't currently integrate with the sharding plugin, as it
ties into the Dataset#server method. This shouldn't present a problem in practice as
long as you just access the models inside the with_server block, since they will use
the shard set by with_server by default. However, you will probably have issues if
you retrieve the models inside the block and save them outside of the block. If you
need to do that, call the server method explicitly on the dataset used to retrieve the
model objects.
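For example (a sketch, assuming the Rainbow model above uses the sharding plugin), calling +server+ explicitly when retrieving pins the object to that shard, so saving outside the block still works:
r = DB.with_server(:a){Rainbow.server(:a).first(hash: /31337/)}
r.update(plaintext: 'VGM') # still uses the "a" shard, even outside the block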
The with_server method also supports a second argument for the default read_only server
to use, which can be useful if you are mixing sharding and primary/replica servers:
DB.extension :server_block
DB.with_server(:a, :a_read_only) do
# this SELECT query uses the "a_read_only" shard
if r = Rainbow.first(hash: /31337/)
r.count += 1
# this UPDATE query also uses the "a" shard
r.save
end
end
=== arbitrary_servers Extension
By default, Sequel's sharding support is designed to work with predefined shards. It ships
with Database#add_servers and Database#remove_servers methods to modify these predefined
shards on the fly, but it is a bit cumbersome to work with truly arbitrary servers
(requiring you to call add_servers before use, then remove_servers after use).
The arbitrary_servers extension allows you to pass a server/shard options hash as the
server to use, and those options will be merged directly into the database's default options:
DB.extension :arbitrary_servers
DB[:rainbows].server(host: 'hash_host_a').all
# or
DB[:rainbows].server(host: 'hash_host_b', database: 'backup').all
arbitrary_servers is designed to work well in conjunction with the server_block extension:
DB.with_server(host: 'hash_host_b', database: 'backup') do
DB.synchronize do
# All queries here default to the backup database on hash_host_b
end
end
If you are using arbitrary_servers with server_block, you may want to
define the following method (or something similar) so that you don't
need to call synchronize separately:
def DB.with_server(*)
super{synchronize{yield}}
end
The reason for the synchronize method is that it checks out a connection
and makes the same connection available for the duration of the block.
If you don't do that, Sequel will probably disconnect from the database
and reconnect to the database on each request, since connections to
arbitrary servers are not cached.
Note that this extension only works with the sharded threaded connection
pool. If you are using the sharded single connection pool, you need
to switch to the sharded threaded connection pool before using this
extension. If you are passing the :single_threaded option to
the Database, just remove that option. If you are setting:
Sequel.single_threaded = true
just remove or comment out that code.
== JDBC
If you are using the jdbc adapter, note that it does not handle separate
options such as +:host+, +:user+, and +:port+. If you would like to use
the +:servers+ option when connecting to a JDBC database, each hash value in
the +servers+ option should contain a +:uri+ key with a JDBC connection string
for that shard as the value. Example:
DB=Sequel.connect('jdbc:postgresql://primary_server/database',
servers: {read_only: {uri: 'jdbc:postgresql://replica_server/database'}})
sequel-5.63.0/doc/sql.rdoc 0000664 0000000 0000000 00000066542 14342141206 0015332 0 ustar 00root root 0000000 0000000 = Sequel for SQL Users
One of the main benefits of Sequel is that it doesn't require the user to know SQL in order to use it, though SQL knowledge is certainly helpful. Unlike most other Sequel documentation, this guide assumes you know SQL, and provides an easy way to discover how to do something in Sequel given the knowledge of how to do so in SQL.
== You Can Just Use SQL
With Sequel, it's very easy to just use SQL for your queries. If learning Sequel's DSL seems like a waste of time, you are certainly free to write all your queries in SQL. Sequel uses a few different methods depending on the type of query you are doing.
=== SELECT
For SELECT queries, you should probably use Database#fetch with a string and a block:
DB.fetch("SELECT * FROM albums") do |row|
puts row[:name]
end
Database#fetch will take the query you give it, execute it on the database, and yield a hash with column symbol keys for each row returned. If you want to use some placeholder variables, you can set the placeholders with ? and add the corresponding arguments to +fetch+:
DB.fetch("SELECT * FROM albums WHERE name LIKE ?", 'A%') do |row|
puts row[:name]
end
You can also use named placeholders by starting the placeholder with a colon, and using a hash for the argument:
DB.fetch("SELECT * FROM albums WHERE name LIKE :pattern", pattern: 'A%') do |row|
puts row[:name]
end
This can be helpful for long queries where it is difficult to match the question marks in the query with the arguments.
What Sequel actually does internally is two separate things. It first creates a dataset representing the query, and then it executes the dataset's SQL code to retrieve the objects. Often, you want to define a dataset at some point, but not execute it until later. You can do this by leaving off the block, and storing the dataset in a variable:
ds = DB.fetch("SELECT * FROM albums")
Then, when you want to retrieve the rows later, you can call +each+ on the dataset to retrieve the rows:
ds.each{|r| puts r[:name]}
You should note that Database#[] calls Database#fetch if a string is provided, so you can also do:
ds = DB["SELECT * FROM albums"]
ds.each{|r| puts r[:name]}
However, note that Database#[] cannot take a block directly; you have to call +each+ on the returned dataset. There are plenty of other methods besides +each+. For example, the +all+ method returns all records in the dataset as an array:
DB["SELECT * FROM albums"].all # [{:id=>1, :name=>'RF', ...}, ...]
=== INSERT, UPDATE, DELETE
INSERT, UPDATE, and DELETE all work the same way. You first create the dataset with the SQL you want to execute using Database#[]:
insert_ds = DB["INSERT INTO albums (name) VALUES (?)", 'RF']
update_ds = DB["UPDATE albums SET name = ? WHERE name = ?", 'MO', 'RF']
delete_ds = DB["DELETE FROM albums WHERE name = ?", 'MO']
Then, you call the +insert+, +update+, or +delete+ method on the returned dataset:
insert_ds.insert
update_ds.update
delete_ds.delete
+update+ and +delete+ should return the number of rows affected, and +insert+ should return the autogenerated primary key integer for the row inserted (if any).
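For example (the return values here are illustrative):
update_ds.update # => 1 (number of rows updated)
delete_ds.delete # => 1 (number of rows deleted)
insert_ds.insert # => 2 (hypothetical autogenerated primary key)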
=== Other Queries
All other queries such as TRUNCATE, CREATE TABLE, and ALTER TABLE should be executed using Database#run:
DB.run "CREATE TABLE albums (id integer primary key, name varchar(255))"
You can also use Database#<<:
DB << "ALTER TABLE albums ADD COLUMN copies_sold INTEGER"
=== Other Places
Almost everywhere in Sequel, you can drop down to literal SQL by providing a literal string, which you can create with Sequel.lit:
DB[:albums].select('name') # SELECT 'name' FROM albums
DB[:albums].select(Sequel.lit('name')) # SELECT name FROM albums
For a simpler way of creating literal strings, you can also use the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc], which adds the String#lit method, and other methods that integrate Sequel's DSL with the Ruby language:
DB[:albums].select('name'.lit)
So you can use Sequel's DSL everywhere you find it helpful, and fallback to literal SQL if the DSL can't do what you want or you just find literal SQL easier.
== Translating SQL Expressions into Sequel
The rest of this guide assumes you want to use Sequel's DSL to represent your query, that you know how to write the query in SQL, but you aren't sure how to write it in Sequel's DSL.
This section will describe how specific SQL expressions are handled in Sequel. The next section will discuss how to create queries by using method chaining on datasets.
=== Database#literal
It's important to get familiar with the Database#literal method, which will return the SQL that will be used for a given expression:
DB.literal(1)
# => "1"
DB.literal(:column)
# => "\"column\""
DB.literal('string')
# => "'string'"
Try playing around to see how different objects get literalized into SQL.
=== Database Loggers
Some Sequel methods handle literalization slightly differently than Database#literal. If you want to see all SQL queries that Sequel is sending to the database, you should add a database logger:
DB.loggers << Logger.new($stdout)
Now that you know how to see what SQL is being used, let's jump in and see how to map SQL syntax to Sequel syntax:
=== Identifiers
In Sequel, SQL identifiers are usually specified as Ruby symbols:
:column # "column"
As you can see, Sequel quotes identifiers by default. Depending on your database, it may uppercase them by default as well:
:column # "COLUMN" on some databases
A plain symbol is usually treated as an unqualified identifier. However, if you are using multiple tables in a query, and you want to reference a column in one of the tables that has the same name as a column in another one of the tables, you need to qualify that reference. Note that you can't use a period to separate them:
:table.column # calls the column method on the symbol
Also note that specifying the period inside the symbol doesn't work if you are quoting identifiers:
:"table.column" # "table.column" instead of "table"."column"
There are a few different Sequel methods for creating qualified identifier objects. The recommended way is to explicitly create a qualified identifier by using Sequel.[] to create an identifier and call [] or +qualify+ on that, or by using the Sequel.qualify method with the table and column symbols:
Sequel[:table][:column] # "table"."column"
Sequel[:column].qualify(:table) # "table"."column"
Sequel.qualify(:table, :column) # "table"."column"
Another way to generate identifiers is to use Sequel's {virtual row support}[rdoc-ref:doc/virtual_rows.rdoc]:
DB[:albums].select{name} # SELECT "name" FROM "albums"
DB[:albums].select{albums[:name]} # SELECT "albums"."name" FROM "albums"
You can also use the symbol_aref extension for creating qualified identifiers:
Sequel.extension :symbol_aref
:table[:column] # "table"."column"
=== Numbers
In general, Ruby numbers map directly to SQL numbers:
# Integers
1 # 1
-1 # -1
# Floats
1.5 # 1.5
# BigDecimals
BigDecimal('1000000.123091029') # 1000000.123091029
=== Strings
In general, Ruby strings map directly to SQL strings:
'name' # 'name'
"name" # 'name'
=== Aliasing
You can use the Sequel.as method to create an alias, and the +as+ method on most Sequel-specific expression objects:
Sequel.as(:column, :alias) # "column" AS "alias"
Sequel[:column].as(:alias) # "column" AS "alias"
Sequel[:table][:column].as(:alias) # "table"."column" AS "alias"
(Sequel[:column] + 1).as(:alias) # ("column" + 1) AS "alias"
You can also use the symbol_as extension for creating aliased identifiers:
Sequel.extension :symbol_as
:column.as(:alias) # "column" AS "alias"
If you want to use a derived column list, you can provide an array of column aliases:
Sequel.as(:table, :alias, [:c1, :c2]) # "table" AS "alias"("c1", "c2")
=== Functions
The easiest way to use SQL functions is via a virtual row:
DB[:albums].select{func.function} # SELECT func() FROM "albums"
DB[:albums].select{func(col1, col2)} # SELECT func("col1", "col2") FROM "albums"
You can also use the Sequel.function method on the symbol that contains the function name:
Sequel.function(:func) # func()
Sequel.function(:func, :col1, :col2) # func("col1", "col2")
=== Aggregate Functions
Aggregate functions work the same way as normal functions, since they share the same syntax:
Sequel.function(:sum, :column) # sum(column)
To use the DISTINCT modifier to an aggregate function, call the +distinct+ method on the function expression, which returns a new function expression:
DB[:albums].select{sum(:column).distinct} # SELECT sum(DISTINCT column) FROM albums
If you want to use the wildcard as the sole argument of the aggregate function, use the * method on the function expression:
Sequel.function(:count).* # count(*)
DB[:albums].select{count.function.*} # SELECT count(*) FROM albums
Note that Sequel provides helper methods for aggregate functions such as +count+, +sum+, +min+, +max+, +avg+, and +group_and_count+, which handle common uses of aggregate functions.
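For example:
DB[:albums].count # => number of rows in albums
DB[:albums].sum(:copies_sold) # => sum of the copies_sold column
DB[:albums].group_and_count(:artist_id)
# SELECT artist_id, count(*) AS count FROM albums GROUP BY artist_id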
=== Window Functions
If the database supports window functions, Sequel can handle them by calling the +over+ method on a function expression:
DB[:albums].select{row_number.function.over}
# SELECT row_number() OVER () FROM albums
DB[:albums].select{count.function.*.over}
# SELECT count(*) OVER () FROM albums
DB[:albums].select{function(:col1).over(partition: col2, order: col3)}
# SELECT function(col1) OVER (PARTITION BY col2 ORDER BY col3) FROM albums
DB[:albums].select{function(c1, c2).over(partition: [c3, c4], order: [c5, c6.desc])}
# SELECT function(c1, c2) OVER (PARTITION BY c3, c4 ORDER BY c5, c6 DESC) FROM albums
DB[:albums].select{function(c1).over(partition: c2, order: :c3, frame: :rows)}
# SELECT function(c1) OVER (PARTITION BY c2 ORDER BY c3 ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM albums
DB[:albums].select{function(c1).over(partition: c2, order: :c3, frame: {type: :range, start: 1, end: 1})}
# SELECT function(c1) OVER (PARTITION BY c2 ORDER BY c3 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM albums
DB[:albums].select{function(c1).over(partition: c2, order: :c3, frame: {type: :groups, start: [2, :preceding], end: [1, :preceding]})}
# SELECT function(c1) OVER (PARTITION BY c2 ORDER BY c3 GROUPS BETWEEN 2 PRECEDING AND 1 PRECEDING) FROM albums
DB[:albums].select{function(c1).over(partition: c2, order: :c3, frame: {type: :range, start: :preceding, exclude: :current})}
# SELECT function(c1) OVER (PARTITION BY c2 ORDER BY c3 RANGE UNBOUNDED PRECEDING EXCLUDE CURRENT ROW) FROM albums
=== Schema Qualified Functions
If the database supports schema qualified functions, Sequel can handle them by calling the +function+ method on a qualified identifier:
DB[:albums].select{schema[:function].function}
# SELECT schema.function() FROM albums
DB[:albums].select{schema[:function].function(:col, 2, "a")}
# SELECT schema.function(col, 2, 'a') FROM albums
=== Portable/Emulated Functions
Sequel offers some support for portable SQL functions, allowing you to call standard SQL functions, where Sequel will emulate support on databases that lack native support.
Some examples are:
Sequel.char_length(:column) # char_length(column)
Sequel.extract(:year, :column) # extract(year FROM column)
Sequel.trim(:column) # trim(column)
=== Equality Operator (=)
Sequel uses hashes to specify equality:
{column: 1} # ("column" = 1)
You can also specify this as an array of two element arrays:
[[:column, 1]] # ("column" = 1)
For expression objects, you can also use the =~ method:
where{column =~ 1} # ("column" = 1)
=== Not Equal Operator (!=)
You can specify a not equals condition by inverting the hash or array of two element arrays using Sequel.negate or Sequel.~:
Sequel.negate(column: 1) # ("column" != 1)
Sequel.negate([[:column, 1]]) # ("column" != 1)
Sequel.~(column: 1) # ("column" != 1)
Sequel.~([[:column, 1]]) # ("column" != 1)
The difference between the two is that +negate+ only works on hashes and arrays of two element arrays, and it negates all entries in the hash or array, while ~ does a general inversion. This is best shown by an example with multiple entries:
Sequel.negate(column: 1, foo: 2) # (("column" != 1) AND (foo != 2))
Sequel.~(column: 1, foo: 2) # (("column" != 1) OR (foo != 2))
You can also use the ~ method on an equality expression:
where{~(column =~ 1)} # ("column" != 1)
Or you can use the !~ method:
where{column !~ 1} # ("column" != 1)
The most common need for not equals is in filters, in which case you can use the +exclude+ method:
DB[:albums].exclude(column: 1) # SELECT * FROM "albums" WHERE ("column" != 1)
Note that +exclude+ does a generalized inversion, similar to Sequel.~.
=== Inclusion and Exclusion Operators (IN, NOT IN)
Sequel also uses hashes to specify inclusion, and inversions of those hashes to specify exclusion:
{column: [1, 2, 3]} # ("column" IN (1, 2, 3))
Sequel.~(column: [1, 2, 3]) # ("column" NOT IN (1, 2, 3))
As you may have guessed, Sequel switches from an = to an IN when the hash value is an array. It also does this for datasets, which easily allows you to test for inclusion and exclusion in a subselect:
{column: DB[:albums].select(:id)} # ("column" IN (SELECT "id" FROM "albums"))
Sequel.~(column: DB[:albums].select(:id)) # ("column" NOT IN (SELECT "id" FROM "albums"))
Similar to =, you can also use =~ with expressions for inclusion:
where{column =~ [1, 2, 3]} # ("column" IN (1, 2, 3))
and !~ for exclusion:
where{column !~ [1, 2, 3]} # ("column" NOT IN (1, 2, 3))
Sequel also supports the SQL EXISTS operator using Dataset#exists:
DB[:albums].exists # EXISTS (SELECT * FROM albums)
=== Identity Operators (IS, IS NOT)
Hashes in Sequel use IS if the value is +true+, +false+, or +nil+:
{column: nil} # ("column" IS NULL)
{column: true} # ("column" IS TRUE)
{column: false} # ("column" IS FALSE)
Negation works the same way as it does for equality and inclusion:
Sequel.~(column: nil) # ("column" IS NOT NULL)
Sequel.~(column: true) # ("column" IS NOT TRUE)
Sequel.~(column: false) # ("column" IS NOT FALSE)
Likewise, =~ works for identity and !~ for negative identity on expressions:
where{column =~ nil} # ("column" IS NULL)
where{column !~ nil} # ("column" IS NOT NULL)
=== Inversion Operator (NOT)
Sequel's general inversion operator is ~, which works on symbols and most Sequel-specific expression objects:
Sequel.~(:column) # NOT "column"
Note that ~ will actually apply the inversion operation to the underlying object, which is why
Sequel.~(column: 1)
produces (column != 1) instead of NOT (column = 1).
=== Inequality Operators (< > <= >=)
Sequel defines the inequality operators directly on most Sequel-specific expression objects:
Sequel[:table][:column] > 1 # ("table"."column" > 1)
Sequel[:table][:column] < 1 # ("table"."column" < 1)
Sequel.function(:func) >= 1 # (func() >= 1)
Sequel.function(:func, :column) <= 1 # (func("column") <= 1)
If you want to use them on a symbol, you should call Sequel.[] with the symbol to get an expression object:
Sequel[:column] > 1 # ("column" > 1)
A common use of virtual rows is to handle inequality operators:
DB[:albums].where{col1 > col2} # SELECT * FROM "albums" WHERE ("col1" > "col2")
=== Standard Mathematical Operators (+ - * /)
The standard mathematical operates are defined on most Sequel-specific expression objects:
Sequel[:column] + 1 # "column" + 1
Sequel[:table][:column] - 1 # "table"."column" - 1
Sequel[:table][:column] * 1 # "table"."column" * 1
Sequel[:column] / 1 # "column" / 1
Sequel[:column] ** 1 # power("column", 1)
You can also call the operator methods directly on the Sequel module:
Sequel.+(:column, 1) # "column" + 1
Sequel.-(Sequel[:table][:column], 1) # "table"."column" - 1
Sequel.*(Sequel[:table][:column], 1) # "table"."column" * 1
Sequel./(:column, 1) # "column" / 1
Sequel.**(:column, 1) # power("column", 1)
Note that since Sequel implements support for Ruby's coercion protocol, the following also works:
1 + Sequel[:column]
1 - Sequel[:table][:column]
=== Boolean Operators (AND OR)
Sequel defines the & and | methods on most Sequel-specific expression objects to handle AND and OR:
Sequel[:column1] & :column2 # ("column1" AND "column2")
Sequel[{column1: 1}] | {column2: 2} # (("column1" = 1) OR ("column2" = 2))
(Sequel.function(:func) > 1) & :column3 # ((func() > 1) AND "column3")
Note the use of parentheses in the last statement. If you omit them, you won't get what you expect.
Because & has higher precedence than >,
Sequel.function(:func) > 1 & :column3
is parsed as:
Sequel.function(:func) > (1 & :column3)
You can also use the Sequel.& and Sequel.| methods:
Sequel.&(:column1, :column2) # ("column1" AND "column2")
Sequel.|({column1: 1}, {column2: 2}) # (("column1" = 1) OR ("column2" = 2))
You can use hashes and arrays of two element arrays to specify AND and OR with equality conditions:
{column1: 1, column2: 2} # (("column1" = 1) AND ("column2" = 2))
[[:column1, 1], [:column2, 2]] # (("column1" = 1) AND ("column2" = 2))
As you can see, these literalize with ANDs by default. You can use the Sequel.or method to use OR instead:
Sequel.or(column1: 1, column2: 2) # (("column1" = 1) OR ("column2" = 2))
As you can see in the above examples, Sequel.| and Sequel.or work differently.
Sequel.| is for combining an arbitrary number of expressions using OR. If you pass a single
argument, Sequel.| will just convert it to a Sequel expression, similar to Sequel.expr.
Sequel.or is for taking a single hash or array of two element arrays and combining the
elements of that single argument using OR instead of AND:
Sequel.|(column1: 1, column2: 2) # (("column1" = 1) AND ("column2" = 2))
Sequel.or(column1: 1, column2: 2) # (("column1" = 1) OR ("column2" = 2))
Sequel.|({column1: 1}, {column2: 2}) # (("column1" = 1) OR ("column2" = 2))
Sequel.or({column1: 1}, {column2: 2}) # ArgumentError
You've already seen the Sequel.negate method, which will use ANDs if multiple entries are used:
Sequel.negate(column1: 1, column2: 2) # (("column1" != 1) AND ("column2" != 2))
To negate while using ORs, the Sequel.~ operator can be used:
Sequel.~(column1: 1, column2: 2) # (("column1" != 1) OR ("column2" != 2))
Note again that Dataset#exclude uses ~, not +negate+:
DB[:albums].exclude(column1: 1, column2: 2) # SELECT * FROM "albums" WHERE (("column" != 1) OR ("column2" != 2))
=== Casts
Casting in Sequel is done with the +cast+ method, which is available on most of the Sequel-specific expression objects:
Sequel[:name].cast(:text) # CAST("name" AS text)
Sequel['1'].cast(:integer) # CAST('1' AS integer)
Sequel[:table][:column].cast(:date) # CAST("table"."column" AS date)
You can also use the Sequel.cast method:
Sequel.cast(:name, :text) # CAST("name" AS text)
=== Bitwise Mathematical Operators (& | ^ << >> ~)
Sequel allows the use of bitwise mathematical operators on Sequel::SQL::NumericExpression objects:
Sequel[:number] + 1 # => #<Sequel::SQL::NumericExpression ...>
(Sequel[:number] + 1) & 5 # (("number" + 1) & 5)
As you can see, when you use the + operator on a symbol, you get a NumericExpression. You can turn an expression into a NumericExpression using +sql_number+:
Sequel[:number].sql_number | 5 # ("number" | 5)
Sequel.function(:func).sql_number << 7 # (func() << 7)
Sequel.cast(:name, :integer).sql_number >> 8 # (CAST("name" AS integer) >> 8)
Sequel allows you to do the cast and conversion at the same time via +cast_numeric+:
Sequel[:name].cast_numeric ^ 9 # (CAST("name" AS integer) ^ 9)
Note that &, |, and ~ are already defined to do AND, OR, and NOT on most expressions, so if you want to use the bitwise operators, you need to make sure that they are converted first:
~Sequel[:name] # NOT "name"
~Sequel[:name].sql_number # ~"name"
=== String Operators (||, LIKE, Regexp)
Sequel allows the use of the string concatenation operator on Sequel::SQL::StringExpression objects, which can be created using the +sql_string+ method on an expression:
Sequel[:name].sql_string + ' - Name' # ("name" || ' - Name')
Just like for the bitwise operators, Sequel allows you to do the cast and conversion at the same time via +cast_string+:
Sequel[:number].cast_string + ' - Number' # (CAST(number AS varchar(255)) || ' - Number')
Note that, similar to the mathematical operators, you cannot switch the order of the operands and have it work:
'Name - ' + Sequel[:name].sql_string # raises TypeError
Just like for the mathematical operators, you can use Sequel.[] to wrap the object:
Sequel['Name - '] + :name # ('Name - ' || "name")
The Sequel.join method concatenates all of the elements in the array:
Sequel.join(['Name', :name]) # ('Name' || "name")
Just like Ruby's String#join, you can provide an argument for a string used to join each element:
Sequel.join(['Name', :name], ' - ') # ('Name' || ' - ' || "name")
For the LIKE operator, Sequel defines the +like+ and +ilike+ methods on most Sequel-specific expression objects:
Sequel[:name].like('A%') # ("name" LIKE 'A%' ESCAPE '\')
Sequel[:name].ilike('A%') # ("name" ILIKE 'A%' ESCAPE '\')
You can also use the Sequel.like and Sequel.ilike methods:
Sequel.like(:name, 'A%') # ("name" LIKE 'A%' ESCAPE '\')
Sequel.ilike(:name, 'A%') # ("name" ILIKE 'A%' ESCAPE '\')
Note that the above syntax for +ilike+, while Sequel's default, is specific to PostgreSQL. However, most other adapters override the behavior. For example, on MySQL, Sequel uses LIKE BINARY for +like+, and LIKE for +ilike+. If the database supports both case sensitive and case insensitive LIKE, then +like+ will use a case sensitive LIKE, and +ilike+ will use a case insensitive LIKE.
Inverting the LIKE operator works like other inversions:
~Sequel.like(:name, 'A%') # ("name" NOT LIKE 'A%' ESCAPE '\')
Sequel also supports SQL regular expressions on MySQL and PostgreSQL (and SQLite when using the sqlite adapter with the :setup_regexp_function Database option). You can use these by passing a Ruby regular expression to +like+ or +ilike+, or by making the regular expression a hash value:
Sequel.like(:name, /^A/) # ("name" ~ '^A')
~Sequel.ilike(:name, /^A/) # ("name" !~* '^A')
{name: /^A/i} # ("name" ~* '^A')
Sequel.~(name: /^A/) # ("name" !~ '^A')
Note that using +ilike+ with a regular expression will always make the regexp case insensitive. If you use +like+ or the hash with regexp value, it will only be case insensitive if the Regexp itself is case insensitive.
=== Order Specifications (ASC, DESC)
Sequel supports specifying ascending or descending order using the +asc+ and +desc+ method on most Sequel-specific expression objects:
Sequel[:column].asc # "column" ASC
Sequel[:table][:column].desc # "table"."column" DESC
You can also use the Sequel.asc and Sequel.desc methods:
Sequel.asc(:column) # "column" ASC
Sequel.desc(Sequel[:table][:column]) # "table"."column" DESC
On some databases, you can specify null ordering:
Sequel.asc(:column, nulls: :first) # "column" ASC NULLS FIRST
Sequel.desc(Sequel[:table][:column], nulls: :last) # "table"."column" DESC NULLS LAST
=== All Columns (.*)
To select all columns in a table, Sequel supports the * method on identifiers and qualified identifiers without an argument:
Sequel[:table].* # "table".*
Sequel[:schema][:table].* # "schema"."table".*
=== CASE statements
Sequel supports SQL CASE statements using the Sequel.case method. The first argument is a hash or array of two element arrays representing the conditions, the second argument is the default value (ELSE). The keys of the hash (or first element in each array) is the WHEN condition, and the values of the hash (or second element in each array) is the THEN result. Here are some examples:
Sequel.case({column: 1}, 0) # (CASE WHEN "column" THEN 1 ELSE 0 END)
Sequel.case([[:column, 1]], 0) # (CASE WHEN "column" THEN 1 ELSE 0 END)
Sequel.case({{column: nil}=>1}, 0) # (CASE WHEN (column IS NULL) THEN 1 ELSE 0 END)
If the hash or array has multiple arguments, multiple WHEN clauses are used:
Sequel.case({c: 1, d: 2}, 0) # (CASE WHEN "c" THEN 1 WHEN "d" THEN 2 ELSE 0 END)
Sequel.case([[:c, 1], [:d, 2]], 0) # (CASE WHEN "c" THEN 1 WHEN "d" THEN 2 ELSE 0 END)
If you provide a 3rd argument to Sequel.case, it goes between CASE and WHEN:
Sequel.case({2=>1, 3=>5}, 0, :column) # (CASE column WHEN 2 THEN 1 WHEN 3 THEN 5 ELSE 0 END)
=== Subscripts/Array Access ([])
Sequel supports SQL subscripts using the +sql_subscript+ method on most Sequel-specific expression objects:
Sequel[:column].sql_subscript(3) # column[3]
Sequel[:table][:column].sql_subscript(3) # table.column[3]
You can also use the Sequel.subscript method:
Sequel.subscript(:column, 3) # column[3]
Just like in SQL, you can use any expression as a subscript:
Sequel.subscript(:column, Sequel.function(:func)) # column[func()]
== Building Queries in Sequel
In Sequel, SQL queries are built with method chaining.
=== Creating Datasets
You generally start creating a dataset by calling Database#[] with a symbol specifying the table name:
DB[:albums] # SELECT * FROM albums
If you want to select from multiple FROM tables, use multiple arguments:
DB[:albums, :artists] # SELECT * FROM albums, artists
If you don't want to select from any FROM tables, just call +dataset+:
DB.dataset # SELECT *
=== Chaining Methods
Once you have your dataset object, you build queries by chaining methods, usually with one method per clause in the query:
DB[:albums].select(:id, :name).where(Sequel.like(:name, 'A%')).order(:name)
# SELECT id, name FROM albums WHERE (name LIKE 'A%' ESCAPE '\') ORDER BY name
Note that the order of your method chain is not usually important unless you have multiple methods that affect the same clause:
DB[:albums].order(:name).where(Sequel.like(:name, 'A%')).select(:id, :name)
# SELECT id, name FROM albums WHERE (name LIKE 'A%' ESCAPE '\') ORDER BY name
=== Using the Same Dataset for SELECT, INSERT, UPDATE, and DELETE
Also note that while the SELECT clause is displayed when you look at a dataset, a Sequel dataset can be used for INSERT, UPDATE, and DELETE as well. Here's an example:
ds = DB[:albums]
ds.all # SELECT * FROM albums
ds.insert(name: 'RF') # INSERT INTO albums (name) VALUES ('RF')
ds.update(name: 'RF') # UPDATE albums SET name = 'RF'
ds.delete # DELETE FROM albums
In general, the +insert+, +update+, and +delete+ methods use the appropriate clauses you defined on the dataset:
ds = DB[:albums].where(id: 1)
ds.all # SELECT * FROM albums WHERE (id = 1)
ds.insert(name: 'RF') # INSERT INTO albums (name) VALUES ('RF')
ds.update(name: 'RF') # UPDATE albums SET name = 'RF' WHERE (id = 1)
ds.delete # DELETE FROM albums WHERE (id = 1)
Note how +update+ and +delete+ used the +where+ argument, but +insert+ did not, because INSERT doesn't use a WHERE clause.
=== Methods Used for Each SQL Clause
To see which methods exist that affect each SQL clause, see the {"Dataset Basics" guide}[rdoc-ref:doc/dataset_basics.rdoc].
sequel-5.63.0/doc/testing.rdoc 0000664 0000000 0000000 00000025232 14342141206 0016177 0 ustar 00root root 0000000 0000000 = Testing with Sequel
Whether or not you use Sequel in your application, you are usually going to want to have tests that ensure that your code works. When you are using Sequel, it's helpful to integrate it into your testing framework, and it's generally best to run each test in its own transaction if possible. That keeps all tests isolated from each other, and it's simple as it handles all of the cleanup for you. Sequel doesn't ship with helpers for common libraries, as the exact code you need is often application-specific, but this page offers some examples that you can either use directly or build on.
== Transactional tests
These run each test in its own transaction, the recommended way to test.
=== minitest/spec
==== with minitest-hooks
require 'minitest/hooks/default'
DB = Sequel.postgres # change if using sqlite etc
class Minitest::HooksSpec
def around
DB.transaction(rollback: :always, auto_savepoint: true){super}
end
end
==== without minitest-hooks
DB = Sequel.postgres # change if using sqlite etc
class Minitest::Spec
def run(*args, &block)
DB.transaction(rollback: :always, auto_savepoint: true){super}
end
end
=== minitest/test
DB = Sequel.postgres # change if using sqlite etc
# Use this class as the base class for your tests
class SequelTestCase < Minitest::Test
def run(*args, &block)
DB.transaction(rollback: :always, auto_savepoint: true){super}
end
end
=== rspec >= 2.8
DB = Sequel.postgres # change the database if you are using sqlite etc.
RSpec.configure do |c|
c.around(:each) do |example|
DB.transaction(rollback: :always, auto_savepoint: true){example.run}
end
end
== Transactional testing with multiple databases
You can use the Sequel.transaction method to run a transaction on multiple databases, rolling all of them back. Instead of:
DB.transaction(rollback: :always)
Use Sequel.transaction with an array of databases:
Sequel.transaction([DB1, DB2, DB3], rollback: :always)
== Transactional testing with savepoints
Using minitest/spec and minitest-hooks, and assuming your database supports it, you can use
transactions around entire test suites, using savepoints around each test. This can significantly
speed up any test suite where there is a lot of shared setup in a before all hook. By using
savepoints per test, each test is isolated from the others, rolling back changes after it
completes, and by using transactions per test suite, you only pay the cost to load the data once
for the test suite, and it is automatically rolled back after the test suite completes.
Example:
require 'minitest/hooks/default'
class Minitest::HooksSpec
def around
DB.transaction(rollback: :always, savepoint: true, auto_savepoint: true){super}
end
def around_all
DB.transaction(rollback: :always){super}
end
end
describe "some large test suite" do
before(:all) do
DB[:table].import # Large number of rows
end
end
== Nontransactional tests
In some cases, it is not possible to use transactions. For example, if you are testing a web application that is running in a separate process, you don't have access to that process's database connections, so you can't run your examples in transactions. In that case, the best way to handle things is to cleanup after each test by deleting or truncating the database tables used in the test.
The order in which you delete/truncate the tables is important if you are using referential integrity in your database (which you should be doing). If you are using referential integrity, you need to make sure to delete in tables referencing other tables before the tables that are being referenced. For example, if you have an +albums+ table with an +artist_id+ field referencing the +artists+ table, you want to delete/truncate the +albums+ table before the +artists+ table. Note that if you have cyclic references in your database, you will probably need to write your own custom cleaning code.
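For example, with the +albums+/+artists+ schema described above, a correctly ordered cleanup would be:
# albums references artists, so delete from albums first
[:albums, :artists].each{|x| DB.from(x).delete}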
=== minitest/spec or rspec
describe "some test suite" do
after do
[:table1, :table2].each{|x| DB.from(x).truncate}
# or
[:table1, :table2].each{|x| DB.from(x).delete}
end
end
=== minitest/test
class SomeTestClass < Minitest::Test
def teardown
[:table1, :table2].each{|x| DB.from(x).truncate}
# or
[:table1, :table2].each{|x| DB.from(x).delete}
end
end
= Testing Sequel Itself
Sequel has multiple separate test suites. All test suites use minitest/spec, with the minitest-hooks and minitest-global_expectations extensions. To install the dependencies necessary to test Sequel, run gem install --development sequel.
== rake
The default rake task runs Sequel's core, model, plugin, and extension specs, the same as rake spec or rake spec_core spec_model spec_plugin.
== rake spec_core
The +spec_core+ rake task runs Sequel's core specs. These specs use a mocked database connection, and test for specific SQL used and for generally correct behavior.
== rake spec_model
The +spec_model+ rake task runs Sequel's model specs. These specs also use a mocked database connection, and operate similar to the core tests.
== rake spec_plugin
The +spec_plugin+ rake task runs the specs for the plugins and extensions that ship with Sequel. These also use a mocked database connection, and operate very similarly to the general Sequel core and model specs.
== rake spec_core_ext
The +spec_core_ext+ rake task runs the specs for the core_extensions extension. These are run separately from the other extension tests to make sure none of the other extensions require the core_extensions.
== rake spec_bin
The +spec_bin+ rake task runs the specs for bin/sequel. These use an SQLite3 database, and require either the sqlite3 (non-JRuby) or jdbc-sqlite3 (JRuby) gem.
== rake spec_adapter (e.g. rake spec_postgres)
The spec_adapter specs run against a real database connection with nothing mocked, and test for correct results. They are slower than the standard specs, but they will catch errors that are mocked out by the default specs, as well as show issues that only occur on a certain database, adapter, or a combination of the two.
These specs are broken down into two parts. For each database, there are specific specs that only apply to that database, and these are called the adapter specs. There are also shared specs that apply to all (or almost all) databases, these are called the integration specs. For database types that don't have specific adapter tests, you can use rake spec_integration to just run the shared integration tests.
Each adapter needs a specific gem installed in order to run. Please see the {connecting to a database guide}[rdoc-ref:doc/opening_databases.rdoc] for which gem you need to install for the adapter you are testing.
== Environment variables
Sequel uses environment variables when testing to specify either the database to be tested or specify how testing should be done. You can also specify the databases to test by copying spec/spec_config.rb.example to spec/spec_config.rb and modifying it. See that file for details. It may be necessary to use +spec_config.rb+ as opposed to an environment variable if your database connection cannot be specified by a connection string.
Sequel does not create test databases automatically, except for file-based databases such as SQLite/H2/HSQLDB/Derby. It's up to the user to create the test databases manually and give Sequel a valid connection string in an environment variable (or setup the connection object in +spec_config.rb+).
=== Connection Strings
The SEQUEL_INTEGRATION_URL environment variable specifies the Database connection URL to use for the adapter and integration specs. Additionally, when running the adapter specs, you can also use the SEQUEL_ADAPTER_URL environment variable (e.g. SEQUEL_POSTGRES_URL for spec_postgres).
=== Other
SEQUEL_ASYNC_THREAD_POOL :: Use the async_thread_pool extension when running the specs
SEQUEL_ASYNC_THREAD_POOL_PREEMPT :: Use the async_thread_pool extension when running the specs, with the :preempt_async_thread option
SEQUEL_CHECK_PENDING :: Try running all specs (note, can cause lockups for some adapters), and raise errors for skipped specs that don't fail
SEQUEL_COLUMNS_INTROSPECTION :: Use the columns_introspection extension when running the specs
SEQUEL_CONCURRENT_EAGER_LOADING :: Use the async_thread_pool extension and concurrent_eager_loading plugin when running the specs
SEQUEL_CONNECTION_VALIDATOR :: Use the connection validator extension when running the specs
SEQUEL_DUPLICATE_COLUMNS_HANDLER :: Use the duplicate columns handler extension with value given when running the specs
SEQUEL_ERROR_SQL :: Use the error_sql extension when running the specs
SEQUEL_FIBER_CONCURRENCY :: Use the fiber_concurrency extension when running the adapter and integration specs
SEQUEL_FREEZE_DATABASE :: Freeze the database before running the integration specs
SEQUEL_IDENTIFIER_MANGLING :: Use the identifier_mangling extension when running the specs
SEQUEL_INDEX_CACHING :: Use the index_caching extension when running the specs
SEQUEL_INTEGER64 :: Use the integer64 extension when running the adapter or integration specs
SEQUEL_MODEL_PREPARED_STATEMENTS :: Use the prepared_statements plugin when running the specs
SEQUEL_MODEL_THROW_FAILURES :: Use the throw_failures plugin when running the specs
SEQUEL_NO_CACHE_ASSOCIATIONS :: Don't cache association metadata when running the specs
SEQUEL_NO_PENDING :: Don't skip any specs, try running all specs (note, can cause lockups for some adapters)
SEQUEL_PG_AUTO_PARAMETERIZE :: Use the pg_auto_parameterize extension when running the postgres specs
SEQUEL_PG_TIMESTAMPTZ :: Use the pg_timestamptz extension when running the postgres specs
SEQUEL_PRIMARY_KEY_LOOKUP_CHECK_VALUES :: Use the primary_key_lookup_check_values extension when running the adapter or integration specs
SEQUEL_QUERY_PER_ASSOCIATION_DB_0_URL :: Run query-per-association integration tests with multiple databases (all 4 must be set to run)
SEQUEL_QUERY_PER_ASSOCIATION_DB_1_URL :: Run query-per-association integration tests with multiple databases (all 4 must be set to run)
SEQUEL_QUERY_PER_ASSOCIATION_DB_2_URL :: Run query-per-association integration tests with multiple databases (all 4 must be set to run)
SEQUEL_QUERY_PER_ASSOCIATION_DB_3_URL :: Run query-per-association integration tests with multiple databases (all 4 must be set to run)
SEQUEL_SPLIT_SYMBOLS :: Turn on symbol splitting when running the adapter and integration specs
SEQUEL_SYNCHRONIZE_SQL :: Use the synchronize_sql extension when running the specs
SEQUEL_TZINFO_VERSION :: Force the given tzinfo version when running the specs (e.g. '>=2')
sequel-5.63.0/doc/thread_safety.rdoc 0000664 0000000 0000000 00000003256 14342141206 0017346 0 ustar 00root root 0000000 0000000 = Thread Safety
Most Sequel usage (and all common Sequel usage) is thread safe by default. Specifically, multiple threads can operate on Database instances, Dataset instances, and Model classes concurrently without problems. In general, Database instances and Model classes are not modified after application startup, and Dataset instances are always frozen.
== Connection Pool
In order to allow multiple threads to operate on the same database at the same time, Sequel uses a connection pool. The connection pool is designed so that a thread uses a connection for the minimum amount of time, returning the connection to the pool as soon as it is done using the connection. If a thread requests a connection and the pool does not have an available connection, a new connection will be created. If the maximum number of connections in the pool has already been reached, the thread will block until a connection is available or the connection pool timeout has elapsed (in which case a Sequel::PoolTimeout error will be raised).
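For example, the pool size and the time to wait for a connection are set with options when the Database is created. A minimal sketch (the connection URL is hypothetical; :max_connections and :pool_timeout are the standard Sequel options):

  DB = Sequel.connect('postgres://localhost/my_db',
    max_connections: 10, # maximum number of connections in the pool
    pool_timeout: 5)     # seconds to wait before raising Sequel::PoolTimeout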
== Exceptions
This is a small list of things that are specifically non thread-safe. This is not an exhaustive list; there may be cases not mentioned here.
1) Model instances: Model instances are not thread-safe unless they are frozen first. Multiple threads should not operate on an unfrozen model instance concurrently.
2) Model class modifications: Model class modifications, such as adding associations and loading plugins, are not designed to be thread safe. You should not modify a class in one thread if any other thread can concurrently access it. Model subclassing is designed to be thread-safe, so you can create a model subclass in a thread and modify it safely.
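For example, a minimal sketch of safely sharing a model instance between threads by freezing it first (+Album+ is a hypothetical model class):

  album = Album.first
  album.freeze
  # Concurrent reads of a frozen instance are safe:
  threads = Array.new(2){Thread.new{album.name}}
  threads.each(&:join)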
sequel-5.63.0/doc/transactions.rdoc 0000664 0000000 0000000 00000023312 14342141206 0017227 0 ustar 00root root 0000000 0000000 = Database Transactions
Sequel uses autocommit mode by default for all of its database adapters, so in general in Sequel if you want to use database transactions, you need to be explicit about it. There are a few cases where transactions are used implicitly by default:
* Dataset#import to insert many records at once
* Dataset#paged_each to iterate over large datasets in batches
* Model#save
* Model#destroy
* Migrations if the database supports transactional schema
* Database#use_cursor in the postgres adapter
* Dataset#lock on PostgreSQL if given a block
* setter methods created by the association_pks plugin
* move* methods in the list plugin
Everywhere else, it is up to you to use a database transaction if you want to.
== Basic Transaction Usage
In Sequel, the Database#transaction method should be called if you want to use a database transaction. This method must be called with a block. If the block does not raise an exception, the transaction is committed:
DB.transaction do # BEGIN
DB[:foo].insert(1) # INSERT
end # COMMIT
If the block raises a Sequel::Rollback exception, the transaction is rolled back, but no exception is raised outside the block:
DB.transaction do # BEGIN
raise Sequel::Rollback
end # ROLLBACK
# no exception raised
If any other exception is raised, the transaction is rolled back, and the exception is raised outside the block:
DB.transaction do # BEGIN
raise ArgumentError
end # ROLLBACK
# ArgumentError raised
If you want the current transaction to be rolled back when the transaction block exits instead of being committed (even if an exception is not raised), use Database#rollback_on_exit:
DB.transaction do # BEGIN
DB.rollback_on_exit
end # ROLLBACK
If you want Sequel::Rollback exceptions to be reraised, use the rollback: :reraise option:
DB.transaction(rollback: :reraise) do # BEGIN
raise Sequel::Rollback
end # ROLLBACK
# Sequel::Rollback raised
If you always want to rollback (useful for testing), use the rollback: :always option:
DB.transaction(rollback: :always) do # BEGIN
DB[:foo].insert(1) # INSERT
end # ROLLBACK
# no exception raised
If you want to check whether you are currently in a transaction, use the Database#in_transaction? method:
DB.in_transaction? # false
DB.transaction do
DB.in_transaction? # true
end
== Transaction Hooks
You can add hooks to an in progress transaction that are called after the transaction commits or rolls back:
x = nil
DB.transaction do
DB.after_commit{x = 1}
DB.after_rollback{x = 2}
x # nil
end
x # 1
x = nil
DB.transaction do
DB.after_commit{x = 1}
DB.after_rollback{x = 2}
raise Sequel::Rollback
end
x # 2
== Nested Transaction Calls / Savepoints
You can nest calls to transaction, which by default just reuses the existing transaction:
DB.transaction do # BEGIN
DB.transaction do
DB[:foo].insert(1) # INSERT
end
end # COMMIT
You can use the savepoint: true option in the inner transaction to explicitly use a savepoint (if the database supports it):
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB[:foo].insert(1) # INSERT
end # RELEASE SAVEPOINT
end # COMMIT
You can use the auto_savepoint: true option in the outer transaction to explicitly use a savepoint in the inner transaction (if the database supports it):
DB.transaction(auto_savepoint: true) do # BEGIN
DB.transaction do # SAVEPOINT
DB[:foo].insert(1) # INSERT
end # RELEASE SAVEPOINT
end # COMMIT
If a Sequel::Rollback exception is raised inside the savepoint block, it will only rollback to the savepoint:
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
raise Sequel::Rollback
end # ROLLBACK TO SAVEPOINT
# no exception raised
end # COMMIT
Other exceptions, unless rescued inside the outer transaction block, will rollback the savepoint and the outer transactions, since they are reraised by the transaction code:
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
raise ArgumentError
end # ROLLBACK TO SAVEPOINT
end # ROLLBACK
# ArgumentError raised
If you want the current savepoint to be rolled back when the savepoint block exits instead of being committed (even if an exception is not raised), use Database#rollback_on_exit(savepoint: true):
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: true)
end # ROLLBACK TO SAVEPOINT
end # COMMIT
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: true)
end # ROLLBACK TO SAVEPOINT
end # RELEASE SAVEPOINT
end # COMMIT
If you want the current savepoint and potentially enclosing savepoints to be rolled back when the savepoint blocks exit (even if an exception is not raised), use Database#rollback_on_exit(savepoint: integer):
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: 2)
end # ROLLBACK TO SAVEPOINT
end # ROLLBACK TO SAVEPOINT
end # COMMIT
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.transaction(savepoint: true) do # SAVEPOINT
DB.rollback_on_exit(savepoint: 3)
end # ROLLBACK TO SAVEPOINT
end # ROLLBACK TO SAVEPOINT
end # ROLLBACK
=== Savepoint Hooks
When using savepoints, you can use the +:savepoint+ option to +after_commit+ or +after_rollback+ to use a savepoint hook. For +after_commit+, this will only run the hook after transaction commit if all enclosing savepoints are released (not rolled back). For +after_rollback+, this will run the hook after any enclosing savepoint is rolled back (before transaction commit), or after the transaction is rolled back if all enclosing savepoints are released:
x = nil
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.after_commit(savepoint: true){x = 1}
DB.after_rollback(savepoint: true){x = 2}
x # nil
end # RELEASE SAVEPOINT
x # nil
end # COMMIT
x # 1
x = nil
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.after_commit(savepoint: true){x = 1}
DB.after_rollback(savepoint: true){x = 2}
x # nil
raise Sequel::Rollback
end # ROLLBACK TO SAVEPOINT
x # 2
end # COMMIT
x # 2
x = nil
DB.transaction do # BEGIN
DB.transaction(savepoint: true) do # SAVEPOINT
DB.after_commit(savepoint: true){x = 1}
DB.after_rollback(savepoint: true){x = 2}
end # RELEASE SAVEPOINT
x # nil
raise Sequel::Rollback
end
x # 2
== Prepared Transactions / Two-Phase Commit
Sequel supports database prepared transactions on PostgreSQL, MySQL, and H2. With prepared transactions, at the end of the transaction, the transaction is not immediately committed (it acts like a rollback). Later, you can call +commit_prepared_transaction+ to commit the transaction or +rollback_prepared_transaction+ to roll the transaction back. Prepared transactions are usually used with distributed databases to make sure all databases commit the same transaction or none of them do.
To use prepared transactions in Sequel, you provide a string as the value of the :prepare option:
DB.transaction(prepare: 'foo') do # BEGIN
DB[:foo].insert(1) # INSERT
end # PREPARE TRANSACTION 'foo'
Later, you can commit the prepared transaction:
DB.commit_prepared_transaction('foo')
or roll the prepared transaction back:
DB.rollback_prepared_transaction('foo')
== Transaction Isolation Levels
The SQL standard supports 4 isolation levels: READ UNCOMMITTED, READ COMMITTED, REPEATABLE READ, and SERIALIZABLE. Not all databases implement the levels as specified in the standard (or implement the levels at all), but on most databases, you can specify which transaction isolation level you want to use via the :isolation option to Database#transaction. The isolation level is specified as one of the following symbols: :uncommitted, :committed, :repeatable, and :serializable. Using this option makes Sequel use the correct transaction isolation syntax for your database:
DB.transaction(isolation: :serializable) do # BEGIN
# SET TRANSACTION ISOLATION LEVEL SERIALIZABLE
DB[:foo].insert(1) # INSERT
end # COMMIT
== Automatically Restarting Transactions
Sequel offers the ability to automatically restart transactions if specific types of errors are detected. For example, if you want to automatically restart a transaction if a serialization failure is detected:
DB.transaction(isolation: :serializable, retry_on: [Sequel::SerializationFailure]) do
ModelClass.find_or_create(name: 'Foo')
end
At the serializable transaction isolation level, find_or_create may raise a Sequel::SerializationFailure exception if multiple threads simultaneously run that code. With the :retry_on option set, the transaction will be automatically retried until it succeeds.
Note that automatic retrying should not be used unless the entire transaction
block is idempotent, as otherwise it can cause non-idempotent
behavior to execute multiple times. For example, with the following code:
DB.transaction(isolation: :serializable, retry_on: [Sequel::SerializationFailure]) do
logger.info 'Ensuring existence of ModelClass with name Foo'
ModelClass.find_or_create(name: 'Foo')
end
The logger.info method will be called multiple times if there is a serialization failure.
The :num_retries option can be used to set the maximum number of times to retry. It is set to 5 times by default.
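For example, a sketch allowing up to 10 retries instead of the default 5:

  DB.transaction(isolation: :serializable,
                 retry_on: [Sequel::SerializationFailure],
                 num_retries: 10) do
    ModelClass.find_or_create(name: 'Foo')
  end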
sequel-5.63.0/doc/validations.rdoc 0000664 0000000 0000000 00000065476 14342141206 0017055 0 ustar 00root root 0000000 0000000 = Model Validations
This guide is based on http://guides.rubyonrails.org/active_record_validations.html
== Overview
This guide is designed to teach you how to use Sequel::Model's validation support. It attempts
to explain how Sequel's validation support works, what validations are
useful for, and how to use the +validation_helpers+ plugin to add specific
types of validations to your models.
== Why Validations?
Validations are primarily useful for associating error messages to display to the user
with specific attributes on the model. It is also possible to use them to enforce
data integrity for model instances, but that's not recommended unless
the only way to modify the database is through model instances, or you have
complex data integrity requirements that aren't possible to specify via
database-level constraints.
== Data Integrity
Data integrity is best handled by the database itself. For example, if you have
a date column that should never contain a NULL value, the column should be
specified in the database as NOT NULL. If you have an integer column that should
only have values from 1 to 10, there should be a CHECK constraint that ensures
that the value of that column is between 1 and 10. And if you have a varchar
column where the length of the entries should be between 2 and 255, you should
be setting the size of the varchar column to 255, and using a CHECK constraint
to ensure that all values have at least two characters.
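In Sequel, all of these constraints can be declared when creating the table. A minimal sketch (the +albums+ table and its columns are hypothetical):

  DB.create_table(:albums) do
    primary_key :id
    Date :release_date, null: false          # NOT NULL
    Integer :rating
    String :name, size: 255                  # varchar(255)
    constraint(:rating_range, rating: 1..10) # CHECK (rating >= 1 AND rating <= 10)
    constraint(:name_min_length){char_length(name) >= 2}
  end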
Unfortunately, sometimes there are situations where that is not possible. For
example, if you don't have control over the schema and cannot add constraints,
or you are using an older version of MySQL (which parses but ignores CHECK constraints), it may be necessary to use a model validation
to enforce data integrity.
In some cases you may have data integrity requirements that are difficult to
enforce via database constraints, especially if you are targeting multiple
database types.
Validations are generally easier to write than database constraints,
so if data integrity isn't of great importance, using validations to provide minimal
data integrity may be acceptable.
== Usage
Regardless of whether you are using validations for data integrity or just for
error messages, the usage is the same. Whenever you attempt to save a model
instance, before sending the INSERT or UPDATE query to the database,
Sequel::Model will attempt to validate the instance by calling
+validate+. If +validate+ does not add any errors to the object, the object is
considered valid, and valid? will return true. If +validate+ adds any errors
to the object, valid? will return false, and the save will either raise
a Sequel::ValidationFailed exception (the default), or return nil (if +raise_on_save_failure+
is false).
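For example, assuming an +Album+ model whose +validate+ method adds an error when the name is missing:

  album = Album.new
  album.valid? # => false
  album.save   # raises Sequel::ValidationFailed
  album.raise_on_save_failure = false
  album.save   # => nil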
By validating the object before sending the database query, Sequel attempts to
ensure that invalid objects are not saved in the database. However, if you
are not enforcing the same validations in the database via constraints, it's
possible that invalid data can get added to the database via some other method.
This leads to odd cases such as retrieving a model object from the database,
not making any changes to it, attempting to save it, and having the save
raise an error.
== Skipping Validations
Sequel::Model uses the +save+ method to save model objects, and all
saving of model objects passes through the +save+ method. This means that all
saving of model objects goes through the validation process.
The only way to skip validations when saving a model object is to pass the
validate: false option to +save+. If you use that option, +save+ will
not attempt to validate the object before saving it.
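For example:

  album.save(validate: false) # skips validation, goes straight to the INSERT/UPDATE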
Note that it's always possible to update the instance's database row without using
+save+, by using a Sequel dataset to update it, or updating it via another program.
Validations will only be run if you call
+save+ on the model object, or another model method that calls +save+. For example,
the +create+ class method instantiates a new instance of the model, and then calls
+save+, so it validates the object. However, the +insert+ class method is a dataset
method that just inserts the raw hash into the database, so it doesn't validate the
object.
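To illustrate the difference, using the +Album+ model from this guide:

  Album.create(name: 'RF') # instantiates a model object, validates, then saves
  Album.insert(name: 'RF') # inserts the hash directly, skipping validations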
== valid? and +validate+
Sequel::Model uses the valid? method to check whether or not a
model instance is valid. This method should not be overridden. Instead, the
+validate+ method should be overridden to add validations to the model:
class Album < Sequel::Model
def validate
super
errors.add(:name, 'cannot be empty') if !name || name.empty?
end
end
Album.new.valid? # false
Album.new(name: '').valid? # false
Album.new(name: 'RF').valid? # true
If the valid? method returns false, you can call the +errors+ method to
get an instance of Sequel::Model::Errors describing the errors on the model:
a = Album.new
# => #<Album @values={}>
a.valid?
# => false
a.errors
# => {:name=>["cannot be empty"]}
You may notice that the +errors+ method appears to return a hash. That's because
Sequel::Model::Errors is a subclass of Hash.
Note that calling the +errors+ method before the valid? method will result
in +errors+ being empty:
Album.new.errors
# => {}
So just remember that you shouldn't check +errors+ until after you call valid?.
Sequel::Model::Errors has some helper methods that make it easy to get an array of
all of the instance's errors, or for checking for errors on a specific attribute. These
will be covered later in this guide.
== +validation_helpers+
While Sequel::Model does provide a validations framework, it does not define
any built-in validation helper methods that you can call. However, Sequel ships with a
plugin called +validation_helpers+ that handles most basic validation needs. So instead of
specifying validations like this:
class Album < Sequel::Model
def validate
super
errors.add(:name, 'cannot be empty') if !name || name.empty?
errors.add(:name, 'is already taken') if name && new? && Album[{name: name}]
errors.add(:website, 'cannot be empty') if !website || website.empty?
errors.add(:website, 'is not a valid URL') unless website =~ /\Ahttps?:\/\//
end
end
You can call simple methods such as:
class Album < Sequel::Model
plugin :validation_helpers
def validate
super
validates_presence [:name, :website]
validates_unique :name
validates_format /\Ahttps?:\/\//, :website, message: 'is not a valid URL'
end
end
Other than +validates_unique+, which has its own API, the methods defined by
+validation_helpers+ have one of the following two APIs:
(atts, opts={}):: For methods such as +validates_presence+, which do not
take an additional argument.
(arg, atts, opts={}):: For methods such as +validates_format+, which take an
additional argument.
For both of these APIs, +atts+ is either a column symbol or array of column symbols,
and +opts+ is an optional options hash.
The following methods are provided by +validation_helpers+:
=== +validates_presence+
This method checks that the specified attributes are not blank. In general, if an object responds to blank?, it calls the method to determine if the object is blank. Otherwise, nil is considered blank, empty strings or strings that just contain whitespace are blank, and objects that respond to empty? and return true are considered blank. All other objects are considered non-blank for the purposes of +validates_presence+. This means that +validates_presence+ is safe to use on boolean columns where you want to ensure that either true or false is used, but not NULL.
class Album < Sequel::Model
def validate
super
validates_presence [:name, :website, :debut_album]
end
end
=== +validates_not_null+
This is similar to +validates_presence+, but only checks for NULL/nil values, allowing other blank objects such as empty strings or strings with just whitespace.
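For example, to allow blank strings but reject NULL values:

  class Album < Sequel::Model
    def validate
      super
      validates_not_null [:name, :website]
    end
  end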
=== +validates_format+
+validates_format+ is used to ensure that the string value of the specified attributes matches the specified regular expression. It's useful for checking that fields such as email addresses, URLs, UPC codes, ISBN codes, and the like, are in a specific format. It can also be used to validate that only certain characters are used in the string.
class Album < Sequel::Model
def validate
super
validates_format /\A\d\d\d-\d-\d{7}-\d-\d\z/, :isbn
validates_format /\A[0-9a-zA-Z:' ]+\z/, :name
end
end
=== +validates_exact_length+, +validates_min_length+, +validates_max_length+, +validates_length_range+
These methods all deal with ensuring that the length of the specified attribute matches the criteria specified by the first argument to the method. +validates_exact_length+ is for checking that the length of the attribute is equal to that value, +validates_min_length+ is for checking that the length of the attribute is greater than or equal to that value, +validates_max_length+ is for checking that the length of the attribute is less than or equal to that value, and +validates_length_range+ is for checking that the length of the attribute falls in the value, which should be a range or an object that responds to include?.
class Album < Sequel::Model
def validate
super
validates_exact_length 17, :isbn
validates_min_length 3, :name
validates_max_length 100, :name
validates_length_range 3..100, :name
end
end
=== +validates_integer+, +validates_numeric+
These methods check that the specified attributes can be valid integers or valid floats. +validates_integer+ tests the attribute value using Kernel.Integer and +validates_numeric+ tests the attribute using Kernel.Float. If the Kernel methods raise an exception, the validation fails, otherwise it succeeds.
class Album < Sequel::Model
def validate
super
validates_integer :copies_sold
validates_numeric :replaygain
end
end
=== +validates_includes+
+validates_includes+ checks that the specified attributes are included in the first argument to the method, which is usually an array, but can be any object that responds to include?.
class Album < Sequel::Model
def validate
super
validates_includes [1, 2, 3, 4, 5], :rating
end
end
=== +validates_operator+
+validates_operator+ checks that a given +operator+ method returns a truthy value when called on the attribute with a specified value for comparison. Generally, this is used for inequality checks (>, >=, etc.), but any method that can be called on the attribute that accepts an argument and returns a truthy value may be used.
class Album < Sequel::Model
def validate
super
validates_operator(:>, 3, :tracks)
end
end
=== +validates_type+
+validates_type+ checks that the specified attributes are instances of the class specified in the first argument. The class can be specified as the class itself, as a string or symbol with the class name, or as an array of classes.
class Album < Sequel::Model
def validate
super
validates_type String, [:name, :website]
validates_type :Artist, :artist
validates_type [String, Integer], :foo
end
end
=== +validates_schema_types+
+validates_schema_types+ uses the database metadata for the model's table to determine which ruby type(s) should be used for the given database type, and calls +validates_type+ with that ruby type. It's designed to be used with the default raise_on_typecast_failure = false setting, where Sequel will attempt to typecast values, but silently ignore any errors raised:
album = Album.new
album.copies_sold = '1'
album.copies_sold # => 1
album.copies_sold = 'banana'
album.copies_sold # => 'banana'
In general, you can call +validates_schema_types+ with all columns. If any of those columns has a value that doesn't match the type that Sequel expects, it's probably because the column was set and Sequel was not able to typecast it correctly, which means it probably isn't valid. For example, let's say that you want to check that a couple of columns contain valid dates:
class Album < Sequel::Model
def validate
super
validates_schema_types [:release_date, :record_date]
end
end
album = Album.new
album.release_date = 'banana'
album.release_date # => 'banana'
album.record_date = '2010-05-17'
album.record_date # => #<Date: 2010-05-17 ((2455334j,0s,0n),+0s,2299161j)>
album.valid? # => false
album.errors # => {:release_date=>["is not a valid date"]}
For web applications, you usually want the default setting, so that you can accept all of the input without raising an error, and then present the user with all error messages. If raise_on_typecast_failure = true is set and the user submits any invalid data, Sequel will immediately raise an error. +validates_schema_types+ is helpful because it allows you to check for typecasting errors on columns, and provides a good default error message stating that the attribute is not of the expected type.
=== +validates_unique+
+validates_unique+ has a similar but different API than the other +validation_helpers+ methods. It takes an arbitrary number of arguments, which should be column symbols or arrays of column symbols. If any argument is a symbol, Sequel sets up a unique validation for just that column. If any argument is an array of symbols, Sequel sets up a unique validation for the combination of the columns. This means that you get different behavior depending on whether you call the object with an array or with separate arguments. For example:
validates_unique(:name, :artist_id)
This will set up 2 separate uniqueness validations. It will make it so that no two albums can have the same name, and that each artist can only be associated with one album. In general, that's probably not what you want. You probably want to allow two albums to have the same name, unless they are by the same artist. To do that, you need to use an array:
validates_unique([:name, :artist_id])
That sets up a single uniqueness validation for the combination of the fields.
You can mix and match the two approaches. For example, if all albums should have a unique UPC, and no artist can have duplicate album names:
validates_unique(:upc, [:name, :artist_id])
+validates_unique+ also accepts a block to scope the uniqueness constraint. For example, if you want to ensure that all active albums have a unique name, but inactive albums can duplicate the name:
validates_unique(:name){|ds| ds.where(:active)}
If you provide a block, it is called with the dataset to use for the uniqueness check, which you can then filter to scope the uniqueness validation to a subset of the model's dataset.
You can also include an options hash as the last argument. Unlike the other validations, the options hash for +validates_unique+ only recognizes these options:
:dataset :: The base dataset to use for the unique query, defaults to the model's dataset
:message :: The message to use
:only_if_modified :: Only check the uniqueness if the object is new or one of the columns has been modified (true by default).
:where :: A callable object where call takes three arguments, a dataset,
the current object, and an array of columns, and should return
a modified dataset that is filtered to include only rows with
the same values as the current object for each column in the array.
This is useful any time the unique constraints are derived from
the columns and not the columns themselves (such as unique constraints
on lower(column)); an example is sketched below.
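For example, a sketch of a case-insensitive uniqueness validation using the :where option (this assumes a matching unique index on lower(name) in the database):

  validates_unique(:name, where: (lambda do |ds, obj, cols|
    ds.where(cols.map do |c|
      v = obj.send(c)
      v = v.downcase if v
      [Sequel.function(:lower, c), v]
    end)
  end))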
+validates_unique+ is the only method in +validation_helpers+ that checks with the database. Attempting to validate uniqueness outside of the database suffers from a race condition, so any time you want to add a uniqueness validation, you should make sure to add a uniqueness constraint or unique index on the underlying database table. See the {"Migrations and Schema Modification" guide}[rdoc-ref:doc/migration.rdoc] for details on how to do that.
== +validation_helpers+ Options
All other +validation_helpers+ methods accept the following options:
=== :message
The :message option overrides the default validation error message. Can be either a string or a proc. If a string, it is used directly. If a proc, the proc is called and should return a string. If the validation method takes an argument before the array of attributes, that argument is passed as an argument to the proc.
class Album < Sequel::Model
def validate
super
validates_presence :copies_sold, message: 'was not given'
validates_min_length 3, :name, message: lambda{|s| "should be more than #{s} characters"}
end
end
=== :allow_nil
The :allow_nil option skips the validation if the attribute value is nil or if the attribute is not present. It's commonly used when you have a +validates_presence+ method already on the attribute, and don't want multiple validation errors for the same attribute:
class Album < Sequel::Model
def validate
super
validates_presence :copies_sold
validates_integer :copies_sold, allow_nil: true
end
end
Without the :allow_nil option to +validates_integer+, if the copies_sold attribute was nil, you would get two separate validation errors, instead of a single validation error.
=== :allow_blank
The :allow_blank is similar to the :allow_nil option, but instead of just skipping the attribute for nil values, it skips the attribute for all blank values. For example, let's say that artists can have a website. If they have one, it should be formatted like a URL, but it can be nil or an empty string if they don't have one.
class Album < Sequel::Model
def validate
super
validates_format /\Ahttps?:\/\//, :website, allow_blank: true
end
end
a = Album.new
a.website = ''
a.valid? # true
=== :allow_missing
The :allow_missing option is different from the :allow_nil option, in that instead of checking if the attribute value is nil, it checks if the attribute is present in the model instance's values hash. :allow_nil will skip the validation when the attribute is in the values hash and has a nil value and when the attribute is not in the values hash. :allow_missing will only skip the validation when the attribute is not in the values hash. If the attribute is in the values hash but has a nil value, :allow_missing will not skip it.
The purpose of this option is to work correctly with missing columns when inserting or updating records. Sequel only sends the attributes in the values hash when doing an insert or update. If the attribute is not present in the values hash, Sequel doesn't specify it, so the database will use the table's default value when inserting the record, or not modify the value when saving it. This is different from having an attribute in the values hash with a value of nil, which Sequel will send as NULL. If your database table has a non NULL default, this may be a good option to use. You don't want to use allow_nil, because if the attribute is in values but has a value nil, Sequel will attempt to insert a NULL value into the database, instead of using the database's default.
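For example, if the copies_sold column has a non NULL default in the database:

  class Album < Sequel::Model
    def validate
      super
      # Only validate when :copies_sold is present in the values hash,
      # so the database default applies when it is absent.
      validates_integer :copies_sold, allow_missing: true
    end
  end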
== Conditional Validation
Because Sequel uses the +validate+ instance method to handle validation, making validations conditional is easy as it works exactly the same as ruby's standard conditionals. For example, if you only want to validate an attribute when creating an object:
validates_presence :name if new?
If you only want to validate the attribute when updating an existing object:
validates_integer :copies_sold unless new?
Let's say you only want to make a validation conditional on the status of the object:
validates_presence :name if status_id > 1
validates_integer :copies_sold if status_id > 3
You can use all the standard ruby conditional expressions, such as +case+:
case status_id
when 1
validates_presence :name
when 2
validates_presence [:name, :artist_id]
when 3
validates_presence [:name, :artist_id, :copies_sold]
end
You can make the input to some validations dependent on the values of another attribute:
validates_min_length(status_id > 2 ? 5 : 10, [:name])
validates_presence(status_id < 2 ? :name : [:name, :artist_id])
Basically, there's no special syntax you have to use for conditional validations. Just handle conditionals the way you would in other ruby code.
== Default Error Messages
These are the default error messages for all of the helper methods in +validation_helpers+:
:exact_length :: is not #{arg} characters
:format :: is invalid
:includes :: is not in range or set: #{arg.inspect}
:integer :: is not a number
:length_range :: is too short or too long
:max_length :: is longer than #{arg} characters
:min_length :: is shorter than #{arg} characters
:not_null :: is not present
:numeric :: is not a number
:schema_types :: is not a valid #{schema_type}
:type :: is not a #{arg}
:presence :: is not present
:unique :: is already taken
== Modifying the Default Options
You can override the private Sequel::Model#default_validation_helpers_options method to override the default settings on a per validation type basis:
class Sequel::Model
private
def default_validation_helpers_options(type)
case type
when :presence
{message: 'cannot be empty'}
when :includes
{message: 'invalid option', allow_nil: true}
when :max_length
{message: lambda{|i| "cannot be more than #{i} characters"}, allow_nil: true}
when :format
{message: 'contains invalid characters', allow_nil: true}
else
super
end
end
end
== Custom Validations
Just as the first validation example showed, you aren't limited to the validation methods defined by +validation_helpers+. Inside the +validate+ method, you can add your own validations by adding to the instance's errors using errors.add whenever an attribute is not valid:
class Album < Sequel::Model
def validate
super
errors.add(:release_date, 'cannot be before record date') if release_date < record_date
end
end
Just like conditional validations, with custom validations you are just using the standard ruby conditionals, and calling errors.add with the column symbol and the error message if you detect invalid data.
It's fairly easy to create your own custom validations that can be reused in all your models. For example, if there is a common need to validate that one column in the model comes before another column:
class Sequel::Model
def validates_after(col1, col2)
errors.add(col1, "cannot be before #{col2}") if send(col1) < send(col2)
end
end
class Album < Sequel::Model
def validate
super
validates_after(:release_date, :record_date)
end
end
== Setting Validations for All Models
Let's say you want to add some default validations that apply to all of your model classes. It's fairly easy to do by overriding the +validate+ method in Sequel::Model, adding some validations to it, and if you override +validate+ in your model classes, just make sure to call +super+.
class Sequel::Model
def self.string_columns
@string_columns ||= columns.reject{|c| db_schema[c][:type] != :string}
end
def validate
super
validates_format(/\A[^\x00-\x08\x0e-\x1f\x7f\x81\x8d\x8f\x90\x9d]*\z/n,
model.string_columns,
message: "contains invalid characters")
end
end
This will make sure that all string columns in the model are validated to make sure they don't contain any invalid characters. Just remember that if you override the +validate+ method in your model classes, you need to call +super+:
class Album < Sequel::Model
def validate
super # Important!
validates_presence :name
end
end
If you forget to call +super+, the validations that you defined in Sequel::Model will not be enforced. It's a good idea to call super whenever you override one of Sequel::Model's methods, unless you specifically do not want the default behavior.
== Sequel::Model::Errors
As mentioned earlier, Sequel::Model::Errors is a subclass of Hash with a few special methods, the most common of which are described here:
=== +add+
+add+ is the method used to add error messages for a given column. It takes the column symbol as the first argument and the error message as the second argument:
errors.add(:name, 'is not valid')
=== +on+
+on+ is a method usually used after validation has been completed, to determine if there were any errors on a given attribute. It takes the column symbol, and returns an array of error messages if there were any, or nil if not:
errors.on(:name)
If you want to make some validations dependent upon the results of other validations, you may want to use +on+ inside your validates method:
validates_integer(:release_date) unless errors.on(:record_date)
Here, you don't care about validating the release date if there were validation errors for the record date.
=== +full_messages+
+full_messages+ returns an array of error messages for the object. It's commonly called after validation to get a list of error messages to display to the user:
album.errors
# => {:name=>["cannot be empty"]}
album.errors.full_messages
# => ["name cannot be empty"]
Note that the column names used in the errors are used verbatim in the error messages. If you want full control over the error messages, you can use +add+ with a literal string:
errors.add(:name, Sequel.lit("Album name is not valid"))
errors.full_messages
# => ["Album name is not valid"]
Alternatively, feel free to override Sequel::Model::Errors#full_messages. As long as it returns an array of strings, overriding it is completely safe.
=== +count+
+count+ returns the total number of error messages in the errors.
album.errors.count # => 1
== Other Validation Plugins
=== +constraint_validations+
Sequel ships with a +constraint_validations+ plugin and extension, that allows you to setup constraints when creating your database tables, and have Model validations automatically created that mirror those constraints.
=== +auto_validations+
The +auto_validations+ plugin uses the NOT NULL and type information obtained from parsing the database schema, and the unique index information from parsing the database's index information, and automatically sets up not_null, string length, schema type, and unique validations. If you don't require customizing validation messages on a per-column basis, it can DRY up a lot of validation code.
=== +validation_class_methods+
Sequel ships with the +validation_class_methods+ plugin, which uses class methods instead of instance methods to define validations. It exists mostly for legacy compatibility, but it is still supported.
sequel-5.63.0/doc/virtual_rows.rdoc 0000664 0000000 0000000 00000021172 14342141206 0017261 0 ustar 00root root 0000000 0000000 = Virtual Row Blocks
Dataset methods where, order, and select all take blocks that are referred to as
virtual row blocks. Many other dataset methods pass the blocks
they are given into one of those three methods, so there are actually
many Sequel::Dataset methods that take virtual row blocks.
== Why Virtual Rows
Virtual rows offer a less verbose way to express many queries. For example,
by default if you want to express an inequality filter in Sequel, you can do:
dataset.where(Sequel[:a] > Sequel.function(:b, :c))
# WHERE (a > b(c))
With virtual rows, you can use the less verbose:
dataset.where{a > b(c)}
# WHERE (a > b(c))
== Regular Procs vs Instance Evaled Procs
Virtual row blocks behave differently depending on whether the block accepts
an argument. If the block accepts an argument, it is called with an instance
of Sequel::SQL::VirtualRow. If it does not accept an argument, it is
evaluated in the context of an instance of Sequel::SQL::VirtualRow.
ds = DB[:items]
# Regular block
ds.where{|o| o.column > 1}
# WHERE (column > 1)
# Instance-evaled block
ds.where{column > 1}
# WHERE (column > 1)
If you aren't familiar with the difference between regular blocks and instance
evaled blocks, inside regular blocks methods called without an explicit receiver call
the method on the receiver in the surrounding scope, while instance
evaled blocks call the method on the receiver of the instance_eval call (the
Sequel::SQL::VirtualRow instance in this case).
In both cases, local variables available in the surrounding scope will be available
inside the block. However, instance variables in the surrounding scope will not
be available inside the block if using an instance evaled block, and methods called
without an explicit receiver inside an instance evaled block will not call
methods in the surrounding scope. For example:
def self.a
42
end
b = 32
@d = 100
# Regular block
ds.where{|o| o.c > a - b + @d}
# WHERE (c > 110)
# Instance-evaled block
ds.where{c > a - b + @d}
# WHERE (c > ((a - 32) + NULL))
There are three related differences here:
* Regular blocks use +o.c+ instead of just +c+
* +a+ results in 42 in the regular block, but creates an expression object in the instance evaled block
* @d results in 100 in the regular block, but nil in the instance evaled block
In the regular block, you need to call +c+ with an explicit receiver (the virtual
row block argument), while in the instance evaled block +c+ can be called directly,
as the default receiver has changed inside the block.
For +a+, note how ruby calls the method on
the receiver of the surrounding scope in the regular block, which returns an integer,
and does the subtraction before Sequel gets access to it. In the instance evaled
block, calling +a+ without a receiver calls the a method on the VirtualRow instance.
For @d, note that in a regular block, the value hasn't changed, but in the instance evaled
block, instance variable access returns nil.
For +b+, note that it operates the same in both cases, as it is a local variable.
The choice for whether to use a regular block or an instance evaled block is
up to you. The same things can be accomplished with both.
Instance evaled blocks tend to produce shorter code, but by modifying the scope
they can be more difficult to understand.
If you are not sure which to use, use instance evaled blocks unless you need to
call methods or access instance variables of the surrounding scope inside the block.
== Local Variables vs Method Calls
If you have a method that accepts 0 arguments and has the same name as a local
variable, you can call it with () to differentiate the method call from the
local variable access. This is mostly useful in instance evaled blocks:
b = 32
ds.where{b() > b}
# WHERE b > 32
It's also possible to use an explicit self receiver in instance evaled blocks:
b = 32
ds.where{self.b > b}
# WHERE b > 32
== VirtualRow Methods
VirtualRow is a class that returns SQL::Identifiers or SQL::Functions depending
on how it is called.
== SQL::Identifiers - Regular columns
SQL::Identifiers can be thought of as regular column references in SQL,
not qualified by any table. You get an SQL::Identifier if the method is called
without arguments:
ds.where{|o| o.column > 1}
ds.where{column > 1}
# WHERE (column > 1)
== SQL::QualifiedIdentifiers - Qualified columns
You can create qualified identifiers by calling #[] on an identifier:
ds.where{|o| o.table[:column] > 1}
ds.where{table[:column] > 1}
# WHERE table.column > 1
== SQL::Functions - SQL function calls
SQL::Functions can be thought of as function calls in SQL. You get a simple
function call if you call a method with arguments:
ds.where{|o| o.function(1) > 1}
ds.where{function(1) > 1}
# WHERE function(1) > 1
To call a SQL function with multiple arguments, just use those arguments in
your function call:
ds.where{|o| o.function(1, o.a) > 1}
ds.where{function(1, a) > 1}
# WHERE function(1, a) > 1
If the SQL function does not accept any arguments, create an identifier, then
call the function method on it to produce a function:
ds.select{|o| o.version.function}
ds.select{version.function}
# SELECT version()
To use the SQL wildcard (*) as the sole argument in a function call, create a
function without arguments, then call the * method on the function:
ds.select{|o| o.count.function.*}
ds.select{count.function.*}
# SELECT count(*)
To append the DISTINCT keyword before the method arguments, just call the
distinct method on the returned Function:
ds.select{|o| o.count(o.col1).distinct}
ds.select{count(col1).distinct}
# SELECT count(DISTINCT col1)
ds.select{|o| o.count(o.col1, o.col2).distinct}
ds.select{count(col1, col2).distinct}
# SELECT count(DISTINCT col1, col2)
== SQL::Functions with windows - SQL window function calls
To create a window function call, just call the over method on the Function
object returned, with the options for the window:
ds.select{|o| o.rank.function.over}
ds.select{rank.function.over}
# SELECT rank() OVER ()
ds.select{|o| o.count.function.*.over}
ds.select{count.function.*.over}
# SELECT count(*) OVER ()
ds.select{|o| o.sum(o.col1).over(partition: o.col2, order: o.col3)}
ds.select{sum(col1).over(partition: col2, order: col3)}
# SELECT sum(col1) OVER (PARTITION BY col2 ORDER BY col3)
== Operators
VirtualRows use method_missing to handle almost all method calls. Since the
objects given by method_missing are SQL::Identifiers or SQL::Functions, you can use all operators that they provide (see
DatasetFiltering[http://sequel.jeremyevans.net/rdoc/files/doc/dataset_filtering_rdoc.html#label-Filtering+using+expressions]):
ds.select{|o| o.price - 100}
ds.select{price - 100}
# SELECT (price - 100)
ds.where{|o| (o.price < 200) & (o.tax * 100 >= 0.23)}
ds.where{(price < 200) & (tax * 100 >= 0.23)}
# WHERE ((price < 200) AND ((tax * 100) >= 0.23))
However, VirtualRows have special handling of some operator methods to make
certain things easier. The operators all use a prefix form.
=== Math Operators
The standard +, -, *, and / mathematical operators are defined:
ds.select{|o| o.-(1, o.a).as(b)}
ds.select{self.-(1, a).as(b)}
# SELECT (1 - a) AS b
=== Boolean Operators
The & and | methods are defined to use AND and OR:
ds.where{|o| o.&({a: :b}, :c)}
ds.where{self.&({a: :b}, :c)}
# WHERE ((a = b) AND c)
The ~ method is defined to do inversion:
ds.where{|o| o.~({a: 1, b: 2})}
ds.where{self.~({a: 1, b: 2})}
# WHERE ((a != 1) OR (b != 2))
=== Inequality Operators
The standard >, <, >=, and <= inequality operators are defined:
ds.where{|o| o.>(1, :c)}
ds.where{self.>(1, :c)}
# WHERE (1 > c)
== Returning multiple values
It's common when using select and order virtual row blocks to want to
return multiple values. If you want to do that, you just need to return an
array:
ds.select{|o| [o.column1, o.sum(o.column2).as(o.sum)]}
ds.select{[column1, sum(column2).as(sum)]}
# SELECT column1, sum(column2) AS sum
Note that if you forget the array brackets, you'll end up with a syntax error:
# Invalid ruby syntax
ds.select{|o| o.column1, o.sum(o.column2).as(o.sum)}
ds.select{column1, sum(column2).as(sum)}
== Split symbols
Note that if you turn on symbol splitting for backwards compatibility,
Sequel will split virtual row methods with double underscores and
return them as qualified identifiers:
Sequel.split_symbols = true
ds.where{|o| o.table__column}
ds.where{table__column}
# WHERE table.column
It's not recommended that you rely on this; it's better to convert the calls
to the recommended form:
ds.where{|o| o.table[:column]}
ds.where{table[:column]}
sequel-5.63.0/lib/ 0000775 0000000 0000000 00000000000 14342141206 0013646 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel.rb 0000664 0000000 0000000 00000000077 14342141206 0015475 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative 'sequel/model'
sequel-5.63.0/lib/sequel/ 0000775 0000000 0000000 00000000000 14342141206 0015144 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/adapters/ 0000775 0000000 0000000 00000000000 14342141206 0016747 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/adapters/ado.rb 0000664 0000000 0000000 00000021616 14342141206 0020045 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'win32ole'
module Sequel
# The ADO adapter provides connectivity to ADO databases in Windows.
module ADO
# ADO constants (DataTypeEnum)
# Source: https://msdn.microsoft.com/en-us/library/ms675318(v=vs.85).aspx
AdBigInt = 20
AdBinary = 128
#AdBoolean = 11
#AdBSTR = 8
#AdChapter = 136
#AdChar = 129
#AdCurrency = 6
#AdDate = 7
AdDBDate = 133
#AdDBTime = 134
AdDBTimeStamp = 135
#AdDecimal = 14
#AdDouble = 5
#AdEmpty = 0
#AdError = 10
#AdFileTime = 64
#AdGUID = 72
#AdIDispatch = 9
#AdInteger = 3
#AdIUnknown = 13
AdLongVarBinary = 205
#AdLongVarChar = 201
#AdLongVarWChar = 203
AdNumeric = 131
#AdPropVariant = 138
#AdSingle = 4
#AdSmallInt = 2
#AdTinyInt = 16
#AdUnsignedBigInt = 21
#AdUnsignedInt = 19
#AdUnsignedSmallInt = 18
#AdUnsignedTinyInt = 17
#AdUserDefined = 132
AdVarBinary = 204
#AdVarChar = 200
#AdVariant = 12
AdVarNumeric = 139
#AdVarWChar = 202
#AdWChar = 130
bigint = Object.new
def bigint.call(v)
v.to_i
end
numeric = Object.new
def numeric.call(v)
if v.include?(',')
BigDecimal(v.tr(',', '.'))
else
BigDecimal(v)
end
end
binary = Object.new
def binary.call(v)
Sequel.blob(v.pack('c*'))
end
date = Object.new
def date.call(v)
Date.new(v.year, v.month, v.day)
end
CONVERSION_PROCS = {}
[
[bigint, AdBigInt],
[numeric, AdNumeric, AdVarNumeric],
[date, AdDBDate],
[binary, AdBinary, AdVarBinary, AdLongVarBinary]
].each do |callable, *types|
callable.freeze
types.each do |i|
CONVERSION_PROCS[i] = callable
end
end
CONVERSION_PROCS.freeze
class Database < Sequel::Database
set_adapter_scheme :ado
attr_reader :conversion_procs
# In addition to the usual database options,
# the following options have an effect:
#
# :command_timeout :: Sets the time in seconds to wait while attempting
# to execute a command before cancelling the attempt and generating
# an error. Specifically, it sets the ADO CommandTimeout property.
# :driver :: The driver to use in the ADO connection string. If not provided, a default
# of "SQL Server" is used.
# :conn_string :: The full ADO connection string. If this is provided,
# the usual options are ignored.
# :provider :: Sets the Provider of this ADO connection (for example, "SQLOLEDB").
# If you don't specify a provider, the default one used by WIN32OLE
# has major problems, such as creating a new native database connection
# for every query, which breaks things such as temporary tables.
#
# Pay special attention to the :provider option, as without specifying a provider,
# many things will be broken. The SQLNCLI10 provider appears to work well if you
# are connecting to Microsoft SQL Server, but it is not the default as that is not
# always available and would break backwards compatability.
def connect(server)
opts = server_opts(server)
s = opts[:conn_string] || "driver=#{opts[:driver]};server=#{opts[:host]};database=#{opts[:database]}#{";uid=#{opts[:user]};pwd=#{opts[:password]}" if opts[:user]}"
handle = WIN32OLE.new('ADODB.Connection')
handle.CommandTimeout = opts[:command_timeout] if opts[:command_timeout]
handle.Provider = opts[:provider] if opts[:provider]
handle.Open(s)
handle
end
def disconnect_connection(conn)
conn.Close
rescue WIN32OLERuntimeError
nil
end
def freeze
@conversion_procs.freeze
super
end
# Just execute so it doesn't attempt to return the number of rows modified.
def execute_ddl(sql, opts=OPTS)
execute(sql, opts)
end
# Just execute so it doesn't attempt to return the number of rows modified.
def execute_insert(sql, opts=OPTS)
execute(sql, opts)
end
# Use pass by reference in WIN32OLE to get the number of affected rows,
# unless a provider is in use (since some providers don't seem to
# return the number of affected rows, but the default provider appears
# to).
def execute_dui(sql, opts=OPTS)
return super if opts[:provider]
synchronize(opts[:server]) do |conn|
begin
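# Execute's second argument is the RecordsAffected output parameter.
# WIN32OLE exposes output arguments via WIN32OLE::ARGV after the call,
# so ARGV[1] holds the number of affected rows.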
log_connection_yield(sql, conn){conn.Execute(sql, 1)}
WIN32OLE::ARGV[1]
rescue ::WIN32OLERuntimeError => e
raise_error(e)
end
end
end
def execute(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
begin
r = log_connection_yield(sql, conn){conn.Execute(sql)}
begin
yield r if defined?(yield)
ensure
begin
r.close
rescue ::WIN32OLERuntimeError
end
end
rescue ::WIN32OLERuntimeError => e
raise_error(e)
end
end
nil
end
private
def adapter_initialize
case @opts[:conn_string]
when /Microsoft\.(Jet|ACE)\.OLEDB/io
require_relative 'ado/access'
extend Sequel::ADO::Access::DatabaseMethods
self.dataset_class = ADO::Access::Dataset
else
@opts[:driver] ||= 'SQL Server'
case @opts[:driver]
when 'SQL Server'
require_relative 'ado/mssql'
extend Sequel::ADO::MSSQL::DatabaseMethods
self.dataset_class = ADO::MSSQL::Dataset
set_mssql_unicode_strings
end
end
@conversion_procs = CONVERSION_PROCS.dup
@conversion_procs[AdDBTimeStamp] = method(:adb_timestamp_to_application_timestamp)
super
end
def adb_timestamp_to_application_timestamp(v)
# This hard codes a timestamp_precision of 6 when converting.
# That is the default timestamp_precision, but the ado/mssql adapter uses a timestamp_precision
# of 3. However, timestamps returned by ado/mssql have nsec values that end up rounding to a
# the same value as if a timestamp_precision of 3 was hard coded (either xxx999yzz, where y is
# 5-9 or xxx000yzz where y is 0-4).
#
# ADO subadapters should override this if they would like a different timestamp precision and
# this code does not work for them (for example, if they provide full nsec precision).
#
# Note that fractional second handling for WIN32OLE objects is not correct on ruby <2.2
to_application_timestamp([v.year, v.month, v.day, v.hour, v.min, v.sec, (v.nsec/1000.0).round * 1000])
end
def dataset_class_default
Dataset
end
# The ADO adapter's default provider doesn't support transactions, since it
# creates a new native connection for each query. So Sequel only attempts
# to use transactions if an explicit :provider is given.
def begin_transaction(conn, opts=OPTS)
super if @opts[:provider]
end
def commit_transaction(conn, opts=OPTS)
super if @opts[:provider]
end
def database_error_classes
[::WIN32OLERuntimeError]
end
def disconnect_error?(e, opts)
super || (e.is_a?(::WIN32OLERuntimeError) && e.message =~ /Communication link failure/)
end
def rollback_transaction(conn, opts=OPTS)
super if @opts[:provider]
end
end
class Dataset < Sequel::Dataset
def fetch_rows(sql)
execute(sql) do |recordset|
cols = []
conversion_procs = db.conversion_procs
recordset.Fields.each do |field|
cols << [output_identifier(field.Name), conversion_procs[field.Type]]
end
self.columns = cols.map(&:first)
return if recordset.EOF
max = cols.length
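# GetRows returns the data column-major (one array per field),
# so transpose it to iterate row by row.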
recordset.GetRows.transpose.each do |field_values|
h = {}
i = -1
while (i += 1) < max
name, cp = cols[i]
h[name] = if (v = field_values[i]) && cp
cp.call(v)
else
v
end
end
yield h
end
end
end
# ADO can return nil for the number of rows matched by delete and update statements, depending on the provider.
def provides_accurate_rows_matched?
false
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/ado/ 0000775 0000000 0000000 00000000000 14342141206 0017512 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/adapters/ado/access.rb 0000664 0000000 0000000 00000025171 14342141206 0021306 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/access'
require_relative '../utils/split_alter_table'
module Sequel
module ADO
# Database and Dataset instance methods for Access specific
# support via ADO.
module Access
class AdoSchema
QUERY_TYPE = {
:columns => 4,
:indexes => 12,
:tables => 20,
:views => 23,
:foreign_keys => 27
}.freeze
attr_reader :type, :criteria
def initialize(type, crit)
@type = QUERY_TYPE[type]
@criteria = Array(crit)
end
class Column
DATA_TYPE = {
2 => "SMALLINT",
3 => "INTEGER",
4 => "REAL",
5 => "DOUBLE",
6 => "MONEY",
7 => "DATETIME",
11 => "BIT",
14 => "DECIMAL",
16 => "TINYINT",
17 => "BYTE",
72 => "GUID",
128 => "BINARY",
130 => "TEXT",
131 => "DECIMAL",
201 => "TEXT",
205 => "IMAGE"
}.freeze
DATA_TYPE.each_value(&:freeze)
def initialize(row)
@row = row
end
def [](col)
@row[col]
end
def allow_null
self["IS_NULLABLE"]
end
def default
self["COLUMN_DEFAULT"]
end
def db_type
t = DATA_TYPE[self["DATA_TYPE"]]
if t == "DECIMAL" && precision
t + "(#{precision.to_i},#{(scale || 0).to_i})"
elsif t == "TEXT" && maximum_length && maximum_length > 0
t + "(#{maximum_length.to_i})"
else
t
end
end
def precision
self["NUMERIC_PRECISION"]
end
def scale
self["NUMERIC_SCALE"]
end
def maximum_length
self["CHARACTER_MAXIMUM_LENGTH"]
end
end
end
module DatabaseMethods
include Sequel::Access::DatabaseMethods
include Sequel::Database::SplitAlterTable
# Remove cached schema after altering a table, since otherwise it can be cached
# incorrectly in the rename column case.
def alter_table(name, *)
super
remove_cached_schema(name)
nil
end
# Access doesn't let you disconnect if inside a transaction, so
# try rolling back an existing transaction first.
def disconnect_connection(conn)
conn.RollbackTrans rescue nil
super
end
def execute_insert(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
begin
log_connection_yield(sql, conn){conn.Execute(sql)}
last_insert_sql = "SELECT @@IDENTITY"
res = log_connection_yield(last_insert_sql, conn){conn.Execute(last_insert_sql)}
res.GetRows.transpose.each{|r| return r.shift}
rescue ::WIN32OLERuntimeError => e
raise_error(e)
end
end
nil
end
def tables(opts=OPTS)
m = output_identifier_meth
ado_schema_tables.map {|tbl| m.call(tbl['TABLE_NAME'])}
end
def views(opts=OPTS)
m = output_identifier_meth
ado_schema_views.map {|tbl| m.call(tbl['TABLE_NAME'])}
end
# OpenSchema returns compound indexes as multiple rows
def indexes(table_name,opts=OPTS)
m = output_identifier_meth
idxs = ado_schema_indexes(table_name).inject({}) do |memo, idx|
unless idx["PRIMARY_KEY"]
index = memo[m.call(idx["INDEX_NAME"])] ||= {
:columns=>[], :unique=>idx["UNIQUE"]
}
index[:columns] << m.call(idx["COLUMN_NAME"])
end
memo
end
idxs
end
# OpenSchema returns compound foreign key relationships as multiple rows
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
fks = ado_schema_foreign_keys(table).inject({}) do |memo, fk|
name = m.call(fk['FK_NAME'])
specs = memo[name] ||= {
:columns => [],
:table => m.call(fk['PK_TABLE_NAME']),
:key => [],
:deferrable => fk['DEFERRABILITY'],
:name => name,
:on_delete => fk['DELETE_RULE'],
:on_update => fk['UPDATE_RULE']
}
specs[:columns] << m.call(fk['FK_COLUMN_NAME'])
specs[:key] << m.call(fk['PK_COLUMN_NAME'])
memo
end
fks.values
end
private
# Emulate rename_column by adding the column, copying data from the old
# column, and dropping the old column.
def alter_table_sql(table, op)
case op[:op]
when :rename_column
unless sch = op[:schema]
raise(Error, "can't find existing schema entry for #{op[:name]}") unless sch = op[:schema] || schema(table).find{|c| c.first == op[:name]}
sch = sch.last
end
[
alter_table_sql(table, :op=>:add_column, :name=>op[:new_name], :default=>sch[:ruby_default], :type=>sch[:db_type], :null=>sch[:allow_null]),
from(table).update_sql(op[:new_name]=>op[:name]),
alter_table_sql(table, :op=>:drop_column, :name=>op[:name])
]
when :set_column_null, :set_column_default
raise(Error, "can't find existing schema entry for #{op[:name]}") unless sch = op[:schema] || schema(table).find{|c| c.first == op[:name]}
sch = sch.last
sch = if op[:op] == :set_column_null
sch.merge(:allow_null=>op[:null])
else
sch.merge(:ruby_default=>op[:default])
end
[
alter_table_sql(table, :op=>:rename_column, :name=>op[:name], :new_name=>:sequel_access_backup_column, :schema=>sch),
alter_table_sql(table, :op=>:rename_column, :new_name=>op[:name], :name=>:sequel_access_backup_column, :schema=>sch)
]
else
super
end
end
def begin_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.begin', conn){conn.BeginTrans}
end
def commit_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.commit', conn){conn.CommitTrans}
end
def rollback_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.rollback', conn){conn.RollbackTrans}
end
def schema_column_type(db_type)
case db_type.downcase
when 'bit'
:boolean
when 'byte', 'guid'
:integer
when 'image'
:blob
else
super
end
end
def schema_parse_table(table_name, opts)
m = output_identifier_meth(opts[:dataset])
m2 = input_identifier_meth(opts[:dataset])
tn = m2.call(table_name.to_s)
idxs = ado_schema_indexes(tn)
ado_schema_columns(tn).map {|row|
specs = {
:allow_null => row.allow_null,
:db_type => row.db_type,
:default => row.default,
:primary_key => !!idxs.find {|idx|
idx["COLUMN_NAME"] == row["COLUMN_NAME"] &&
idx["PRIMARY_KEY"]
},
:type => if row.db_type =~ /decimal/i && row.scale == 0
:integer
else
schema_column_type(row.db_type)
end,
:ado_type => row["DATA_TYPE"]
}
specs[:default] = nil if blank_object?(specs[:default])
specs[:allow_null] = specs[:allow_null] && !specs[:primary_key]
[ m.call(row["COLUMN_NAME"]), specs ]
}
end
def ado_schema_tables
rows=[]
fetch_ado_schema(:tables, [nil,nil,nil,'TABLE']) do |row|
rows << row
end
rows
end
def ado_schema_views
rows=[]
fetch_ado_schema(:views, [nil,nil,nil]) do |row|
rows << row
end
rows
end
def ado_schema_indexes(table_name)
rows=[]
fetch_ado_schema(:indexes, [nil,nil,nil,nil,table_name.to_s]) do |row|
rows << row
end
rows
end
def ado_schema_columns(table_name)
rows=[]
fetch_ado_schema(:columns, [nil,nil,table_name.to_s,nil]) do |row|
rows << AdoSchema::Column.new(row)
end
rows.sort!{|a,b| a["ORDINAL_POSITION"] <=> b["ORDINAL_POSITION"]}
end
def ado_schema_foreign_keys(table_name)
rows=[]
fetch_ado_schema(:foreign_keys, [nil,nil,nil,nil,nil,table_name.to_s]) do |row|
rows << row
end
rows.sort!{|a,b| a["ORDINAL"] <=> b["ORDINAL"]}
end
def fetch_ado_schema(type, criteria=[])
execute_open_ado_schema(type, criteria) do |s|
cols = []
s.Fields.each{|f| cols << f.Name}
s.GetRows.transpose.each do |r|
row = {}
cols.each{|c| row[c] = r.shift}
yield row
end unless s.eof
end
end
# This is like execute() in that it yields an ADO RecordSet, except
# instead of an SQL interface there's this OpenSchema call
# cf. http://msdn.microsoft.com/en-us/library/ee275721(v=bts.10)
def execute_open_ado_schema(type, criteria=[])
ado_schema = AdoSchema.new(type, criteria)
synchronize(opts[:server]) do |conn|
begin
r = log_connection_yield("OpenSchema #{type.inspect}, #{criteria.inspect}", conn) {
if ado_schema.criteria.empty?
conn.OpenSchema(ado_schema.type)
else
conn.OpenSchema(ado_schema.type, ado_schema.criteria)
end
}
yield(r) if defined?(yield)
rescue ::WIN32OLERuntimeError => e
raise_error(e)
end
end
nil
end
end
class Dataset < ADO::Dataset
include Sequel::Access::DatasetMethods
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/ado/mssql.rb 0000664 0000000 0000000 00000003736 14342141206 0021207 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/mssql'
module Sequel
module ADO
module MSSQL
module DatabaseMethods
include Sequel::MSSQL::DatabaseMethods
def execute_dui(sql, opts=OPTS)
return super unless @opts[:provider]
synchronize(opts[:server]) do |conn|
begin
sql = "SET NOCOUNT ON; #{sql}; SELECT @@ROWCOUNT"
rst = log_connection_yield(sql, conn){conn.Execute(sql)}
rst.GetRows[0][0]
rescue ::WIN32OLERuntimeError => e
raise_error(e)
end
end
end
private
# The ADO adapter's default provider doesn't support transactions, since it
# creates a new native connection for each query. So Sequel only attempts
# to use transactions if an explicit :provider is given.
def begin_transaction(conn, opts=OPTS)
super if @opts[:provider]
end
def commit_transaction(conn, opts=OPTS)
super if @opts[:provider]
end
def rollback_transaction(conn, opts=OPTS)
super if @opts[:provider]
end
end
class Dataset < ADO::Dataset
include Sequel::MSSQL::DatasetMethods
# Use a nasty hack of multiple SQL statements in the same call and
# having the last one return the most recently inserted id. This
# is necessary as ADO's default :provider uses a separate native
# connection for each query.
def insert(*values)
return super if (@opts[:sql] && !@opts[:prepared_sql]) || @opts[:returning]
with_sql("SET NOCOUNT ON; #{insert_sql(*values)}; SELECT CAST(SCOPE_IDENTITY() AS INTEGER)").single_value
end
# If you use a better :provider option for the database, you can get an
# accurate number of rows matched.
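#
# A usage sketch (the :provider value is illustrative; any OLE DB
# provider that keeps a single native connection should work):
#
#   DB = Sequel.connect(adapter: 'ado', host: 'myserver',
#     database: 'mydb', provider: 'SQLNCLI11')
#   DB[:albums].where(id: 1).update(name: 'RF') # accurate rows matched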
def provides_accurate_rows_matched?
!!db.opts[:provider]
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/amalgalite.rb 0000664 0000000 0000000 00000013050 14342141206 0021373 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'amalgalite'
require_relative 'shared/sqlite'
module Sequel
module Amalgalite
# Type conversion map class for Sequel's use of Amalgalite
class SequelTypeMap < ::Amalgalite::TypeMaps::DefaultMap
methods_handling_sql_types.delete('string')
methods_handling_sql_types.merge!(
'datetime' => %w'datetime timestamp',
'time' => %w'time',
'float' => ['float', 'double', 'real', 'double precision'],
'decimal' => %w'numeric decimal money'
)
# Store the related database object, in order to be able to correctly
# handle the database timezone.
def initialize(db)
@db = db
end
# Return blobs as instances of Sequel::SQL::Blob instead of
# Amalgalite::Blob
def blob(s)
SQL::Blob.new(s)
end
# Return numeric/decimal types as instances of BigDecimal
# instead of Float
def decimal(s)
BigDecimal(s)
end
# Return datetime types as instances of Sequel.datetime_class
def datetime(s)
@db.to_application_timestamp(s)
end
def time(s)
Sequel.string_to_time(s)
end
# Don't raise an error if the value is a string and the declared
# type doesn't match a known type, just return the value.
def result_value_of(declared_type, value)
if value.is_a?(::Amalgalite::Blob)
SQL::Blob.new(value.to_s)
elsif value.is_a?(String) && declared_type
(meth = self.class.sql_to_method(declared_type.downcase)) ? public_send(meth, value) : value
else
super
end
end
end
class Database < Sequel::Database
include ::Sequel::SQLite::DatabaseMethods
set_adapter_scheme :amalgalite
# Mimic the file:// uri, by having 2 preceding slashes specify a relative
# path, and 3 preceding slashes specify an absolute path.
def self.uri_to_options(uri) # :nodoc:
{ :database => (uri.host.nil? && uri.path == '/') ? nil : "#{uri.host}#{uri.path}" }
end
private_class_method :uri_to_options
# Connect to the database. Since SQLite is a file based database,
# the only options available are :database (to specify the database
# name), and :timeout, to specify how long to wait for the database to
# be available if it is locked, given in milliseconds (default is 5000).
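#
# Hypothetical connection examples (file names are illustrative):
#
#   DB = Sequel.connect(adapter: 'amalgalite', database: 'blog.db', timeout: 10000)
#   DB = Sequel.connect('amalgalite://blog.db')      # two slashes: relative path
#   DB = Sequel.connect('amalgalite:///var/blog.db') # three slashes: absolute path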
def connect(server)
opts = server_opts(server)
opts[:database] = ':memory:' if blank_object?(opts[:database])
db = ::Amalgalite::Database.new(opts[:database])
db.busy_handler(::Amalgalite::BusyTimeout.new(opts.fetch(:timeout, 5000)/50, 50))
db.type_map = SequelTypeMap.new(self)
connection_pragmas.each{|s| log_connection_yield(s, db){db.execute_batch(s)}}
db
end
def database_type
:sqlite
end
def execute_ddl(sql, opts=OPTS)
_execute(sql, opts){|conn| log_connection_yield(sql, conn){conn.execute_batch(sql)}}
nil
end
def execute_dui(sql, opts=OPTS)
_execute(sql, opts){|conn| log_connection_yield(sql, conn){conn.execute_batch(sql)}; conn.row_changes}
end
def execute_insert(sql, opts=OPTS)
_execute(sql, opts){|conn| log_connection_yield(sql, conn){conn.execute_batch(sql)}; conn.last_insert_rowid}
end
def execute(sql, opts=OPTS)
_execute(sql, opts) do |conn|
begin
yield(stmt = log_connection_yield(sql, conn){conn.prepare(sql)})
ensure
stmt.close if stmt
end
end
end
# Run the given SQL with the given arguments and return the first value of the first row.
def single_value(sql, opts=OPTS)
_execute(sql, opts){|conn| log_connection_yield(sql, conn){conn.first_value_from(sql)}}
end
private
# Yield an available connection. Rescue
# any Amalgalite::Errors and turn them into DatabaseErrors.
def _execute(sql, opts)
synchronize(opts[:server]){|conn| yield conn}
rescue ::Amalgalite::Error, ::Amalgalite::SQLite3::Error => e
raise_error(e)
end
# The Amalgalite adapter does not need the pool to convert exceptions.
# Also, force the max connections to 1 if a memory database is being
# used, as otherwise each connection gets a separate database.
def connection_pool_default_options
o = super.dup
# Default to only a single connection if a memory database is used,
# because otherwise each connection will get a separate database
o[:max_connections] = 1 if @opts[:database] == ':memory:' || blank_object?(@opts[:database])
o
end
def dataset_class_default
Dataset
end
def database_error_classes
[::Amalgalite::Error, ::Amalgalite::SQLite3::Error]
end
end
class Dataset < Sequel::Dataset
include ::Sequel::SQLite::DatasetMethods
def fetch_rows(sql)
execute(sql) do |stmt|
self.columns = cols = stmt.result_fields.map{|c| output_identifier(c)}
col_count = cols.size
stmt.each do |result|
row = {}
col_count.times{|i| row[cols[i]] = result[i]}
yield row
end
end
end
private
# Quote the string using the connection instance method.
def literal_string_append(sql, v)
db.synchronize(@opts[:server]){|c| sql << c.quote(v)}
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/ibmdb.rb 0000664 0000000 0000000 00000031315 14342141206 0020354 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'ibm_db'
require_relative 'shared/db2'
module Sequel
module IBMDB
tt = Class.new do
def boolean(s) !s.to_i.zero? end
def int(s) s.to_i end
end.new
# Hash holding type translation methods, used by Dataset#fetch_rows.
DB2_TYPES = {
:boolean => tt.method(:boolean),
:int => tt.method(:int),
:blob => ::Sequel::SQL::Blob.method(:new),
:time => ::Sequel.method(:string_to_time),
:date => ::Sequel.method(:string_to_date)
}.freeze
# Wraps an underlying connection to DB2 using IBM_DB, to provide a more
# rubyish API.
class Connection
# A hash with prepared statement name symbol keys, where each value is
# a two-element array with an SQL string and cached Statement value.
attr_reader :prepared_statements
# Error class for exceptions raised by the connection.
class Error < StandardError
attr_reader :sqlstate
def initialize(message, sqlstate)
@sqlstate = sqlstate
super(message)
end
end
# Create the underlying IBM_DB connection.
def initialize(connection_param)
@conn = if connection_param.class == String
IBM_DB.connect(connection_param, '', '')
else # connect using catalog
IBM_DB.connect(*connection_param)
end
self.autocommit = true
@prepared_statements = {}
end
# Check whether the connection is in autocommit state or not.
def autocommit
IBM_DB.autocommit(@conn) == 1
end
# Turn autocommit on or off for the connection.
def autocommit=(value)
IBM_DB.autocommit(@conn, value ? IBM_DB::SQL_AUTOCOMMIT_ON : IBM_DB::SQL_AUTOCOMMIT_OFF)
end
# Close the connection, disconnecting from DB2.
def close
IBM_DB.close(@conn)
end
# Commit the currently outstanding transaction on this connection.
def commit
IBM_DB.commit(@conn)
end
# Return the related error message for the connection.
def error_msg
IBM_DB.getErrormsg(@conn, IBM_DB::DB_CONN)
end
# Return the related error message for the connection.
def error_sqlstate
IBM_DB.getErrorstate(@conn, IBM_DB::DB_CONN)
end
# Execute the given SQL on the database, and return a Statement instance
# holding the results.
def execute(sql)
stmt = IBM_DB.exec(@conn, sql)
raise Error.new(error_msg, error_sqlstate) unless stmt
Statement.new(stmt)
end
# Execute the related prepared statement on the database with the given
# arguments.
def execute_prepared(ps_name, *values)
stmt = @prepared_statements[ps_name].last
res = stmt.execute(*values)
unless res
raise Error.new("Error executing statement #{ps_name}: #{error_msg}", error_sqlstate)
end
stmt
end
# Prepare a statement with the given +sql+ on the database, and
# cache the prepared statement value by name.
def prepare(sql, ps_name)
if stmt = IBM_DB.prepare(@conn, sql)
ps_name = ps_name.to_sym
stmt = Statement.new(stmt)
@prepared_statements[ps_name] = [sql, stmt]
else
err = error_msg
err = "Error preparing #{ps_name} with SQL: #{sql}" if error_msg.nil? || error_msg.empty?
raise Error.new(err, error_sqlstate)
end
end
# Rollback the currently outstanding transaction on this connection.
def rollback
IBM_DB.rollback(@conn)
end
end
# Wraps results returned by queries on IBM_DB.
class Statement
# Hold the given statement.
def initialize(stmt)
@stmt = stmt
end
# Return the number of rows affected.
def affected
IBM_DB.num_rows(@stmt)
end
# If this statement is a prepared statement, execute it on the database
# with the given values.
def execute(*values)
IBM_DB.execute(@stmt, values)
end
# Return the results of a query as an array of values.
def fetch_array
IBM_DB.fetch_array(@stmt) if @stmt
end
# Return the field name at the given column in the result set.
def field_name(ind)
IBM_DB.field_name(@stmt, ind)
end
# Return the field type for the given field name in the result set.
def field_type(key)
IBM_DB.field_type(@stmt, key)
end
# Return the field precision for the given field name in the result set.
def field_precision(key)
IBM_DB.field_precision(@stmt, key)
end
# Free the memory related to this statement.
def free
IBM_DB.free_stmt(@stmt)
end
# Free the memory related to this result set, only useful for prepared
# statements which have a different result set on every call.
def free_result
IBM_DB.free_result(@stmt)
end
# Return the number of fields in the result set.
def num_fields
IBM_DB.num_fields(@stmt)
end
end
class Database < Sequel::Database
include Sequel::DB2::DatabaseMethods
set_adapter_scheme :ibmdb
# Hash of conversion procs for converting fetched column values.
attr_reader :conversion_procs
# Whether to convert smallint values to bool for this Database instance
attr_accessor :convert_smallint_to_bool
# Create a new connection object for the given server.
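#
# Connection sketches (host and credentials are hypothetical):
#
#   # Cataloged connection (no :host/:port):
#   DB = Sequel.connect(adapter: 'ibmdb', database: 'mydb',
#     user: 'db2inst1', password: 'secret')
#   # Uncataloged connection (:host given, :port defaults to 50000):
#   DB = Sequel.connect(adapter: 'ibmdb', host: 'db2.example.com',
#     database: 'mydb', user: 'db2inst1', password: 'secret')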
def connect(server)
opts = server_opts(server)
connection_params = if opts[:host].nil? && opts[:port].nil? && opts[:database]
# use a cataloged connection
opts.values_at(:database, :user, :password)
else
# use uncataloged connection so that host and port can be supported
'Driver={IBM DB2 ODBC DRIVER};' \
"Database=#{opts[:database]};" \
"Hostname=#{opts[:host]};" \
"Port=#{opts[:port] || 50000};" \
'Protocol=TCPIP;' \
"Uid=#{opts[:user]};" \
"Pwd=#{opts[:password]};" \
end
Connection.new(connection_params)
end
def execute(sql, opts=OPTS, &block)
if sql.is_a?(Symbol)
execute_prepared_statement(sql, opts, &block)
else
synchronize(opts[:server]){|c| _execute(c, sql, opts, &block)}
end
rescue Connection::Error => e
raise_error(e)
end
def execute_insert(sql, opts=OPTS)
synchronize(opts[:server]) do |c|
if sql.is_a?(Symbol)
execute_prepared_statement(sql, opts)
else
_execute(c, sql, opts)
end
_execute(c, "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM.SYSDUMMY1", opts){|stmt| i = stmt.fetch_array.first.to_i; i}
end
rescue Connection::Error => e
raise_error(e)
end
# Execute a prepared statement named by name on the database.
def execute_prepared_statement(ps_name, opts)
args = opts[:arguments]
ps = prepared_statement(ps_name)
sql = ps.prepared_sql
synchronize(opts[:server]) do |conn|
unless conn.prepared_statements.fetch(ps_name, []).first == sql
log_connection_yield("PREPARE #{ps_name}: #{sql}", conn){conn.prepare(sql, ps_name)}
end
args = args.map{|v| v.nil? ? nil : prepared_statement_arg(v)}
log_sql = "EXECUTE #{ps_name}"
if ps.log_sql
log_sql += " ("
log_sql << sql
log_sql << ")"
end
begin
stmt = log_connection_yield(log_sql, conn, args){conn.execute_prepared(ps_name, *args)}
if defined?(yield)
yield(stmt)
else
stmt.affected
end
ensure
stmt.free_result if stmt
end
end
end
def freeze
@conversion_procs.freeze
super
end
private
# Execute the given SQL on the database, yielding the related statement if a block
# is given or returning the number of affected rows if not, and ensuring the statement is freed.
def _execute(conn, sql, opts)
stmt = log_connection_yield(sql, conn){conn.execute(sql)}
if defined?(yield)
yield(stmt)
else
stmt.affected
end
ensure
stmt.free if stmt
end
def adapter_initialize
@convert_smallint_to_bool = typecast_value_boolean(opts.fetch(:convert_smallint_to_bool, true))
@conversion_procs = DB2_TYPES.dup
@conversion_procs[:timestamp] = method(:to_application_timestamp)
end
# IBM_DB uses an autocommit setting instead of sending SQL queries.
# So starting a transaction just turns autocommit off.
def begin_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.begin', conn){conn.autocommit = false}
set_transaction_isolation(conn, opts)
end
# This commits transaction in progress on the
# connection and sets autocommit back on.
def commit_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.commit', conn){conn.commit}
end
def database_error_classes
[Connection::Error]
end
def database_exception_sqlstate(exception, opts)
exception.sqlstate
end
def dataset_class_default
Dataset
end
# Don't convert smallint to boolean for the metadata
# dataset, since the DB2 metadata does not use
# boolean columns, and some smallint columns are
# accidentally treated as booleans.
def _metadata_dataset
super.with_convert_smallint_to_bool(false)
end
# Format Numeric, Date, and Time types specially for use
# as IBM_DB prepared statement argument values.
def prepared_statement_arg(v)
case v
when Numeric
v.to_s
when Date, Time
literal(v).gsub("'", '')
else
v
end
end
# Set autocommit back on
def remove_transaction(conn, committed)
conn.autocommit = true
ensure
super
end
# This rolls back the transaction in progress on the
# connection and sets autocommit back on.
def rollback_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.rollback', conn){conn.rollback}
end
# Convert smallint type to boolean if convert_smallint_to_bool is true
def schema_column_type(db_type)
if convert_smallint_to_bool && db_type =~ /smallint/i
:boolean
else
super
end
end
end
class Dataset < Sequel::Dataset
include Sequel::DB2::DatasetMethods
module CallableStatementMethods
# Extend given dataset with this module so subselects inside subselects in
# prepared statements work.
def subselect_sql_append(sql, ds)
ps = ds.to_prepared_statement(:select).
clone(:append_sql=>sql, :prepared_args=>prepared_args).
with_extend(CallableStatementMethods)
ps = ps.bind(@opts[:bind_vars]) if @opts[:bind_vars]
ps.prepared_sql
end
end
PreparedStatementMethods = prepared_statements_module(:prepare_bind, Sequel::Dataset::UnnumberedArgumentMapper)
# Whether to convert smallint to boolean arguments for this dataset.
# Defaults to the Database setting.
def convert_smallint_to_bool
opts.has_key?(:convert_smallint_to_bool) ? opts[:convert_smallint_to_bool] : db.convert_smallint_to_bool
end
# Return a cloned dataset with the convert_smallint_to_bool option set.
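#
# For example (table and column are hypothetical):
#
#   DB[:flags].with_convert_smallint_to_bool(false).first
#   # smallint values come back as Integer instead of true/false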
def with_convert_smallint_to_bool(v)
clone(:convert_smallint_to_bool=>v)
end
def fetch_rows(sql)
execute(sql) do |stmt|
columns = []
convert = convert_smallint_to_bool
cps = db.conversion_procs
stmt.num_fields.times do |i|
k = stmt.field_name i
key = output_identifier(k)
type = stmt.field_type(i).downcase.to_sym
# decide if it is a smallint from precision
type = :boolean if type == :int && convert && stmt.field_precision(i) < 8
type = :blob if type == :clob && db.use_clob_as_blob
columns << [key, cps[type]]
end
cols = columns.map{|c| c[0]}
self.columns = cols
while res = stmt.fetch_array
row = {}
res.zip(columns).each do |v, (k, pr)|
row[k] = ((pr ? pr.call(v) : v) if v)
end
yield row
end
end
self
end
private
def bound_variable_modules
[CallableStatementMethods]
end
def prepared_statement_modules
[PreparedStatementMethods]
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc.rb 0000664 0000000 0000000 00000071237 14342141206 0020210 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'java'
require_relative 'utils/stored_procedures'
module Sequel
module JDBC
# Make accessing the java.sql hierarchy more ruby friendly.
module JavaSQL
include_package 'java.sql'
end
# Used to identify a jndi connection and to extract the jndi
# resource name.
JNDI_URI_REGEXP = /\Ajdbc:jndi:(.+)/
# Contains procs keyed on subadapter type that extend the
# given database object so it supports the correct database type.
DATABASE_SETUP = {}
# Create custom NativeException alias for nicer access, and also so that
# JRuby 9.2+ doesn't use the deprecated ::NativeException
NativeException = java.lang.Exception
# Default database error classes
DATABASE_ERROR_CLASSES = [NativeException]
if JRUBY_VERSION < '9.2'
# On JRuby <9.2, still include ::NativeException, as it is still needed in some cases
DATABASE_ERROR_CLASSES << ::NativeException
end
DATABASE_ERROR_CLASSES.freeze
# Allow loading the necessary JDBC support via a gem.
def self.load_gem(name)
require "jdbc/#{name.to_s.downcase}"
rescue LoadError
# jdbc gem not used, hopefully the user has the .jar in their CLASSPATH
else
if defined?(::Jdbc) && ( ::Jdbc.const_defined?(name) rescue nil )
jdbc_module = ::Jdbc.const_get(name) # e.g. Jdbc::SQLite3
jdbc_module.load_driver if jdbc_module.respond_to?(:load_driver)
end
end
# Attempt to load the JDBC driver class, which should be specified as a string
# containing the driver class name (which JRuby should autoload).
# Note that the string is evaled, so this method is not safe to call with
# untrusted input.
# Raise a Sequel::AdapterNotFound if evaluating the class name raises a NameError.
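#
# For example, a subadapter might load its driver with:
#
#   Sequel::JDBC.load_driver('org.sqlite.JDBC', :SQLite3)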
def self.load_driver(drv, gem=nil)
load_gem(gem) if gem
if drv.is_a?(String)
eval drv
else
*try, last = drv
try.each do |try_drv|
begin
return eval(try_drv)
rescue NameError
end
end
eval last
end
rescue NameError
raise Sequel::AdapterNotFound, "#{drv} not loaded#{", try installing jdbc-#{gem.to_s.downcase} gem" if gem}"
end
class TypeConvertor
CONVERTORS = convertors = {}
%w'Boolean Float Double Int Long Short'.each do |meth|
x = x = convertors[meth.to_sym] = Object.new
class_eval("def x.call(r, i) v = r.get#{meth}(i); v unless r.wasNull end", __FILE__, __LINE__)
end
%w'Object Array String Time Date Timestamp BigDecimal Blob Bytes Clob'.each do |meth|
x = x = convertors[meth.to_sym] = Object.new
class_eval("def x.call(r, i) r.get#{meth}(i) end", __FILE__, __LINE__)
end
x = convertors[:RubyTime] = Object.new
def x.call(r, i)
if v = r.getTime(i)
Sequel.string_to_time("#{v.to_string}.#{sprintf('%03i', v.getTime.divmod(1000).last)}")
end
end
x = convertors[:RubyDate] = Object.new
def x.call(r, i)
if v = r.getDate(i)
Date.civil(v.getYear + 1900, v.getMonth + 1, v.getDate)
end
end
x = convertors[:RubyTimestamp] = Object.new
def x.call(r, i)
if v = r.getTimestamp(i)
Sequel.database_to_application_timestamp([v.getYear + 1900, v.getMonth + 1, v.getDate, v.getHours, v.getMinutes, v.getSeconds, v.getNanos])
end
end
x = convertors[:RubyBigDecimal] = Object.new
def x.call(r, i)
if v = r.getBigDecimal(i)
::Kernel::BigDecimal(v.to_string)
end
end
x = convertors[:RubyBlob] = Object.new
def x.call(r, i)
if v = r.getBytes(i)
Sequel::SQL::Blob.new(String.from_java_bytes(v))
end
end
x = convertors[:RubyClob] = Object.new
def x.call(r, i)
if v = r.getClob(i)
v.getSubString(1, v.length)
end
end
x = convertors[:RubyArray] = Object.new
def x.call(r, i)
if v = r.getArray(i)
v.array.to_ary
end
end
MAP = Hash.new(convertors[:Object])
types = Java::JavaSQL::Types
{
:BOOLEAN => :Boolean,
:CHAR => :String,
:DOUBLE => :Double,
:FLOAT => :Double,
:INTEGER => :Int,
:LONGNVARCHAR => :String,
:LONGVARCHAR => :String,
:NCHAR => :String,
:REAL => :Float,
:SMALLINT => :Short,
:TINYINT => :Short,
:VARCHAR => :String,
}.each do |type, meth|
MAP[types.const_get(type)] = convertors[meth]
end
BASIC_MAP = MAP.dup
{
:ARRAY => :Array,
:BINARY => :Blob,
:BLOB => :Blob,
:CLOB => :Clob,
:DATE => :Date,
:DECIMAL => :BigDecimal,
:LONGVARBINARY => :Blob,
:NCLOB => :Clob,
:NUMERIC => :BigDecimal,
:TIME => :Time,
:TIMESTAMP => :Timestamp,
:VARBINARY => :Blob,
}.each do |type, meth|
BASIC_MAP[types.const_get(type)] = convertors[meth]
MAP[types.const_get(type)] = convertors[:"Ruby#{meth}"]
end
MAP.freeze
BASIC_MAP.freeze
end
class Database < Sequel::Database
set_adapter_scheme :jdbc
# The Java database driver we are using (should be a Java class)
attr_reader :driver
# Whether to convert some Java types to ruby types when retrieving rows.
# True by default, can be set to false to roughly double performance when
# fetching rows.
attr_accessor :convert_types
# The fetch size to use for JDBC Statement objects created by this database.
# By default, this is nil so a fetch size is not set explicitly.
attr_accessor :fetch_size
# Map of JDBC type ids to callable objects that return appropriate ruby values.
attr_reader :type_convertor_map
# Map of JDBC type ids to callable objects that return appropriate ruby or java values.
attr_reader :basic_type_convertor_map
# Execute the given stored procedure with the given name. If a block is
# given, the stored procedure should return rows.
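#
# A usage sketch (procedure name and argument are hypothetical):
#
#   DB.call_sproc('update_prices', args: [10])
#   DB.call_sproc('get_prices', args: [10]){|rs| ...} # yields a JDBC ResultSet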
def call_sproc(name, opts = OPTS)
args = opts[:args] || []
sql = "{call #{name}(#{args.map{'?'}.join(',')})}"
synchronize(opts[:server]) do |conn|
begin
cps = conn.prepareCall(sql)
i = 0
args.each{|arg| set_ps_arg(cps, arg, i+=1)}
if defined?(yield)
yield log_connection_yield(sql, conn){cps.executeQuery}
else
log_connection_yield(sql, conn){cps.executeUpdate}
if opts[:type] == :insert
last_insert_id(conn, opts)
end
end
rescue *DATABASE_ERROR_CLASSES => e
raise_error(e)
ensure
cps.close if cps
end
end
end
# Connect to the database using JavaSQL::DriverManager.getConnection, falling back
# to driver.new.connect if the driver is known.
def connect(server)
opts = server_opts(server)
conn = if jndi?
get_connection_from_jndi
else
args = [uri(opts)]
args.concat([opts[:user], opts[:password]]) if opts[:user] && opts[:password]
begin
JavaSQL::DriverManager.setLoginTimeout(opts[:login_timeout]) if opts[:login_timeout]
raise StandardError, "skipping regular connection" if opts[:jdbc_properties]
JavaSQL::DriverManager.getConnection(*args)
rescue StandardError, *DATABASE_ERROR_CLASSES => e
raise e unless driver
# If the DriverManager can't get the connection - use the connect
# method of the driver. (This happens under Tomcat for instance)
props = java.util.Properties.new
if opts && opts[:user] && opts[:password]
props.setProperty("user", opts[:user])
props.setProperty("password", opts[:password])
end
opts[:jdbc_properties].each{|k,v| props.setProperty(k.to_s, v)} if opts[:jdbc_properties]
begin
c = driver.new.connect(args[0], props)
raise(Sequel::DatabaseError, 'driver.new.connect returned nil: probably bad JDBC connection string') unless c
c
rescue StandardError, *DATABASE_ERROR_CLASSES => e2
if e2.respond_to?(:message=) && e2.message != e.message
e2.message = "#{e2.message}\n#{e.class.name}: #{e.message}"
end
raise e2
end
end
end
setup_connection_with_opts(conn, opts)
end
# Close given adapter connections, and delete any related prepared statements.
def disconnect_connection(c)
@connection_prepared_statements_mutex.synchronize{@connection_prepared_statements.delete(c)}
c.close
end
def execute(sql, opts=OPTS, &block)
return call_sproc(sql, opts, &block) if opts[:sproc]
return execute_prepared_statement(sql, opts, &block) if [Symbol, Dataset].any?{|c| sql.is_a?(c)}
synchronize(opts[:server]) do |conn|
statement(conn) do |stmt|
if block
if size = fetch_size
stmt.setFetchSize(size)
end
yield log_connection_yield(sql, conn){stmt.executeQuery(sql)}
else
case opts[:type]
when :ddl
log_connection_yield(sql, conn){stmt.execute(sql)}
when :insert
log_connection_yield(sql, conn){execute_statement_insert(stmt, sql)}
opts = Hash[opts]
opts[:stmt] = stmt
last_insert_id(conn, opts)
else
log_connection_yield(sql, conn){stmt.executeUpdate(sql)}
end
end
end
end
end
alias execute_dui execute
def execute_ddl(sql, opts=OPTS)
opts = Hash[opts]
opts[:type] = :ddl
execute(sql, opts)
end
def execute_insert(sql, opts=OPTS)
opts = Hash[opts]
opts[:type] = :insert
execute(sql, opts)
end
def freeze
@type_convertor_map.freeze
@basic_type_convertor_map.freeze
super
end
# Use the JDBC metadata to get a list of foreign keys for the table.
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
schema, table = metadata_schema_and_table(table, opts)
foreign_keys = {}
metadata(:getImportedKeys, nil, schema, table) do |r|
if fk = foreign_keys[r[:fk_name]]
fk[:columns] << [r[:key_seq], m.call(r[:fkcolumn_name])]
fk[:key] << [r[:key_seq], m.call(r[:pkcolumn_name])]
elsif r[:fk_name]
foreign_keys[r[:fk_name]] = {:name=>m.call(r[:fk_name]), :columns=>[[r[:key_seq], m.call(r[:fkcolumn_name])]], :table=>m.call(r[:pktable_name]), :key=>[[r[:key_seq], m.call(r[:pkcolumn_name])]]}
end
end
foreign_keys.values.each do |fk|
[:columns, :key].each do |k|
fk[k] = fk[k].sort.map{|_, v| v}
end
end
end
# Use the JDBC metadata to get the index information for the table.
def indexes(table, opts=OPTS)
m = output_identifier_meth
schema, table = metadata_schema_and_table(table, opts)
indexes = {}
metadata(:getIndexInfo, nil, schema, table, false, true) do |r|
next unless name = r[:column_name]
next if respond_to?(:primary_key_index_re, true) and r[:index_name] =~ primary_key_index_re
i = indexes[m.call(r[:index_name])] ||= {:columns=>[], :unique=>[false, 0].include?(r[:non_unique])}
i[:columns] << m.call(name)
end
indexes
end
# Whether or not JNDI is being used for this connection.
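#
# A JNDI connection uses a uri such as (resource name is hypothetical):
#
#   DB = Sequel.connect('jdbc:jndi:java:comp/env/jdbc/mydb')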
def jndi?
!!(uri =~ JNDI_URI_REGEXP)
end
# All tables in this database
def tables(opts=OPTS)
get_tables('TABLE', opts)
end
# The uri for this connection. You can specify the uri
# using the :uri, :url, or :database options. You don't
# need to worry about this if you use Sequel.connect
# with a JDBC connection string.
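#
# e.g. (database name is hypothetical):
#
#   DB = Sequel.connect('jdbc:postgresql://localhost/sequel_test?user=postgres')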
def uri(opts=OPTS)
opts = @opts.merge(opts)
ur = opts[:uri] || opts[:url] || opts[:database]
ur =~ /\Ajdbc:/ ? ur : "jdbc:#{ur}"
end
# All views in this database
def views(opts=OPTS)
get_tables('VIEW', opts)
end
private
# Call the DATABASE_SETUP proc directly after initialization,
# so the object always uses subadapter-specific code. Also,
# raise an error immediately if the connection doesn't have a
# uri, since JDBC requires one.
def adapter_initialize
@connection_prepared_statements = {}
@connection_prepared_statements_mutex = Mutex.new
@fetch_size = @opts[:fetch_size] ? typecast_value_integer(@opts[:fetch_size]) : default_fetch_size
@convert_types = typecast_value_boolean(@opts.fetch(:convert_types, true))
raise(Error, "No connection string specified") unless uri
resolved_uri = jndi? ? get_uri_from_jndi : uri
setup_type_convertor_map_early
@driver = if (match = /\Ajdbc:([^:]+)/.match(resolved_uri)) && (prok = Sequel::Database.load_adapter(match[1].to_sym, :map=>DATABASE_SETUP, :subdir=>'jdbc'))
prok.call(self)
else
@opts[:driver]
end
setup_type_convertor_map
end
# Yield the native prepared statements hash for the given connection
# to the block in a thread-safe manner.
def cps_sync(conn, &block)
@connection_prepared_statements_mutex.synchronize{yield(@connection_prepared_statements[conn] ||= {})}
end
def database_error_classes
DATABASE_ERROR_CLASSES
end
def database_exception_sqlstate(exception, opts)
if database_exception_use_sqlstates?
while exception.respond_to?(:cause)
exception = exception.cause
return exception.getSQLState if exception.respond_to?(:getSQLState)
end
end
nil
end
# Whether the JDBC subadapter should use SQL states for exception handling, true by default.
def database_exception_use_sqlstates?
true
end
def dataset_class_default
Dataset
end
# Raise a disconnect error if the SQL state of the cause of the exception indicates so.
def disconnect_error?(exception, opts)
cause = exception.respond_to?(:cause) ? exception.cause : exception
super || (cause.respond_to?(:getSQLState) && cause.getSQLState =~ /^08/)
end
# Execute the prepared statement. If the provided name is a
# dataset, use that as the prepared statement, otherwise use
# it as a key to look it up in the prepared_statements hash.
# If the connection we are using has already prepared an identical
# statement, use that statement instead of creating another.
# Otherwise, prepare a new statement for the connection, bind the
# variables, and execute it.
def execute_prepared_statement(name, opts=OPTS)
args = opts[:arguments]
if name.is_a?(Dataset)
ps = name
name = ps.prepared_statement_name
else
ps = prepared_statement(name)
end
sql = ps.prepared_sql
synchronize(opts[:server]) do |conn|
if name and cps = cps_sync(conn){|cpsh| cpsh[name]} and cps[0] == sql
cps = cps[1]
else
log_connection_yield("CLOSE #{name}", conn){cps[1].close} if cps
if name
opts = Hash[opts]
opts[:name] = name
end
cps = log_connection_yield("PREPARE#{" #{name}:" if name} #{sql}", conn){prepare_jdbc_statement(conn, sql, opts)}
if size = fetch_size
cps.setFetchSize(size)
end
cps_sync(conn){|cpsh| cpsh[name] = [sql, cps]} if name
end
i = 0
args.each{|arg| set_ps_arg(cps, arg, i+=1)}
msg = "EXECUTE#{" #{name}" if name}"
if ps.log_sql
msg += " ("
msg << sql
msg << ")"
end
begin
if defined?(yield)
yield log_connection_yield(msg, conn, args){cps.executeQuery}
else
case opts[:type]
when :ddl
log_connection_yield(msg, conn, args){cps.execute}
when :insert
log_connection_yield(msg, conn, args){execute_prepared_statement_insert(cps)}
opts = Hash[opts]
opts[:prepared] = true
opts[:stmt] = cps
last_insert_id(conn, opts)
else
log_connection_yield(msg, conn, args){cps.executeUpdate}
end
end
rescue *DATABASE_ERROR_CLASSES => e
raise_error(e)
ensure
cps.close unless name
end
end
end
# Execute the prepared insert statement
def execute_prepared_statement_insert(stmt)
stmt.executeUpdate
end
# Execute the insert SQL using the statement
def execute_statement_insert(stmt, sql)
stmt.executeUpdate(sql)
end
# The default fetch size to use for statements. Nil by default, so that the
# default for the JDBC driver is used.
def default_fetch_size
nil
end
# Gets the connection from JNDI.
def get_connection_from_jndi
jndi_name = JNDI_URI_REGEXP.match(uri)[1]
javax.naming.InitialContext.new.lookup(jndi_name).connection
end
# Gets the JDBC connection uri from the JNDI resource.
def get_uri_from_jndi
conn = get_connection_from_jndi
conn.meta_data.url
ensure
conn.close if conn
end
# Backbone of the tables and views support.
def get_tables(type, opts)
ts = []
m = output_identifier_meth
if schema = opts[:schema]
schema = schema.to_s
end
metadata(:getTables, nil, schema, nil, [type].to_java(:string)){|h| ts << m.call(h[:table_name])}
ts
end
# Support Date objects used in bound variables
def java_sql_date(date)
java.sql.Date.new(Time.local(date.year, date.month, date.day).to_i * 1000)
end
# Support DateTime objects used in bound variables
def java_sql_datetime(datetime)
ts = java.sql.Timestamp.new(Time.local(datetime.year, datetime.month, datetime.day, datetime.hour, datetime.min, datetime.sec).to_i * 1000)
ts.setNanos((datetime.sec_fraction * 1000000000).to_i)
ts
end
# Support fractional seconds for Time objects used in bound variables
def java_sql_timestamp(time)
ts = java.sql.Timestamp.new(time.to_i * 1000)
ts.setNanos(time.nsec)
ts
end
def log_connection_execute(conn, sql)
statement(conn){|s| log_connection_yield(sql, conn){s.execute(sql)}}
end
# By default, there is no support for determining the last inserted
# id, so return nil. This method should be overridden in
# subadapters.
def last_insert_id(conn, opts)
nil
end
# Yield the metadata for this database
def metadata(*args, &block)
synchronize do |c|
result = c.getMetaData.public_send(*args)
begin
metadata_dataset.send(:process_result_set, result, &block)
ensure
result.close
end
end
end
# Return the schema and table suitable for use with metadata queries.
def metadata_schema_and_table(table, opts)
im = input_identifier_meth(opts[:dataset])
schema, table = schema_and_table(table)
schema ||= opts[:schema]
schema = im.call(schema) if schema
table = im.call(table)
[schema, table]
end
# Create a JDBC prepared statement on the connection with the given SQL.
def prepare_jdbc_statement(conn, sql, opts)
conn.prepareStatement(sql)
end
# Java being java, you need to specify the type of each argument
# for the prepared statement, and bind it individually. This
# guesses which JDBC method to use, and hopefully JRuby will convert
# things properly for us.
def set_ps_arg(cps, arg, i)
case arg
when Integer
cps.setLong(i, arg)
when Sequel::SQL::Blob
cps.setBytes(i, arg.to_java_bytes)
when String
cps.setString(i, arg)
when Float
cps.setDouble(i, arg)
when TrueClass, FalseClass
cps.setBoolean(i, arg)
when NilClass
set_ps_arg_nil(cps, i)
when DateTime
cps.setTimestamp(i, java_sql_datetime(arg))
when Date
cps.setDate(i, java_sql_date(arg))
when Time
cps.setTimestamp(i, java_sql_timestamp(arg))
when Java::JavaSql::Timestamp
cps.setTimestamp(i, arg)
when Java::JavaSql::Date
cps.setDate(i, arg)
else
cps.setObject(i, arg)
end
end
# Use setString with a nil value by default, but this doesn't work on all subadapters.
def set_ps_arg_nil(cps, i)
cps.setString(i, nil)
end
# Return the connection. Can be overridden in subadapters for database specific setup.
def setup_connection(conn)
conn
end
# Setup the connection using the given connection options. Return the connection. Can be overridden in subadapters for database specific setup.
def setup_connection_with_opts(conn, opts)
setup_connection(conn)
end
def schema_column_set_db_type(schema)
case schema[:type]
when :string
if schema[:db_type] =~ /\A(character( varying)?|n?(var)?char2?)\z/io && schema[:column_size] > 0
schema[:db_type] += "(#{schema[:column_size]})"
end
when :decimal
if schema[:db_type] =~ /\A(decimal|numeric)\z/io && schema[:column_size] > 0 && schema[:scale] >= 0
schema[:db_type] += "(#{schema[:column_size]}, #{schema[:scale]})"
end
end
end
def schema_parse_table(table, opts=OPTS)
m = output_identifier_meth(opts[:dataset])
schema, table = metadata_schema_and_table(table, opts)
pks, ts = [], []
metadata(:getPrimaryKeys, nil, schema, table) do |h|
next if schema_parse_table_skip?(h, schema)
pks << h[:column_name]
end
schemas = []
metadata(:getColumns, nil, schema, table, nil) do |h|
next if schema_parse_table_skip?(h, schema)
s = {
:type=>schema_column_type(h[:type_name]),
:db_type=>h[:type_name],
:default=>(h[:column_def] == '' ? nil : h[:column_def]),
:allow_null=>(h[:nullable] != 0),
:primary_key=>pks.include?(h[:column_name]),
:column_size=>h[:column_size],
:scale=>h[:decimal_digits],
:remarks=>h[:remarks]
}
if s[:primary_key]
s[:auto_increment] = h[:is_autoincrement] == "YES"
end
s[:max_length] = s[:column_size] if s[:type] == :string
if s[:db_type] =~ /number|numeric|decimal/i && s[:scale] == 0
s[:type] = :integer
end
schema_column_set_db_type(s)
schemas << h[:table_schem] unless schemas.include?(h[:table_schem])
ts << [m.call(h[:column_name]), s]
end
if schemas.length > 1
raise Error, 'Schema parsing in the jdbc adapter resulted in columns being returned for a table with the same name in multiple schemas. Please explicitly qualify your table with a schema.'
end
ts
end
# Skip tables in the INFORMATION_SCHEMA when parsing columns.
def schema_parse_table_skip?(h, schema)
h[:table_schem] == 'INFORMATION_SCHEMA'
end
# Called after loading subadapter-specific code, overridable by subadapters.
def setup_type_convertor_map
end
# Called before loading subadapter-specific code, necessary so that subadapter initialization code
# that runs queries works correctly. This cannot be overridden in subadapters.
def setup_type_convertor_map_early
@type_convertor_map = TypeConvertor::MAP.merge(Java::JavaSQL::Types::TIMESTAMP=>method(:timestamp_convert))
@basic_type_convertor_map = TypeConvertor::BASIC_MAP.dup
end
# Yield a new statement object, and ensure that it is closed before returning.
def statement(conn)
stmt = conn.createStatement
yield stmt
rescue *DATABASE_ERROR_CLASSES => e
raise_error(e)
ensure
stmt.close if stmt
end
# A conversion method for timestamp columns. This is used to make sure timestamps are converted using the
# correct timezone.
def timestamp_convert(r, i)
if v = r.getTimestamp(i)
to_application_timestamp([v.getYear + 1900, v.getMonth + 1, v.getDate, v.getHours, v.getMinutes, v.getSeconds, v.getNanos])
end
end
end
class Dataset < Sequel::Dataset
include StoredProcedures
PreparedStatementMethods = prepared_statements_module(
"sql = self; opts = Hash[opts]; opts[:arguments] = bind_arguments",
Sequel::Dataset::UnnumberedArgumentMapper,
%w"execute execute_dui") do
private
def execute_insert(sql, opts=OPTS)
sql = self
opts = Hash[opts]
opts[:arguments] = bind_arguments
opts[:type] = :insert
super
end
end
StoredProcedureMethods = prepared_statements_module(
"sql = @opts[:sproc_name]; opts = Hash[opts]; opts[:args] = @opts[:sproc_args]; opts[:sproc] = true",
Sequel::Dataset::StoredProcedureMethods,
%w"execute execute_dui") do
private
def execute_insert(sql, opts=OPTS)
sql = @opts[:sproc_name]
opts = Hash[opts]
opts[:args] = @opts[:sproc_args]
opts[:sproc] = true
opts[:type] = :insert
super
end
end
def fetch_rows(sql, &block)
execute(sql){|result| process_result_set(result, &block)}
self
end
# Set the fetch size on JDBC ResultSets created from the returned dataset.
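#
# e.g. (table name is hypothetical):
#
#   DB[:huge_table].with_fetch_size(1000).each{|row| p row}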
def with_fetch_size(size)
clone(:fetch_size=>size)
end
# Set whether to convert Java types to ruby types in the returned dataset.
def with_convert_types(v)
clone(:convert_types=>v)
end
private
# Whether we should convert Java types to ruby types for this dataset.
def convert_types?
ct = @opts[:convert_types]
ct.nil? ? db.convert_types : ct
end
# Extend the dataset with the JDBC stored procedure methods.
def prepare_extend_sproc(ds)
ds.with_extend(StoredProcedureMethods)
end
# The type conversion proc to use for the given column number i,
# given the type conversion map and the ResultSetMetaData.
def type_convertor(map, meta, type, i)
map[type]
end
# The basic type conversion proc to use for the given column number i,
# given the type conversion map and the ResultSetMetaData.
#
# This is implemented as a separate method so that subclasses can
# override the methods separately.
def basic_type_convertor(map, meta, type, i)
map[type]
end
def prepared_statement_modules
[PreparedStatementMethods]
end
# Split out from fetch rows to allow processing of JDBC result sets
# that don't come from issuing an SQL string.
def process_result_set(result)
meta = result.getMetaData
if fetch_size = opts[:fetch_size]
result.setFetchSize(fetch_size)
end
cols = []
i = 0
convert = convert_types?
map = convert ? db.type_convertor_map : db.basic_type_convertor_map
meta.getColumnCount.times do
i += 1
cols << [output_identifier(meta.getColumnLabel(i)), i, convert ? type_convertor(map, meta, meta.getColumnType(i), i) : basic_type_convertor(map, meta, meta.getColumnType(i), i)]
end
max = i
self.columns = cols.map{|c| c[0]}
while result.next
row = {}
i = -1
while (i += 1) < max
n, j, pr = cols[i]
row[n] = pr.call(result, j)
end
yield row
end
ensure
result.close
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/ 0000775 0000000 0000000 00000000000 14342141206 0017651 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/adapters/jdbc/db2.rb 0000664 0000000 0000000 00000004256 14342141206 0020654 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('com.ibm.db2.jcc.DB2Driver')
require_relative '../shared/db2'
require_relative 'transactions'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:db2] = proc do |db|
db.singleton_class.class_eval do
alias jdbc_schema_parse_table schema_parse_table
alias jdbc_tables tables
alias jdbc_views views
alias jdbc_indexes indexes
include Sequel::JDBC::DB2::DatabaseMethods
alias schema_parse_table jdbc_schema_parse_table
alias tables jdbc_tables
alias views jdbc_views
alias indexes jdbc_indexes
%w'schema_parse_table tables views indexes'.each do |s|
remove_method(:"jdbc_#{s}")
end
end
db.extend_datasets Sequel::DB2::DatasetMethods
com.ibm.db2.jcc.DB2Driver
end
end
module DB2
module DatabaseMethods
include Sequel::DB2::DatabaseMethods
include Sequel::JDBC::Transactions
private
def set_ps_arg(cps, arg, i)
case arg
when Sequel::SQL::Blob
if use_clob_as_blob
cps.setString(i, arg)
else
super
end
else
super
end
end
def last_insert_id(conn, opts=OPTS)
statement(conn) do |stmt|
sql = "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM.SYSDUMMY1"
rs = log_connection_yield(sql, conn){stmt.executeQuery(sql)}
rs.next
rs.getLong(1)
end
end
# Primary key indexes appear to be named sqlNNNN on DB2
def primary_key_index_re
/\Asql\d+\z/i
end
def setup_type_convertor_map
super
map = @type_convertor_map
types = Java::JavaSQL::Types
map[types::NCLOB] = map[types::CLOB] = method(:convert_clob)
end
def convert_clob(r, i)
if v = r.getClob(i)
v = v.getSubString(1, v.length)
v = Sequel::SQL::Blob.new(v) if use_clob_as_blob
v
end
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/derby.rb 0000664 0000000 0000000 00000024300 14342141206 0021302 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('org.apache.derby.jdbc.EmbeddedDriver', :Derby)
require_relative 'transactions'
require_relative '../utils/columns_limit_1'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:derby] = proc do |db|
db.extend(Sequel::JDBC::Derby::DatabaseMethods)
db.dataset_class = Sequel::JDBC::Derby::Dataset
org.apache.derby.jdbc.EmbeddedDriver
end
end
module Derby
module DatabaseMethods
include ::Sequel::JDBC::Transactions
# Derby doesn't support casting integer to varchar, only integer to char,
# and char(254) appears to have the widest support (with char(255) failing).
# This does add a bunch of extra spaces at the end, but those will be trimmed
# elsewhere.
def cast_type_literal(type)
(type == String) ? 'CHAR(254)' : super
end
def database_type
:derby
end
def freeze
svn_version
super
end
# Derby uses an IDENTITY sequence for autoincrementing columns.
def serial_primary_key_options
{:primary_key => true, :type => Integer, :identity=>true, :start_with=>1}
end
# The SVN version of the database.
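#
# e.g. (value is illustrative):
#
#   DB.svn_version # => 1616546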
def svn_version
@svn_version ||= begin
v = synchronize{|c| c.get_meta_data.get_database_product_version}
v =~ /\((\d+)\)\z/
$1.to_i
end
end
# Derby supports transactional DDL statements.
def supports_transactional_ddl?
true
end
private
# Derby optimizes away Sequel's default check of SELECT NULL FROM table,
# so use a SELECT * FROM table there.
def _table_exists?(ds)
ds.first
end
def alter_table_sql(table, op)
case op[:op]
when :rename_column
"RENAME COLUMN #{quote_schema_table(table)}.#{quote_identifier(op[:name])} TO #{quote_identifier(op[:new_name])}"
when :set_column_type
# Derby is very limited in changing a column's type, so adding a new column and then dropping the existing column is
# the best approach, as mentioned in the Derby documentation.
temp_name = :x_sequel_temp_column_x
[alter_table_sql(table, op.merge(:op=>:add_column, :name=>temp_name)),
from(table).update_sql(temp_name=>::Sequel::SQL::Cast.new(op[:name], op[:type])),
alter_table_sql(table, op.merge(:op=>:drop_column)),
alter_table_sql(table, op.merge(:op=>:rename_column, :name=>temp_name, :new_name=>op[:name]))]
when :set_column_null
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} #{op[:null] ? 'NULL' : 'NOT NULL'}"
else
super
end
end
# Derby does not allow adding primary key constraints to NULLable columns.
def can_add_primary_key_constraint_on_nullable_columns?
false
end
# Derby doesn't allow specifying NULL for columns, only NOT NULL.
def column_definition_null_sql(sql, column)
null = column.fetch(:null, column[:allow_null])
sql << " NOT NULL" if null == false || (null.nil? && column[:primary_key])
end
# Add NOT LOGGED for temporary tables to improve performance.
def create_table_sql(name, generator, options)
s = super
s += ' NOT LOGGED' if options[:temp]
s
end
# Insert data from the current table into the new table after
# creating the table, since it is not possible to do it in one step.
def create_table_as(name, sql, options)
super
from(name).insert(sql.is_a?(Dataset) ? sql : dataset.with_sql(sql))
end
# Derby currently only requires WITH NO DATA, with a separate insert
# to import data.
def create_table_as_sql(name, sql, options)
"#{create_table_prefix_sql(name, options)} AS #{sql} WITH NO DATA"
end
# Temporary table creation on Derby uses DECLARE instead of CREATE.
def create_table_prefix_sql(name, options)
if options[:temp]
"DECLARE GLOBAL TEMPORARY TABLE #{quote_identifier(name)}"
else
super
end
end
DATABASE_ERROR_REGEXPS = {
/The statement was aborted because it would have caused a duplicate key value in a unique or primary key constraint or unique index/ => UniqueConstraintViolation,
/violation of foreign key constraint/ => ForeignKeyConstraintViolation,
/The check constraint .+ was violated/ => CheckConstraintViolation,
/cannot accept a NULL value/ => NotNullConstraintViolation,
/A lock could not be obtained due to a deadlock/ => SerializationFailure,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# Use IDENTITY_VAL_LOCAL() to get the last inserted id.
def last_insert_id(conn, opts=OPTS)
statement(conn) do |stmt|
sql = 'SELECT IDENTITY_VAL_LOCAL() FROM sysibm.sysdummy1'
rs = log_connection_yield(sql, conn){stmt.executeQuery(sql)}
rs.next
rs.getLong(1)
end
end
# Handle nil values by using setNull with the correct parameter type.
def set_ps_arg_nil(cps, i)
cps.setNull(i, cps.getParameterMetaData.getParameterType(i))
end
# Derby uses RENAME TABLE syntax to rename tables.
def rename_table_sql(name, new_name)
"RENAME TABLE #{quote_schema_table(name)} TO #{quote_schema_table(new_name)}"
end
# Primary key indexes appear to be named sqlNNNN on Derby
def primary_key_index_re
/\Asql\d+\z/i
end
# If an :identity option is present in the column, add the necessary IDENTITY SQL.
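#
# A sketch of the generated column SQL (table/column names hypothetical):
#
#   DB.create_table(:events) do
#     column :id, Integer, identity: true, start_with: 100, increment_by: 10
#   end
#   # id column SQL: integer GENERATED BY DEFAULT AS IDENTITY (START WITH 100 INCREMENT BY 10)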
def type_literal(column)
if column[:identity]
sql = "#{super} GENERATED BY DEFAULT AS IDENTITY"
if sw = column[:start_with]
sql += " (START WITH #{sw.to_i}"
sql << " INCREMENT BY #{column[:increment_by].to_i}" if column[:increment_by]
sql << ")"
end
sql
else
super
end
end
# Derby uses clob for text types.
def uses_clob_for_text?
true
end
def valid_connection_sql
@valid_connection_sql ||= select(1).sql
end
end
class Dataset < JDBC::Dataset
include ::Sequel::Dataset::ColumnsLimit1
# Derby doesn't support an expression between CASE and WHEN,
# so remove conditions.
def case_expression_sql_append(sql, ce)
super(sql, ce.with_merged_expression)
end
# If the type is String, trim the extra spaces since CHAR is used instead
# of varchar. This can cause problems if you are casting a char/varchar to
# a string and the ending whitespace is important.
def cast_sql_append(sql, expr, type)
if type == String
sql << "RTRIM("
super
sql << ')'
else
super
end
end
def complex_expression_sql_append(sql, op, args)
case op
when :%, :'B~'
complex_expression_emulate_append(sql, op, args)
when :&, :|, :^, :<<, :>>
raise Error, "Derby doesn't support the #{op} operator"
when :**
sql << 'exp('
literal_append(sql, args[1])
sql << ' * ln('
literal_append(sql, args[0])
sql << "))"
when :extract
sql << args[0].to_s << '('
literal_append(sql, args[1])
sql << ')'
else
super
end
end
# Derby supports GROUP BY ROLLUP (but not CUBE)
def supports_group_rollup?
true
end
# Derby does not support IS TRUE.
def supports_is_true?
false
end
# Derby 10.11+ supports MERGE.
def supports_merge?
db.svn_version >= 1616546
end
# Derby does not support IN/NOT IN with multiple columns
def supports_multiple_column_in?
false
end
private
def empty_from_sql
" FROM sysibm.sysdummy1"
end
# Derby needs a hex string casted to BLOB for blobs.
def literal_blob_append(sql, v)
sql << "CAST(X'" << v.unpack("H*").first << "' AS BLOB)"
end
# Derby needs the standard workaround to insert all default values into
# a table with more than one column.
def insert_supports_empty_values?
false
end
# Newer Derby versions can use the FALSE literal, but older versions need an always false expression.
def literal_false
if db.svn_version >= 1040133
'FALSE'
else
'(1 = 0)'
end
end
# Derby handles fractional seconds in timestamps, but not in times
def literal_sqltime(v)
v.strftime("'%H:%M:%S'")
end
# Newer Derby versions can use the TRUE literal, but older versions need an always true expression.
def literal_true
if db.svn_version >= 1040133
'TRUE'
else
'(1 = 1)'
end
end
# Derby supports multiple rows for VALUES in INSERT.
def multi_insert_sql_strategy
:values
end
# Emulate the char_length function with length
def native_function_name(emulated_function)
if emulated_function == :char_length
'length'
else
super
end
end
# Offset comes before limit in Derby
def select_limit_sql(sql)
if o = @opts[:offset]
sql << " OFFSET "
literal_append(sql, o)
sql << " ROWS"
end
if l = @opts[:limit]
sql << " FETCH FIRST "
literal_append(sql, l)
sql << " ROWS ONLY"
end
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/h2.rb 0000664 0000000 0000000 00000021160 14342141206 0020507 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('org.h2.Driver', :H2)
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:h2] = proc do |db|
db.extend(Sequel::JDBC::H2::DatabaseMethods)
db.dataset_class = Sequel::JDBC::H2::Dataset
org.h2.Driver
end
end
module H2
module DatabaseMethods
def commit_prepared_transaction(transaction_id, opts=OPTS)
run("COMMIT TRANSACTION #{transaction_id}", opts)
end
def database_type
:h2
end
def freeze
h2_version
version2?
super
end
def h2_version
@h2_version ||= get(Sequel.function(:H2VERSION))
end
def rollback_prepared_transaction(transaction_id, opts=OPTS)
run("ROLLBACK TRANSACTION #{transaction_id}", opts)
end
# H2 uses an IDENTITY type for primary keys
def serial_primary_key_options
{:primary_key => true, :type => :identity, :identity=>true}
end
# H2 supports CREATE TABLE IF NOT EXISTS syntax
def supports_create_table_if_not_exists?
true
end
# H2 supports prepared transactions
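#
# A two-phase commit sketch (the transaction id string is arbitrary):
#
#   DB.transaction(prepare: 'txn1'){DB[:t].insert(1)}
#   DB.commit_prepared_transaction('txn1') # or rollback_prepared_transaction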
def supports_prepared_transactions?
true
end
# H2 supports savepoints
def supports_savepoints?
true
end
private
# H2 does not allow adding primary key constraints to NULLable columns.
def can_add_primary_key_constraint_on_nullable_columns?
false
end
# If the :prepare option is given and we aren't in a savepoint,
# prepare the transaction for a two-phase commit.
def commit_transaction(conn, opts=OPTS)
if (s = opts[:prepare]) && savepoint_level(conn) <= 1
log_connection_execute(conn, "PREPARE COMMIT #{s}")
else
super
end
end
def alter_table_sql(table, op)
case op[:op]
when :add_column
if (pk = op.delete(:primary_key)) || (ref = op.delete(:table))
if pk
op[:null] = false
end
sqls = [super(table, op)]
if pk && (h2_version >= '1.4' || op[:type] != :identity)
# H2 needs to add a primary key column as a constraint in this case
sqls << "ALTER TABLE #{quote_schema_table(table)} ADD PRIMARY KEY (#{quote_identifier(op[:name])})"
end
if ref
op[:table] = ref
constraint_name = op[:foreign_key_constraint_name]
sqls << "ALTER TABLE #{quote_schema_table(table)} ADD#{" CONSTRAINT #{quote_identifier(constraint_name)}" if constraint_name} FOREIGN KEY (#{quote_identifier(op[:name])}) #{column_references_sql(op)}"
end
sqls
else
super(table, op)
end
when :rename_column
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} RENAME TO #{quote_identifier(op[:new_name])}"
when :set_column_null
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET#{' NOT' unless op[:null]} NULL"
when :set_column_type
if sch = schema(table)
if cs = sch.each{|k, v| break v if k == op[:name]; nil}
cs = cs.dup
cs[:default] = cs[:ruby_default]
op = cs.merge!(op)
end
end
sql = "ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} #{type_literal(op)}".dup
column_definition_order.each{|m| send(:"column_definition_#{m}_sql", sql, op)}
sql
when :drop_constraint
if op[:type] == :primary_key
"ALTER TABLE #{quote_schema_table(table)} DROP PRIMARY KEY"
else
super(table, op)
end
else
super(table, op)
end
end
# Default to a single connection for a memory database.
def connection_pool_default_options
o = super
uri == 'jdbc:h2:mem:' ? o.merge(:max_connections=>1) : o
end
DATABASE_ERROR_REGEXPS = {
/Unique index or primary key violation/ => UniqueConstraintViolation,
/Referential integrity constraint violation/ => ForeignKeyConstraintViolation,
/Check constraint violation/ => CheckConstraintViolation,
/NULL not allowed for column/ => NotNullConstraintViolation,
/Deadlock detected\. The current transaction was rolled back\./ => SerializationFailure,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
def execute_statement_insert(stmt, sql)
stmt.executeUpdate(sql, JavaSQL::Statement::RETURN_GENERATED_KEYS)
end
def prepare_jdbc_statement(conn, sql, opts)
opts[:type] == :insert ? conn.prepareStatement(sql, JavaSQL::Statement::RETURN_GENERATED_KEYS) : super
end
# Get the last inserted id using getGeneratedKeys, scope_identity, or identity.
def last_insert_id(conn, opts=OPTS)
if stmt = opts[:stmt]
rs = stmt.getGeneratedKeys
begin
if rs.next
begin
rs.getLong(1)
rescue
rs.getObject(1) rescue nil
end
end
ensure
rs.close
end
elsif !version2?
statement(conn) do |stmt|
sql = 'SELECT IDENTITY()'
rs = log_connection_yield(sql, conn){stmt.executeQuery(sql)}
rs.next
rs.getLong(1)
end
end
end
def primary_key_index_re
/\Aprimary_key/i
end
# H2 does not support named column constraints.
def supports_named_column_constraints?
false
end
# Use BIGINT IDENTITY for identity columns that use :Bignum type
def type_literal_generic_bignum_symbol(column)
column[:identity] ? 'BIGINT AUTO_INCREMENT' : super
end
def version2?
return @version2 if defined?(@version2)
@version2 = h2_version.to_i >= 2
end
end
class Dataset < JDBC::Dataset
ILIKE_PLACEHOLDER = ["CAST(".freeze, " AS VARCHAR_IGNORECASE)".freeze].freeze
# Emulate the case insensitive LIKE operator and the bitwise operators.
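# A hedged sketch of the ILIKE emulation (hypothetical column :name,
# output formatting approximate):
#
#   DB.from(:t).where(Sequel.ilike(:name, 'a%'))
#   # ... WHERE (CAST(name AS VARCHAR_IGNORECASE) LIKE 'a%')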
def complex_expression_sql_append(sql, op, args)
case op
when :ILIKE, :"NOT ILIKE"
super(sql, (op == :ILIKE ? :LIKE : :"NOT LIKE"), [SQL::PlaceholderLiteralString.new(ILIKE_PLACEHOLDER, [args[0]]), args[1]])
when :&, :|, :^, :<<, :>>, :'B~'
complex_expression_emulate_append(sql, op, args)
else
super
end
end
# H2 does not support derived column lists
def supports_derived_column_lists?
false
end
# H2 requires SQL standard datetimes
def requires_sql_standard_datetimes?
true
end
# H2 doesn't support IS TRUE
def supports_is_true?
false
end
# H2 doesn't support JOIN USING
def supports_join_using?
false
end
# H2 supports MERGE
def supports_merge?
true
end
# H2 doesn't support multiple columns in IN/NOT IN
def supports_multiple_column_in?
false
end
private
# H2 expects hexadecimal strings for blob values
def literal_blob_append(sql, v)
if db.send(:version2?)
super
else
sql << "'" << v.unpack("H*").first << "'"
end
end
def literal_false
'FALSE'
end
def literal_true
'TRUE'
end
# H2 handles fractional seconds in timestamps, but not in times
def literal_sqltime(v)
v.strftime("'%H:%M:%S'")
end
# H2 supports multiple rows in INSERT.
def multi_insert_sql_strategy
:values
end
def select_only_offset_sql(sql)
if db.send(:version2?)
super
else
sql << " LIMIT -1 OFFSET "
literal_append(sql, @opts[:offset])
end
end
# H2 supports quoted function names.
def supports_quoted_function_names?
true
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/hsqldb.rb 0000664 0000000 0000000 00000016740 14342141206 0021463 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('org.hsqldb.jdbcDriver', :HSQLDB)
require_relative 'transactions'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:hsqldb] = proc do |db|
db.extend(Sequel::JDBC::HSQLDB::DatabaseMethods)
db.dataset_class = Sequel::JDBC::HSQLDB::Dataset
org.hsqldb.jdbcDriver
end
end
module HSQLDB
module DatabaseMethods
include ::Sequel::JDBC::Transactions
def database_type
:hsqldb
end
def freeze
db_version
super
end
# HSQLDB uses an IDENTITY sequence as the default value for primary
# key columns.
def serial_primary_key_options
{:primary_key => true, :type => :integer, :identity=>true, :start_with=>1}
end
# The version of the database, as an integer (e.g. 2.2.5 -> 20205)
def db_version
return @db_version if defined?(@db_version)
v = get(Sequel.function(:DATABASE_VERSION))
@db_version = if v =~ /(\d+)\.(\d+)\.(\d+)/
$1.to_i * 10000 + $2.to_i * 100 + $3.to_i
end
end
# HSQLDB supports DROP TABLE IF EXISTS
def supports_drop_table_if_exists?
true
end
private
def alter_table_sql(table, op)
case op[:op]
when :add_column
if op[:table]
[super(table, op.merge(:table=>nil)),
alter_table_sql(table, op.merge(:op=>:add_constraint, :type=>:foreign_key, :name=>op[:foreign_key_constraint_name], :columns=>[op[:name]], :table=>op[:table]))]
else
super
end
when :rename_column
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} RENAME TO #{quote_identifier(op[:new_name])}"
when :set_column_type
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET DATA TYPE #{type_literal(op)}"
when :set_column_null
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET #{op[:null] ? 'NULL' : 'NOT NULL'}"
else
super
end
end
# HSQLDB requires parens around the SELECT, and the WITH DATA syntax.
def create_table_as_sql(name, sql, options)
"#{create_table_prefix_sql(name, options)} AS (#{sql}) WITH DATA"
end
DATABASE_ERROR_REGEXPS = {
/integrity constraint violation: unique constraint or index violation/ => UniqueConstraintViolation,
/integrity constraint violation: foreign key/ => ForeignKeyConstraintViolation,
/integrity constraint violation: check constraint/ => CheckConstraintViolation,
/integrity constraint violation: NOT NULL check constraint/ => NotNullConstraintViolation,
/serialization failure/ => SerializationFailure,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# IF EXISTS comes after table name on HSQLDB
def drop_table_sql(name, options)
"DROP TABLE #{quote_schema_table(name)}#{' IF EXISTS' if options[:if_exists]}#{' CASCADE' if options[:cascade]}"
end
# IF EXISTS comes after view name on HSQLDB
def drop_view_sql(name, options)
"DROP VIEW #{quote_schema_table(name)}#{' IF EXISTS' if options[:if_exists]}#{' CASCADE' if options[:cascade]}"
end
# Use IDENTITY() to get the last inserted id.
def last_insert_id(conn, opts=OPTS)
statement(conn) do |stmt|
sql = 'CALL IDENTITY()'
rs = log_connection_yield(sql, conn){stmt.executeQuery(sql)}
rs.next
rs.getLong(1)
end
end
# Primary key indexes appear to start with sys_idx_sys_pk_ on HSQLDB
def primary_key_index_re
/\Asys_idx_sys_pk_/i
end
# If an :identity option is present in the column, add the necessary IDENTITY SQL.
# It's possible to use an IDENTITY type, but that defaults the sequence to start
# at 0 instead of 1, and we don't want that.
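# For example, a column declared with :identity=>true and
# :start_with=>1 would be literalized roughly as:
#
#   integer GENERATED BY DEFAULT AS IDENTITY (START WITH 1)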
def type_literal(column)
if column[:identity]
sql = "#{super} GENERATED BY DEFAULT AS IDENTITY"
if sw = column[:start_with]
sql += " (START WITH #{sw.to_i}"
sql << " INCREMENT BY #{column[:increment_by].to_i}" if column[:increment_by]
sql << ")"
end
sql
else
super
end
end
# HSQLDB uses clob for text types.
def uses_clob_for_text?
true
end
# HSQLDB supports views with check option.
def view_with_check_option_support
:local
end
end
class Dataset < JDBC::Dataset
# Handle HSQLDB specific case insensitive LIKE and bitwise operator support.
def complex_expression_sql_append(sql, op, args)
case op
when :ILIKE, :"NOT ILIKE"
super(sql, (op == :ILIKE ? :LIKE : :"NOT LIKE"), args.map{|v| SQL::Function.new(:ucase, v)})
when :&, :|, :^, :%, :<<, :>>, :'B~'
complex_expression_emulate_append(sql, op, args)
else
super
end
end
# HSQLDB requires recursive CTEs to have column aliases.
def recursive_cte_requires_column_aliases?
true
end
# HSQLDB requires SQL standard datetimes in some places.
def requires_sql_standard_datetimes?
true
end
# HSQLDB does support common table expressions, but the support is broken.
# CTEs operate more like temporary tables or views, lasting longer than the duration of the expression.
# CTEs in earlier queries might take precedence over CTEs with the same name in later queries.
# Also, if any CTE is recursive, all CTEs must be recursive.
# If you want to use CTEs with HSQLDB, you'll have to manually modify the dataset to allow it.
def supports_cte?(type=:select)
false
end
# HSQLDB does not support IS TRUE.
def supports_is_true?
false
end
# HSQLDB supports lateral subqueries.
def supports_lateral_subqueries?
true
end
# HSQLDB 2.3.4+ supports MERGE. Older versions also support MERGE, but not all
# features that are in Sequel's tests.
def supports_merge?
db.db_version >= 20304
end
private
def empty_from_sql
" FROM (VALUES (0))"
end
# Use string in hex format for blob data.
def literal_blob_append(sql, v)
sql << "X'" << v.unpack("H*").first << "'"
end
# HSQLDB uses FALSE for false values.
def literal_false
'FALSE'
end
# HSQLDB handles fractional seconds in timestamps, but not in times
def literal_sqltime(v)
v.strftime("'%H:%M:%S'")
end
# HSQLDB uses TRUE for true values.
def literal_true
'TRUE'
end
# HSQLDB supports multiple rows in INSERT.
def multi_insert_sql_strategy
:values
end
# Use WITH RECURSIVE instead of WITH if any of the CTEs is recursive
def select_with_sql_base
opts[:with].any?{|w| w[:recursive]} ? "WITH RECURSIVE " : super
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/jtds.rb 0000664 0000000 0000000 00000002120 14342141206 0021135 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('Java::net.sourceforge.jtds.jdbc.Driver', :JTDS)
require_relative 'mssql'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:jtds] = proc do |db|
db.extend(Sequel::JDBC::JTDS::DatabaseMethods)
db.extend_datasets Sequel::MSSQL::DatasetMethods
db.send(:set_mssql_unicode_strings)
Java::net.sourceforge.jtds.jdbc.Driver
end
end
module JTDS
module DatabaseMethods
include Sequel::JDBC::MSSQL::DatabaseMethods
private
# JTDS exception handling with SQLState is less accurate than with regexps.
def database_exception_use_sqlstates?
false
end
def disconnect_error?(exception, opts)
super || exception.message =~ /\AInvalid state, the Connection object is closed\.\z/
end
# Handle nil values by using setNull with the correct parameter type.
def set_ps_arg_nil(cps, i)
cps.setNull(i, cps.getParameterMetaData.getParameterType(i))
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/mssql.rb 0000664 0000000 0000000 00000001365 14342141206 0021342 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/mssql'
module Sequel
module JDBC
module MSSQL
module DatabaseMethods
include Sequel::MSSQL::DatabaseMethods
private
# Get the last inserted id using SCOPE_IDENTITY().
def last_insert_id(conn, opts=OPTS)
statement(conn) do |stmt|
sql = opts[:prepared] ? 'SELECT @@IDENTITY' : 'SELECT SCOPE_IDENTITY()'
rs = log_connection_yield(sql, conn){stmt.executeQuery(sql)}
rs.next
rs.getLong(1)
end
end
# Primary key indexes appear to start with pk__ on MSSQL
def primary_key_index_re
/\Apk__/i
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/mysql.rb 0000664 0000000 0000000 00000006035 14342141206 0021347 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module JDBC
driver = Sequel::JDBC.load_driver(%w'com.mysql.cj.jdbc.Driver com.mysql.jdbc.Driver', :MySQL)
require_relative '../shared/mysql'
Sequel.synchronize do
DATABASE_SETUP[:mysql] = proc do |db|
db.extend(Sequel::JDBC::MySQL::DatabaseMethods)
db.extend_datasets Sequel::MySQL::DatasetMethods
driver
end
end
module MySQL
module DatabaseMethods
include Sequel::MySQL::DatabaseMethods
private
# MySQL exception handling with SQLState is less accurate than with regexps.
def database_exception_use_sqlstates?
false
end
# Raise a disconnect error if the exception message indicates the communications link failed.
def disconnect_error?(exception, opts)
exception.message =~ /\ACommunications link failure/ || super
end
# Get the last inserted id using LAST_INSERT_ID().
def last_insert_id(conn, opts=OPTS)
if stmt = opts[:stmt]
rs = stmt.getGeneratedKeys
begin
if rs.next
rs.getLong(1)
else
0
end
ensure
rs.close
end
else
statement(conn) do |st|
rs = st.executeQuery('SELECT LAST_INSERT_ID()')
rs.next
rs.getLong(1)
end
end
end
# The MySQL 5.1.12 JDBC adapter requires that generated keys be
# requested for insert statements, and previous versions don't mind.
def execute_statement_insert(stmt, sql)
stmt.executeUpdate(sql, JavaSQL::Statement::RETURN_GENERATED_KEYS)
end
# Return generated keys for insert statements.
def prepare_jdbc_statement(conn, sql, opts)
opts[:type] == :insert ? conn.prepareStatement(sql, JavaSQL::Statement::RETURN_GENERATED_KEYS) : super
end
# Convert tinyint(1) type to boolean
def schema_column_type(db_type)
db_type =~ /\Atinyint\(1\)/ ? :boolean : super
end
# Run the default connection setting SQL statements on
# every new connection.
def setup_connection(conn)
mysql_connection_setting_sqls.each{|sql| statement(conn){|s| log_connection_yield(sql, conn){s.execute(sql)}}}
super
end
# Handle unsigned integer values
def setup_type_convertor_map
super
@type_convertor_map[Java::JavaSQL::Types::SMALLINT] = @type_convertor_map[Java::JavaSQL::Types::INTEGER]
@type_convertor_map[Java::JavaSQL::Types::INTEGER] = @type_convertor_map[Java::JavaSQL::Types::BIGINT]
@basic_type_convertor_map[Java::JavaSQL::Types::SMALLINT] = @basic_type_convertor_map[Java::JavaSQL::Types::INTEGER]
@basic_type_convertor_map[Java::JavaSQL::Types::INTEGER] = @basic_type_convertor_map[Java::JavaSQL::Types::BIGINT]
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/oracle.rb 0000664 0000000 0000000 00000010112 14342141206 0021436 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('Java::oracle.jdbc.driver.OracleDriver')
require_relative '../shared/oracle'
require_relative 'transactions'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:oracle] = proc do |db|
db.extend(Sequel::JDBC::Oracle::DatabaseMethods)
db.dataset_class = Sequel::JDBC::Oracle::Dataset
Java::oracle.jdbc.driver.OracleDriver
end
end
module Oracle
JAVA_BIG_DECIMAL_CONSTRUCTOR = java.math.BigDecimal.java_class.constructor(Java::long).method(:new_instance)
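# Convertor for Oracle NUMBER values: integral values (e.g. 10) are
# returned as Ruby Integers, fractional values (e.g. 10.5) as BigDecimals.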
ORACLE_DECIMAL = Object.new
def ORACLE_DECIMAL.call(r, i)
if v = r.getBigDecimal(i)
i = v.long_value
if v == JAVA_BIG_DECIMAL_CONSTRUCTOR.call(i)
i
else
::Kernel::BigDecimal(v.to_string)
end
end
end
ORACLE_CLOB = Object.new
def ORACLE_CLOB.call(r, i)
return unless clob = r.getClob(i)
str = clob.getSubString(1, clob.length)
clob.freeTemporary if clob.isTemporary
str
end
module DatabaseMethods
include Sequel::Oracle::DatabaseMethods
include Sequel::JDBC::Transactions
def self.extended(db)
db.instance_exec do
@autosequence = opts[:autosequence]
@primary_key_sequences = {}
end
end
private
# Oracle exception handling with SQLState is less accurate than with regexps.
def database_exception_use_sqlstates?
false
end
def disconnect_error?(exception, opts)
super || exception.message =~ /\AClosed Connection/
end
# Default the fetch size for statements to 100, similar to the oci8-based oracle adapter.
def default_fetch_size
100
end
def last_insert_id(conn, opts)
unless sequence = opts[:sequence]
if t = opts[:table]
sequence = sequence_for_table(t)
end
end
if sequence
sql = "SELECT #{literal(sequence)}.currval FROM dual"
statement(conn) do |stmt|
begin
rs = log_connection_yield(sql, conn){stmt.executeQuery(sql)}
rs.next
rs.getLong(1)
rescue java.sql.SQLException
nil
end
end
end
end
# Primary key indexes appear to start with sys_ on Oracle
def primary_key_index_re
/\Asys_/i
end
def schema_parse_table(*)
sch = super
sch.each do |c, s|
if s[:type] == :decimal && s[:scale] == -127
s[:type] = :integer
elsif s[:db_type] == 'DATE'
s[:type] = :datetime
end
end
sch
end
def schema_parse_table_skip?(h, schema)
super || (h[:table_schem] != current_user unless schema)
end
# As of Oracle 9.2, releasing savepoints is no longer supported.
def supports_releasing_savepoints?
false
end
def setup_type_convertor_map
super
@type_convertor_map[:OracleDecimal] = ORACLE_DECIMAL
@type_convertor_map[:OracleClob] = ORACLE_CLOB
end
end
class Dataset < JDBC::Dataset
include Sequel::Oracle::DatasetMethods
NUMERIC_TYPE = Java::JavaSQL::Types::NUMERIC
TIMESTAMP_TYPE = Java::JavaSQL::Types::TIMESTAMP
CLOB_TYPE = Java::JavaSQL::Types::CLOB
TIMESTAMPTZ_TYPES = [Java::oracle.jdbc.OracleTypes::TIMESTAMPTZ, Java::oracle.jdbc.OracleTypes::TIMESTAMPLTZ].freeze
def type_convertor(map, meta, type, i)
case type
when NUMERIC_TYPE
if meta.getScale(i) == 0
map[:OracleDecimal]
else
super
end
when *TIMESTAMPTZ_TYPES
map[TIMESTAMP_TYPE]
when CLOB_TYPE
map[:OracleClob]
else
super
end
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/postgresql.rb 0000664 0000000 0000000 00000020066 14342141206 0022405 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('org.postgresql.Driver', :Postgres)
require_relative '../shared/postgres'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:postgresql] = proc do |db|
db.dataset_class = Sequel::JDBC::Postgres::Dataset
db.extend(Sequel::JDBC::Postgres::DatabaseMethods)
org.postgresql.Driver
end
end
module Postgres
module DatabaseMethods
include Sequel::Postgres::DatabaseMethods
# Add the primary_keys and primary_key_sequences instance variables,
# so we can get the correct return values for inserted rows.
def self.extended(db)
super
db.send(:initialize_postgres_adapter)
end
# Remove any current entry for the oid in the oid_convertor_map.
def add_conversion_proc(oid, *)
super
Sequel.synchronize{@oid_convertor_map.delete(oid)}
end
# See Sequel::Postgres::Adapter#copy_into
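# A usage sketch (hypothetical table and tab-separated :data value):
#
#   DB.copy_into(:items, :data=>"1\ta\n2\tb\n")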
def copy_into(table, opts=OPTS)
data = opts[:data]
data = Array(data) if data.is_a?(String)
if defined?(yield) && data
raise Error, "Cannot provide both a :data option and a block to copy_into"
elsif !defined?(yield) && !data
raise Error, "Must provide either a :data option or a block to copy_into"
end
synchronize(opts[:server]) do |conn|
begin
copy_manager = org.postgresql.copy.CopyManager.new(conn)
copier = copy_manager.copy_in(copy_into_sql(table, opts))
if defined?(yield)
while buf = yield
java_bytes = buf.to_java_bytes
copier.writeToCopy(java_bytes, 0, java_bytes.length)
end
else
data.each do |d|
java_bytes = d.to_java_bytes
copier.writeToCopy(java_bytes, 0, java_bytes.length)
end
end
rescue Exception => e
copier.cancelCopy if copier
raise
ensure
unless e
begin
copier.endCopy
rescue NativeException => e2
raise_error(e2)
end
end
end
end
end
# See Sequel::Postgres::Adapter#copy_table
def copy_table(table, opts=OPTS)
synchronize(opts[:server]) do |conn|
copy_manager = org.postgresql.copy.CopyManager.new(conn)
copier = copy_manager.copy_out(copy_table_sql(table, opts))
begin
if defined?(yield)
while buf = copier.readFromCopy
yield(String.from_java_bytes(buf))
end
nil
else
b = String.new
while buf = copier.readFromCopy
b << String.from_java_bytes(buf)
end
b
end
rescue => e
raise_error(e, :disconnect=>true)
ensure
if buf && !e
raise DatabaseDisconnectError, "disconnecting as a partial COPY may leave the connection in an unusable state"
end
end
end
end
def oid_convertor_proc(oid)
if (conv = Sequel.synchronize{@oid_convertor_map[oid]}).nil?
conv = if pr = conversion_procs[oid]
lambda do |r, i|
if v = r.getString(i)
pr.call(v)
end
end
else
false
end
Sequel.synchronize{@oid_convertor_map[oid] = conv}
end
conv
end
private
def disconnect_error?(exception, opts)
super || exception.message =~ /\A(This connection has been closed\.|FATAL: terminating connection due to administrator command|An I\/O error occurred while sending to the backend\.)\z/
end
# For PostgreSQL-specific types, return the string that should be used
# as the PGObject value. Returns nil by default, loading pg_* extensions
# will override this to add support for specific types.
def bound_variable_arg(arg, conn)
nil
end
# Work around issue when using Sequel's bound variable support where the
# same SQL is used in different bound variable calls, but the schema has
# changed between the calls. This is necessary as jdbc-postgres versions
# after 9.4.1200 violate the JDBC API. These versions cache separate
# PreparedStatement instances, which are eventually prepared server side after the
# prepareThreshold is met. The JDBC API violation is that PreparedStatement#close
# does not release the server side prepared statement.
def prepare_jdbc_statement(conn, sql, opts)
ps = super
unless opts[:name]
ps.prepare_threshold = 0
end
ps
end
# If the given argument is a recognized PostgreSQL-specific type, create
# a PGObject instance with unknown type and the bound argument string value,
# and set that as the prepared statement argument.
def set_ps_arg(cps, arg, i)
if v = bound_variable_arg(arg, nil)
obj = org.postgresql.util.PGobject.new
obj.setType("unknown")
obj.setValue(v)
cps.setObject(i, obj)
else
super
end
end
# Use setNull for nil arguments as the default behavior of setString
# with nil doesn't appear to work correctly on PostgreSQL.
def set_ps_arg_nil(cps, i)
cps.setNull(i, JavaSQL::Types::NULL)
end
# Execute the connection configuration SQL queries on the connection.
def setup_connection_with_opts(conn, opts)
conn = super
statement(conn) do |stmt|
connection_configuration_sqls(opts).each{|sql| log_connection_yield(sql, conn){stmt.execute(sql)}}
end
conn
end
def setup_type_convertor_map
super
@oid_convertor_map = {}
end
end
class Dataset < JDBC::Dataset
include Sequel::Postgres::DatasetMethods
# Warn when called, as the fetch size is currently ignored by the JDBC adapter.
def with_fetch_size(size)
warn("Sequel::JDBC::Postgres::Dataset#with_fetch_size does not currently have an effect.", :uplevel=>1)
super
end
private
# Literalize strings similar to the native postgres adapter
def literal_string_append(sql, v)
sql << "'" << db.synchronize(@opts[:server]){|c| c.escape_string(v)} << "'"
end
# SQL fragment for Sequel::SQLTime, containing just the time part
def literal_sqltime(v)
v.strftime("'%H:%M:%S#{sprintf(".%03d", (v.usec/1000.0).round)}'")
end
STRING_TYPE = Java::JavaSQL::Types::VARCHAR
ARRAY_TYPE = Java::JavaSQL::Types::ARRAY
PG_SPECIFIC_TYPES = [Java::JavaSQL::Types::ARRAY, Java::JavaSQL::Types::OTHER, Java::JavaSQL::Types::STRUCT, Java::JavaSQL::Types::TIME_WITH_TIMEZONE, Java::JavaSQL::Types::TIME].freeze
# Return PostgreSQL hstore types as ruby Hashes instead of
# Java HashMaps. Only used if the database does not have a
# conversion proc for the type.
HSTORE_METHOD = Object.new
def HSTORE_METHOD.call(r, i)
if v = r.getObject(i)
v.to_hash
end
end
def type_convertor(map, meta, type, i)
case type
when *PG_SPECIFIC_TYPES
oid = meta.getField(i).getOID
if pr = db.oid_convertor_proc(oid)
pr
elsif oid == 2950 # UUID
map[STRING_TYPE]
elsif meta.getPGType(i) == 'hstore'
HSTORE_METHOD
else
super
end
else
super
end
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/sqlanywhere.rb 0000664 0000000 0000000 00000003665 14342141206 0022552 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/sqlanywhere'
require_relative 'transactions'
module Sequel
module JDBC
drv = [
lambda{Java::sybase.jdbc4.sqlanywhere.IDriver},
lambda{Java::ianywhere.ml.jdbcodbc.jdbc4.IDriver},
lambda{Java::sybase.jdbc.sqlanywhere.IDriver},
lambda{Java::ianywhere.ml.jdbcodbc.jdbc.IDriver},
lambda{Java::com.sybase.jdbc4.jdbc.Sybdriver},
lambda{Java::com.sybase.jdbc3.jdbc.Sybdriver}
].inject(nil) do |_, class_proc|
begin
break class_proc.call
rescue NameError
# Try the next driver class; drv stays nil if none of them load,
# so the AdapterNotFound check below can actually fire.
nil
end
end
raise(Sequel::AdapterNotFound, "no suitable SQLAnywhere JDBC driver found") unless drv
Sequel.synchronize do
DATABASE_SETUP[:sqlanywhere] = proc do |db|
db.extend(Sequel::JDBC::SqlAnywhere::DatabaseMethods)
db.convert_smallint_to_bool = true
db.dataset_class = Sequel::JDBC::SqlAnywhere::Dataset
drv
end
end
module SqlAnywhere
module DatabaseMethods
include Sequel::SqlAnywhere::DatabaseMethods
include Sequel::JDBC::Transactions
private
# Use @@IDENTITY to get the last inserted id
def last_insert_id(conn, opts=OPTS)
statement(conn) do |stmt|
sql = 'SELECT @@IDENTITY'
rs = log_connection_yield(sql, conn){stmt.executeQuery(sql)}
rs.next
rs.getLong(1)
end
end
end
class Dataset < JDBC::Dataset
include Sequel::SqlAnywhere::DatasetMethods
private
SMALLINT_TYPE = Java::JavaSQL::Types::SMALLINT
BOOLEAN_METHOD = Object.new
def BOOLEAN_METHOD.call(r, i)
v = r.getShort(i)
v != 0 unless r.wasNull
end
def type_convertor(map, meta, type, i)
if convert_smallint_to_bool && type == SMALLINT_TYPE
BOOLEAN_METHOD
else
super
end
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/sqlite.rb 0000664 0000000 0000000 00000011125 14342141206 0021477 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('org.sqlite.JDBC', :SQLite3)
require_relative '../shared/sqlite'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:sqlite] = proc do |db|
db.extend(Sequel::JDBC::SQLite::DatabaseMethods)
db.extend_datasets Sequel::SQLite::DatasetMethods
db.set_integer_booleans
org.sqlite.JDBC
end
end
module SQLite
module ForeignKeyListPragmaConvertorFix
# For the use of the convertor for String, working around a bug
# in jdbc-sqlite3 that reports fields are of type
# java.sql.types.NUMERIC even though they contain non-numeric data.
def type_convertor(_, _, _, i)
i > 2 ? TypeConvertor::CONVERTORS[:String] : super
end
end
module TableInfoPragmaConvertorFix
# Force the use of the String convertor, working around a bug
# in jdbc-sqlite3 that reports the dflt_value field as being of type
# java.sql.types.NUMERIC even though it contains string data.
def type_convertor(_, _, _, i)
i == 5 ? TypeConvertor::CONVERTORS[:String] : super
end
end
module DatabaseMethods
include Sequel::SQLite::DatabaseMethods
# Swallow pointless exceptions when the foreign key list pragma
# doesn't return any rows.
def foreign_key_list(table, opts=OPTS)
super
rescue Sequel::DatabaseError => e
raise unless foreign_key_error?(e)
[]
end
# Swallow pointless exceptions when the index list pragma
# doesn't return any rows.
def indexes(table, opts=OPTS)
super
rescue Sequel::DatabaseError => e
raise unless foreign_key_error?(e)
{}
end
private
# Add workaround for bug when running foreign_key_list pragma
def _foreign_key_list_ds(_)
super.with_extend(ForeignKeyListPragmaConvertorFix)
end
# Add workaround for bug when running table_info pragma
def _parse_pragma_ds(_, _)
super.with_extend(TableInfoPragmaConvertorFix)
end
DATABASE_ERROR_REGEXPS = Sequel::SQLite::DatabaseMethods::DATABASE_ERROR_REGEXPS.merge(/Abort due to constraint violation/ => ConstraintViolation).freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# Use last_insert_rowid() to get the last inserted id.
def last_insert_id(conn, opts=OPTS)
statement(conn) do |stmt|
rs = stmt.executeQuery('SELECT last_insert_rowid()')
rs.next
rs.getLong(1)
end
end
# Default to a single connection for a memory database.
def connection_pool_default_options
o = super
uri == 'jdbc:sqlite::memory:' ? o.merge(:max_connections=>1) : o
end
# Execute the connection pragmas on the connection.
def setup_connection(conn)
conn = super(conn)
statement(conn) do |stmt|
connection_pragmas.each{|s| log_connection_yield(s, conn){stmt.execute(s)}}
end
conn
end
# Whether the given exception is due to a foreign key error.
def foreign_key_error?(exception)
exception.message =~ /query does not return ResultSet/
end
# Use getLong instead of getInt for converting integers on SQLite, since SQLite does not enforce a limit of 2**32.
# Work around regressions in jdbc-sqlite 3.8.7 for date and blob types.
def setup_type_convertor_map
super
@type_convertor_map[Java::JavaSQL::Types::INTEGER] = @type_convertor_map[Java::JavaSQL::Types::BIGINT]
@basic_type_convertor_map[Java::JavaSQL::Types::INTEGER] = @basic_type_convertor_map[Java::JavaSQL::Types::BIGINT]
x = @type_convertor_map[Java::JavaSQL::Types::DATE] = Object.new
def x.call(r, i)
if v = r.getString(i)
Sequel.string_to_date(v)
end
end
x = @type_convertor_map[Java::JavaSQL::Types::BLOB] = Object.new
def x.call(r, i)
if v = r.getBytes(i)
Sequel::SQL::Blob.new(String.from_java_bytes(v))
elsif !r.wasNull
Sequel::SQL::Blob.new('')
end
end
end
# The result code for the exception, if the jdbc driver supports result codes for exceptions.
def sqlite_error_code(exception)
exception.resultCode.code if exception.respond_to?(:resultCode)
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/sqlserver.rb 0000664 0000000 0000000 00000005654 14342141206 0022236 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
Sequel::JDBC.load_driver('com.microsoft.sqlserver.jdbc.SQLServerDriver')
require_relative 'mssql'
module Sequel
module JDBC
Sequel.synchronize do
DATABASE_SETUP[:sqlserver] = proc do |db|
db.extend(Sequel::JDBC::SQLServer::DatabaseMethods)
db.extend_datasets Sequel::MSSQL::DatasetMethods
db.send(:set_mssql_unicode_strings)
com.microsoft.sqlserver.jdbc.SQLServerDriver
end
end
module SQLServer
MSSQL_RUBY_TIME = Object.new
def MSSQL_RUBY_TIME.call(r, i)
# MSSQL-Server TIME should be fetched as string to keep the precision intact, see:
# https://docs.microsoft.com/en-us/sql/t-sql/data-types/time-transact-sql#a-namebackwardcompatibilityfordownlevelclientsa-backward-compatibility-for-down-level-clients
if v = r.getString(i)
Sequel.string_to_time("#{v}")
end
end
module DatabaseMethods
include Sequel::JDBC::MSSQL::DatabaseMethods
def setup_type_convertor_map
super
map = @type_convertor_map
map[Java::JavaSQL::Types::TIME] = MSSQL_RUBY_TIME
# Work around constant lazy loading in some drivers
begin
dto = Java::MicrosoftSql::Types::DATETIMEOFFSET
rescue NameError
end
if dto
map[dto] = lambda do |r, i|
if v = r.getDateTimeOffset(i)
to_application_timestamp(v.to_s)
end
end
end
end
# Work around a bug in SQL Server JDBC Driver 3.0, where the metadata
# for the getColumns result set specifies an incorrect type for the
# IS_AUTOINCREMENT column. The column is a string, but the type is
# specified as a short. This causes getObject() to throw a
# com.microsoft.sqlserver.jdbc.SQLServerException: "The conversion
# from char to SMALLINT is unsupported." Using getString() rather
# than getObject() for this column avoids the problem.
# Reference: http://social.msdn.microsoft.com/Forums/en/sqldataaccess/thread/20df12f3-d1bf-4526-9daa-239a83a8e435
module MetadataDatasetMethods
def type_convertor(map, meta, type, i)
if output_identifier(meta.getColumnLabel(i)) == :is_autoincrement
map[Java::JavaSQL::Types::VARCHAR]
else
super
end
end
def basic_type_convertor(map, meta, type, i)
if output_identifier(meta.getColumnLabel(i)) == :is_autoincrement
map[Java::JavaSQL::Types::VARCHAR]
else
super
end
end
end
private
def _metadata_dataset
super.with_extend(MetadataDatasetMethods)
end
def disconnect_error?(exception, opts)
super || (exception.message =~ /connection is closed/)
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/jdbc/transactions.rb 0000664 0000000 0000000 00000007143 14342141206 0022713 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module JDBC
module Transactions
def freeze
supports_savepoints?
super
end
# Check the JDBC DatabaseMetaData for savepoint support
def supports_savepoints?
return @supports_savepoints if defined?(@supports_savepoints)
@supports_savepoints = synchronize{|c| c.getMetaData.supports_savepoints}
end
# Check the JDBC DatabaseMetaData for support for serializable isolation,
# since that's the value most people will use.
def supports_transaction_isolation_levels?
synchronize{|conn| conn.getMetaData.supportsTransactionIsolationLevel(JavaSQL::Connection::TRANSACTION_SERIALIZABLE)}
end
private
JDBC_TRANSACTION_ISOLATION_LEVELS = {:uncommitted=>JavaSQL::Connection::TRANSACTION_READ_UNCOMMITTED,
:committed=>JavaSQL::Connection::TRANSACTION_READ_COMMITTED,
:repeatable=>JavaSQL::Connection::TRANSACTION_REPEATABLE_READ,
:serializable=>JavaSQL::Connection::TRANSACTION_SERIALIZABLE}.freeze
# Set the transaction isolation level on the given connection using
# the JDBC API.
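# For example, the following would call setTransactionIsolation with
# TRANSACTION_SERIALIZABLE on the underlying JDBC connection, if the
# driver reports support for that level:
#
#   DB.transaction(:isolation=>:serializable){}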
def set_transaction_isolation(conn, opts)
level = opts.fetch(:isolation, transaction_isolation_level)
if (jdbc_level = JDBC_TRANSACTION_ISOLATION_LEVELS[level]) &&
conn.getMetaData.supportsTransactionIsolationLevel(jdbc_level)
_trans(conn)[:original_jdbc_isolation_level] = conn.getTransactionIsolation
log_connection_yield("Transaction.isolation_level = #{level}", conn){conn.setTransactionIsolation(jdbc_level)}
end
end
# Most JDBC drivers that support savepoints support releasing them.
def supports_releasing_savepoints?
true
end
# JDBC savepoint object for the current savepoint for the connection.
def savepoint_obj(conn)
_trans(conn)[:savepoints][-1][:obj]
end
# Use JDBC connection's setAutoCommit to false to start transactions
def begin_transaction(conn, opts=OPTS)
if in_savepoint?(conn)
_trans(conn)[:savepoints][-1][:obj] = log_connection_yield('Transaction.savepoint', conn){conn.set_savepoint}
else
log_connection_yield('Transaction.begin', conn){conn.setAutoCommit(false)}
set_transaction_isolation(conn, opts)
end
end
# Use JDBC connection's commit method to commit transactions
def commit_transaction(conn, opts=OPTS)
if in_savepoint?(conn)
if supports_releasing_savepoints?
log_connection_yield('Transaction.release_savepoint', conn){conn.release_savepoint(savepoint_obj(conn))}
end
else
log_connection_yield('Transaction.commit', conn){conn.commit}
end
end
# Use JDBC connection's setAutoCommit to true to enable non-transactional behavior
def remove_transaction(conn, committed)
if jdbc_level = _trans(conn)[:original_jdbc_isolation_level]
log_connection_yield("Transaction.restore_isolation_level", conn){conn.setTransactionIsolation(jdbc_level)}
end
unless in_savepoint?(conn)
conn.setAutoCommit(true)
end
ensure
super
end
# Use JDBC connection's rollback method to rollback transactions
def rollback_transaction(conn, opts=OPTS)
if in_savepoint?(conn)
log_connection_yield('Transaction.rollback_savepoint', conn){conn.rollback(savepoint_obj(conn))}
else
log_connection_yield('Transaction.rollback', conn){conn.rollback}
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/mock.rb 0000664 0000000 0000000 00000026127 14342141206 0020235 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative 'utils/unmodified_identifiers'
module Sequel
module Mock
class Connection
# Sequel::Mock::Database object that created this connection
attr_reader :db
# Shard this connection operates on, when using Sequel's
# sharding support (always :default for databases not using
# sharding).
attr_reader :server
# The specific database options for this connection.
attr_reader :opts
# Store the db, server, and opts.
def initialize(db, server, opts)
@db = db
@server = server
@opts = opts
end
# Delegate to the db's #_execute method.
def execute(sql)
@db.send(:_execute, self, sql, :log=>false)
end
end
class Database < Sequel::Database
set_adapter_scheme :mock
# Set the autogenerated primary key integer
# to be returned when running an insert query.
# Argument types supported:
#
# nil :: Return nil for all inserts
# Integer :: Starting integer for next insert, with
# further inserts getting an incremented
# value
# Array :: First insert gets the first value in the
# array, second gets the second value, etc.
# Proc :: Called with the insert SQL query, uses
# the value returned
# Class :: Should be an Exception subclass, will create a new
# instance and raise it wrapped in a DatabaseError.
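#
# A minimal sketch:
#
#   db = Sequel.mock(:autoid=>1)
#   db[:items].insert(:name=>'a') # => 1
#   db[:items].insert(:name=>'b') # => 2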
def autoid=(v)
@autoid = case v
when Integer
i = v - 1
proc{@mutex.synchronize{i+=1}}
else
v
end
end
# Set the columns to set in the dataset when the dataset fetches
# rows. Argument types supported:
# nil :: Set no columns
# Array of Symbols :: Used for all datasets
# Array (otherwise) :: First retrieval gets the first value in the
# array, second gets the second value, etc.
# Proc :: Called with the select SQL query, uses the value
# returned, which should be an array of symbols
attr_writer :columns
# Set the hashes to yield by execute when retrieving rows.
# Argument types supported:
#
# nil :: Yield no rows
# Hash :: Always yield a single row with this hash
# Array of Hashes :: Yield separately for each hash in this array
# Array (otherwise) :: First retrieval gets the first value
# in the array, second gets the second value, etc.
# Proc :: Called with the select SQL query, uses
# the value returned, which should be a hash or
# array of hashes.
# Class :: Should be an Exception subclass, will create a new
# instance and raise it wrapped in a DatabaseError.
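#
# A minimal sketch:
#
#   db = Sequel.mock(:fetch=>{:id=>1})
#   db[:items].all # => [{:id=>1}]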
attr_writer :fetch
# Set the number of rows to return from update or delete.
# Argument types supported:
#
# nil :: Return 0 for all updates and deletes
# Integer :: Used for all updates and deletes
# Array :: First update/delete gets the first value in the
# array, second gets the second value, etc.
# Proc :: Called with the update/delete SQL query, uses
# the value returned.
# Class :: Should be an Exception subclass, will create a new
# instance and raise it wrapped in a DatabaseError.
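#
# A minimal sketch:
#
#   db = Sequel.mock(:numrows=>2)
#   db[:items].update(:active=>true) # => 2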
attr_writer :numrows
# Mock the server version, useful when using the shared adapters
attr_accessor :server_version
# Return a related Connection option connecting to the given shard.
def connect(server)
Connection.new(self, server, server_opts(server))
end
def disconnect_connection(c)
end
# Store the sql used for later retrieval with #sqls, and return
# the appropriate value using either the #autoid, #fetch, or
# #numrows methods.
def execute(sql, opts=OPTS, &block)
synchronize(opts[:server]){|c| _execute(c, sql, opts, &block)}
end
alias execute_ddl execute
# Store the sql used, and return the value of the #numrows method.
def execute_dui(sql, opts=OPTS)
execute(sql, opts.merge(:meth=>:numrows))
end
# Store the sql used, and return the value of the #autoid method.
def execute_insert(sql, opts=OPTS)
execute(sql, opts.merge(:meth=>:autoid))
end
# Return all stored SQL queries, and clear the cache
# of SQL queries.
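#
# A minimal sketch:
#
#   db = Sequel.mock
#   db[:items].all
#   db.sqls # => ["SELECT * FROM items"]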
def sqls
@mutex.synchronize do
s = @sqls.dup
@sqls.clear
s
end
end
# Enable use of savepoints.
def supports_savepoints?
shared_adapter? ? super : true
end
private
def _execute(c, sql, opts=OPTS, &block)
sql += " -- args: #{opts[:arguments].inspect}" if opts[:arguments]
sql += " -- #{@opts[:append]}" if @opts[:append]
sql += " -- #{c.server.is_a?(Symbol) ? c.server : c.server.inspect}" if c.server != :default
log_connection_yield(sql, c){} unless opts[:log] == false
@mutex.synchronize{@sqls << sql}
ds = opts[:dataset]
begin
if block
columns(ds, sql) if ds
_fetch(sql, (ds._fetch if ds) || @fetch, &block)
elsif meth = opts[:meth]
if meth == :numrows
_numrows(sql, (ds.numrows if ds) || @numrows)
else
if ds
@mutex.synchronize do
v = ds.autoid
if v.is_a?(Integer)
ds.send(:cache_set, :_autoid, v + 1)
end
v
end
end || _nextres(@autoid, sql, nil)
end
end
rescue => e
raise_error(e)
end
end
def _fetch(sql, f, &block)
case f
when Hash
yield f.dup
when Array
if f.all?{|h| h.is_a?(Hash)}
f.each{|h| yield h.dup}
else
_fetch(sql, @mutex.synchronize{f.shift}, &block)
end
when Proc
h = f.call(sql)
if h.is_a?(Hash)
yield h.dup
elsif h
h.each{|h1| yield h1.dup}
end
when Class
if f < Exception
raise f
else
raise Error, "Invalid @fetch attribute: #{v.inspect}"
end
when nil
# nothing
else
raise Error, "Invalid @fetch attribute: #{f.inspect}"
end
end
def _nextres(v, sql, default)
case v
when Integer
v
when Array
v.empty? ? default : _nextres(@mutex.synchronize{v.shift}, sql, default)
when Proc
v.call(sql)
when Class
if v < Exception
raise v
else
raise Error, "Invalid @autoid/@numrows attribute: #{v.inspect}"
end
when nil
default
else
raise Error, "Invalid @autoid/@numrows attribute: #{v.inspect}"
end
end
def _numrows(sql, v)
_nextres(v, sql, 0)
end
# Additional options supported:
#
# :autoid :: Call #autoid= with the value
# :columns :: Call #columns= with the value
# :fetch :: Call #fetch= with the value
# :numrows :: Call #numrows= with the value
# :extend :: A module the object is extended with.
# :sqls :: The array to store the SQL queries in.
def adapter_initialize
opts = @opts
@mutex = Mutex.new
@sqls = opts[:sqls] || []
@shared_adapter = false
case db_type = opts[:host]
when String, Symbol
db_type = db_type.to_sym
unless mod = Sequel.synchronize{SHARED_ADAPTER_MAP[db_type]}
begin
require "sequel/adapters/shared/#{db_type}"
rescue LoadError
else
mod = Sequel.synchronize{SHARED_ADAPTER_MAP[db_type]}
end
end
if mod
@shared_adapter = true
extend(mod::DatabaseMethods)
extend_datasets(mod::DatasetMethods)
if mod.respond_to?(:mock_adapter_setup)
mod.mock_adapter_setup(self)
end
end
end
unless @shared_adapter
extend UnmodifiedIdentifiers::DatabaseMethods
extend_datasets UnmodifiedIdentifiers::DatasetMethods
end
self.autoid = opts[:autoid]
self.columns = opts[:columns]
self.fetch = opts[:fetch]
self.numrows = opts[:numrows]
extend(opts[:extend]) if opts[:extend]
sqls
end
def columns(ds, sql, cs=@columns)
case cs
when Array
unless cs.empty?
if cs.all?{|c| c.is_a?(Symbol)}
ds.columns(*cs)
else
columns(ds, sql, @mutex.synchronize{cs.shift})
end
end
when Proc
ds.columns(*cs.call(sql))
when nil
# nothing
else
raise Error, "Invalid @columns attribute: #{cs.inspect}"
end
end
def dataset_class_default
Dataset
end
def quote_identifiers_default
shared_adapter? ? super : false
end
def shared_adapter?
@shared_adapter
end
end
class Dataset < Sequel::Dataset
# The autoid setting for this dataset, if it has been overridden
def autoid
cache_get(:_autoid) || @opts[:autoid]
end
# The fetch setting for this dataset, if it has been overridden
def _fetch
cache_get(:_fetch) || @opts[:fetch]
end
# The numrows setting for this dataset, if it has been overridden
def numrows
cache_get(:_numrows) || @opts[:numrows]
end
# If arguments are provided, use them to set the columns
# for this dataset and return self. Otherwise, use the
# default Sequel behavior and return the columns.
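#
# A minimal sketch:
#
#   ds = Sequel.mock[:items].columns(:id, :name)
#   ds.columns # => [:id, :name]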
def columns(*cs)
if cs.empty?
super
else
self.columns = cs
self
end
end
def fetch_rows(sql, &block)
execute(sql, &block)
end
def quote_identifiers?
@opts.fetch(:quote_identifiers, db.send(:quote_identifiers_default))
end
# Return cloned dataset with the autoid setting modified
def with_autoid(autoid)
clone(:autoid=>autoid)
end
# Return cloned dataset with the fetch setting modified
def with_fetch(fetch)
clone(:fetch=>fetch)
end
# Return cloned dataset with the numrows setting modified
def with_numrows(numrows)
clone(:numrows=>numrows)
end
private
def execute(sql, opts=OPTS, &block)
super(sql, opts.merge(:dataset=>self), &block)
end
def execute_dui(sql, opts=OPTS, &block)
super(sql, opts.merge(:dataset=>self), &block)
end
def execute_insert(sql, opts=OPTS, &block)
super(sql, opts.merge(:dataset=>self), &block)
end
def non_sql_option?(key)
super || key == :fetch || key == :numrows || key == :autoid
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/mysql.rb 0000664 0000000 0000000 00000032411 14342141206 0020442 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'mysql'
raise(LoadError, "require 'mysql' did not define Mysql::CLIENT_MULTI_RESULTS!, so it not supported. Please install the mysql or ruby-mysql gem.\n") unless defined?(Mysql::CLIENT_MULTI_RESULTS)
require_relative 'utils/mysql_mysql2'
require_relative 'utils/mysql_prepared_statements'
module Sequel
module MySQL
boolean = Object.new
def boolean.call(s) s.to_i != 0 end
TYPE_TRANSLATOR_BOOLEAN = boolean.freeze
integer = Object.new
def integer.call(s) s.to_i end
TYPE_TRANSLATOR_INTEGER = integer.freeze
float = Object.new
def float.call(s) s.to_f end
# Hash with integer keys and callable values for converting MySQL types.
MYSQL_TYPES = {}
{
[0, 246] => ::Kernel.method(:BigDecimal),
[2, 3, 8, 9, 13, 247, 248] => integer,
[4, 5] => float,
[249, 250, 251, 252] => ::Sequel::SQL::Blob
}.each do |k,v|
k.each{|n| MYSQL_TYPES[n] = v}
end
MYSQL_TYPES.freeze
class Database < Sequel::Database
include Sequel::MySQL::DatabaseMethods
include Sequel::MySQL::MysqlMysql2::DatabaseMethods
include Sequel::MySQL::PreparedStatements::DatabaseMethods
set_adapter_scheme :mysql
# Hash of conversion procs for the current database
attr_reader :conversion_procs
# Whether to convert tinyint columns to bool for the current database
attr_reader :convert_tinyint_to_bool
# By default, Sequel raises an exception if an invalid date or time is used.
# However, if this is set to nil or :nil, the adapter treats dates
# like 0000-00-00 and times like 838:00:00 as nil values. If set to :string,
# it returns the strings as is.
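#
# A hedged sketch:
#
#   DB.convert_invalid_date_time = :nil
#   # fetched '0000-00-00' date values are now returned as nil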
attr_reader :convert_invalid_date_time
# Connect to the database. In addition to the usual database options,
# the following options have effect:
#
# :auto_is_null :: Set to true to use MySQL default behavior of having
# a filter for an autoincrement column equals NULL to return the last
# inserted row.
# :charset :: Same as :encoding (:encoding takes precedence)
# :compress :: Set to false to not compress results from the server
# :config_default_group :: The default group to read from in
# the MySQL config file.
# :config_local_infile :: If provided, sets the Mysql::OPT_LOCAL_INFILE
# option on the connection with the given value.
# :connect_timeout :: Set the timeout in seconds before a connection
# attempt is abandoned.
# :encoding :: Set all the related character sets for this
# connection (connection, client, database, server, and results).
# :read_timeout :: Set the timeout in seconds for reading back results
# to a query.
# :socket :: Use a unix socket file instead of connecting via TCP/IP.
# :timeout :: Set the timeout in seconds before the server will
# disconnect this connection (a.k.a. @@wait_timeout).
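#
# A hedged connection sketch (hypothetical credentials):
#
#   DB = Sequel.connect('mysql://user:password@localhost/blog',
#     :read_timeout=>10, :connect_timeout=>5)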
def connect(server)
opts = server_opts(server)
if Mysql.respond_to?(:init)
conn = Mysql.init
conn.options(Mysql::READ_DEFAULT_GROUP, opts[:config_default_group] || "client")
conn.options(Mysql::OPT_LOCAL_INFILE, opts[:config_local_infile]) if opts.has_key?(:config_local_infile)
if encoding = opts[:encoding] || opts[:charset]
# Set encoding before connecting so that the mysql driver knows what
# encoding we want to use, but this can be overridden by READ_DEFAULT_GROUP.
conn.options(Mysql::SET_CHARSET_NAME, encoding)
end
if read_timeout = opts[:read_timeout] and defined? Mysql::OPT_READ_TIMEOUT
conn.options(Mysql::OPT_READ_TIMEOUT, read_timeout)
end
if connect_timeout = opts[:connect_timeout] and defined? Mysql::OPT_CONNECT_TIMEOUT
conn.options(Mysql::OPT_CONNECT_TIMEOUT, connect_timeout)
end
else
# ruby-mysql 3 API
conn = Mysql.new
# no support for default group
conn.local_infile = opts[:config_local_infile] if opts.has_key?(:config_local_infile)
if encoding = opts[:encoding] || opts[:charset]
conn.charset = encoding
end
if read_timeout = opts[:read_timeout]
conn.read_timeout = read_timeout
end
if connect_timeout = opts[:connect_timeout]
conn.connect_timeout = connect_timeout
end
conn.singleton_class.class_eval do
alias real_connect connect
alias use_result store_result
end
end
conn.ssl_set(opts[:sslkey], opts[:sslcert], opts[:sslca], opts[:sslcapath], opts[:sslcipher]) if opts[:sslca] || opts[:sslkey]
conn.real_connect(
opts[:host] || 'localhost',
opts[:user],
opts[:password],
opts[:database],
(opts[:port].to_i if opts[:port]),
opts[:socket],
Mysql::CLIENT_MULTI_RESULTS +
Mysql::CLIENT_MULTI_STATEMENTS +
(opts[:compress] == false ? 0 : Mysql::CLIENT_COMPRESS)
)
sqls = mysql_connection_setting_sqls
# Set encoding a slightly different way after connecting,
# in case the READ_DEFAULT_GROUP overrode the provided encoding.
# Doesn't work across implicit reconnects, but Sequel doesn't turn on
# that feature.
sqls.unshift("SET NAMES #{literal(encoding.to_s)}") if encoding
sqls.each{|sql| log_connection_yield(sql, conn){conn.query(sql)}}
add_prepared_statements_cache(conn)
conn
end
def disconnect_connection(c)
c.close
rescue Mysql::Error
nil
end
# Modify the type translators for the date, time, and timestamp types
# depending on the value given.
def convert_invalid_date_time=(v)
m0 = ::Sequel.method(:string_to_time)
@conversion_procs[11] = (v != false) ? lambda{|val| convert_date_time(val, &m0)} : m0
m1 = ::Sequel.method(:string_to_date)
m = (v != false) ? lambda{|val| convert_date_time(val, &m1)} : m1
[10, 14].each{|i| @conversion_procs[i] = m}
m2 = method(:to_application_timestamp)
m = (v != false) ? lambda{|val| convert_date_time(val, &m2)} : m2
[7, 12].each{|i| @conversion_procs[i] = m}
@convert_invalid_date_time = v
end
# Modify the type translator used for the tinyint type based
# on the value given.
def convert_tinyint_to_bool=(v)
@conversion_procs[1] = v ? TYPE_TRANSLATOR_BOOLEAN : TYPE_TRANSLATOR_INTEGER
@convert_tinyint_to_bool = v
end
def execute_dui(sql, opts=OPTS)
execute(sql, opts){|c| return affected_rows(c)}
end
def execute_insert(sql, opts=OPTS)
execute(sql, opts){|c| return c.insert_id}
end
def freeze
server_version
@conversion_procs.freeze
super
end
private
# Execute the given SQL on the given connection. If the :type
# option is :select, yield the result of the query, otherwise
# yield the connection if a block is given.
def _execute(conn, sql, opts)
r = log_connection_yield((log_sql = opts[:log_sql]) ? sql + log_sql : sql, conn){conn.query(sql)}
if opts[:type] == :select
yield r if r
elsif defined?(yield)
yield conn
end
if conn.respond_to?(:more_results?)
while conn.more_results? do
if r
r.free
r = nil
end
begin
conn.next_result
r = conn.use_result
rescue Mysql::Error => e
raise_error(e, :disconnect=>true) if MYSQL_DATABASE_DISCONNECT_ERRORS.match(e.message)
break
end
yield r if opts[:type] == :select
end
end
rescue Mysql::Error => e
raise_error(e)
ensure
r.free if r
# Use up all results to avoid a commands out of sync message.
if conn.respond_to?(:more_results?)
while conn.more_results? do
begin
conn.next_result
r = conn.use_result
rescue Mysql::Error => e
raise_error(e, :disconnect=>true) if MYSQL_DATABASE_DISCONNECT_ERRORS.match(e.message)
break
end
r.free if r
end
end
end
def adapter_initialize
@conversion_procs = MYSQL_TYPES.dup
self.convert_tinyint_to_bool = true
self.convert_invalid_date_time = false
end
# Try to get an accurate number of rows matched using the query
# info. Fall back to affected_rows if there was no match, but
# that may be inaccurate.
def affected_rows(conn)
s = conn.info
if s && s =~ /Rows matched:\s+(\d+)\s+Changed:\s+\d+\s+Warnings:\s+\d+/
$1.to_i
else
conn.affected_rows
end
end
# MySQL connections use the query method to execute SQL without a result
def connection_execute_method
:query
end
# If convert_invalid_date_time is nil, :nil, or :string and
# the conversion raises an InvalidValue exception, return v
# if :string and nil otherwise.
def convert_date_time(v)
yield v
rescue InvalidValue
case @convert_invalid_date_time
when nil, :nil
nil
when :string
v
else
raise
end
end
def database_error_classes
[Mysql::Error]
end
def database_exception_sqlstate(exception, opts)
exception.sqlstate
end
def dataset_class_default
Dataset
end
def disconnect_error?(e, opts)
super || (e.is_a?(::Mysql::Error) && MYSQL_DATABASE_DISCONNECT_ERRORS.match(e.message))
end
# Convert tinyint(1) type to boolean if convert_tinyint_to_bool is true
def schema_column_type(db_type)
convert_tinyint_to_bool && db_type =~ /\Atinyint\(1\)/ ? :boolean : super
end
end
class Dataset < Sequel::Dataset
include Sequel::MySQL::DatasetMethods
include Sequel::MySQL::MysqlMysql2::DatasetMethods
include Sequel::MySQL::PreparedStatements::DatasetMethods
# Yield all rows matching this dataset. If the dataset is set to
# split multiple statements, yield arrays of hashes one per statement
# instead of yielding results for all statements as hashes.
def fetch_rows(sql)
execute(sql) do |r|
i = -1
cps = db.conversion_procs
cols = r.fetch_fields.map do |f|
# Pretend tinyint is another integer type if its length is not 1, to
# avoid casting to boolean if convert_tinyint_to_bool is set.
type_proc = f.type == 1 && cast_tinyint_integer?(f) ? cps[2] : cps[f.type]
[output_identifier(f.name), type_proc, i+=1]
end
self.columns = cols.map(&:first)
if opts[:split_multiple_result_sets]
s = []
yield_rows(r, cols){|h| s << h}
yield s
else
yield_rows(r, cols){|h| yield h}
end
end
self
end
# Don't allow graphing a dataset that splits multiple statements
def graph(*)
raise(Error, "Can't graph a dataset that splits multiple result sets") if opts[:split_multiple_result_sets]
super
end
# Makes each yield arrays of rows, with each array containing the rows
# for a given result set. Does not work with graphing. So you can submit
# SQL with multiple statements and easily determine which statement
# returned which results.
#
# Modifies the row_proc of the returned dataset so that it still works
# as expected (running on the hashes instead of on the arrays of hashes).
# If you modify the row_proc afterward, note that it will receive an array
# of hashes instead of a hash.
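#
# A hedged sketch (hypothetical SQL):
#
#   ds = DB['SELECT 1 AS a; SELECT 2 AS b'].split_multiple_result_sets
#   ds.each{|rows| p rows} # yields one array of hashes per statement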
def split_multiple_result_sets
raise(Error, "Can't split multiple statements on a graphed dataset") if opts[:graph]
ds = clone(:split_multiple_result_sets=>true)
ds = ds.with_row_proc(proc{|x| x.map{|h| row_proc.call(h)}}) if row_proc
ds
end
private
# Whether a tinyint field should be cast as an integer. By default,
# casts to integer if the field length is not 1. Can be overwritten
# to make tinyint casting dataset dependent.
def cast_tinyint_integer?(field)
field.length != 1
end
def execute(sql, opts=OPTS)
opts = Hash[opts]
opts[:type] = :select
super
end
# Handle correct quoting of strings using ::Mysql.quote.
def literal_string_append(sql, v)
sql << "'" << ::Mysql.quote(v) << "'"
end
# Yield each row of the given result set r with columns cols
# as a hash with symbol keys
def yield_rows(r, cols)
while row = r.fetch_row
h = {}
cols.each{|n, p, i| v = row[i]; h[n] = (v && p) ? p.call(v) : v}
yield h
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/mysql2.rb 0000664 0000000 0000000 00000023007 14342141206 0020525 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'mysql2'
require_relative 'utils/mysql_mysql2'
module Sequel
module Mysql2
NativePreparedStatements = if ::Mysql2::VERSION >= '0.4'
true
else
require_relative 'utils/mysql_prepared_statements'
false
end
class Database < Sequel::Database
include Sequel::MySQL::DatabaseMethods
include Sequel::MySQL::MysqlMysql2::DatabaseMethods
include Sequel::MySQL::PreparedStatements::DatabaseMethods unless NativePreparedStatements
set_adapter_scheme :mysql2
# Whether to convert tinyint columns to bool for this database
attr_accessor :convert_tinyint_to_bool
# Connect to the database. In addition to the usual database options,
# the following options have effect:
#
# :auto_is_null :: Set to true to use MySQL default behavior of having
# a filter for an autoincrement column equals NULL to return the last
# inserted row.
# :charset :: Same as :encoding (:encoding takes precedence)
# :encoding :: Set all the related character sets for this
# connection (connection, client, database, server, and results).
#
# The options hash is also passed to mysql2, and can include mysql2
# options such as :local_infile.
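#
# A hedged connection sketch (hypothetical credentials):
#
#   DB = Sequel.connect('mysql2://user:password@localhost/blog',
#     :local_infile=>true)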
def connect(server)
opts = server_opts(server)
opts[:username] ||= opts.delete(:user)
opts[:flags] ||= 0
opts[:flags] |= ::Mysql2::Client::FOUND_ROWS if ::Mysql2::Client.const_defined?(:FOUND_ROWS)
opts[:encoding] ||= opts[:charset]
conn = ::Mysql2::Client.new(opts)
conn.query_options.merge!(:symbolize_keys=>true, :cache_rows=>false)
if NativePreparedStatements
conn.instance_variable_set(:@sequel_default_query_options, conn.query_options.dup)
end
sqls = mysql_connection_setting_sqls
# Set encoding a slightly different way after connecting,
# in case the READ_DEFAULT_GROUP overrode the provided encoding.
# Doesn't work across implicit reconnects, but Sequel doesn't turn on
# that feature.
if encoding = opts[:encoding]
sqls.unshift("SET NAMES #{conn.escape(encoding.to_s)}")
end
sqls.each{|sql| log_connection_yield(sql, conn){conn.query(sql)}}
add_prepared_statements_cache(conn)
conn
end
def execute_dui(sql, opts=OPTS)
execute(sql, opts){|c| return c.affected_rows}
end
def execute_insert(sql, opts=OPTS)
execute(sql, opts){|c| return c.last_id}
end
def freeze
server_version
super
end
# Return the version of the MySQL server to which we are connecting.
def server_version(_server=nil)
@server_version ||= super()
end
private
if NativePreparedStatements
# Use a native mysql2 prepared statement to implement prepared statements.
def execute_prepared_statement(ps_name, opts, &block)
if ps_name.is_a?(Sequel::Dataset::ArgumentMapper)
ps = ps_name
ps_name = ps.prepared_statement_name
else
ps = prepared_statement(ps_name)
end
sql = ps.prepared_sql
synchronize(opts[:server]) do |conn|
stmt, ps_sql = conn.prepared_statements[ps_name]
unless ps_sql == sql
stmt.close if stmt
stmt = log_connection_yield("Preparing #{ps_name}: #{sql}", conn){conn.prepare(sql)}
conn.prepared_statements[ps_name] = [stmt, sql]
end
opts = Hash[opts]
opts[:sql] = "Executing #{ps_name || sql}"
if ps_name && ps.log_sql
opts[:log_sql] = " (#{sql})"
end
_execute(conn, stmt, opts, &block)
end
end
end
# Execute the given SQL on the given connection. If the :type
# option is :select, yield the result of the query, otherwise
# yield the connection if a block is given.
def _execute(conn, sql, opts)
stream = opts[:stream]
if NativePreparedStatements
if args = opts[:arguments]
args = args.map{|arg| bound_variable_value(arg)}
end
case sql
when ::Mysql2::Statement
stmt = sql
sql = opts[:sql] || ''
when Dataset
sql = sql.sql
close_stmt = true
stmt = conn.prepare(sql)
end
end
r = log_connection_yield((log_sql = opts[:log_sql]) ? sql + log_sql : sql, conn, args) do
if stmt
conn.query_options.merge!(:cache_rows=>true, :database_timezone => timezone, :application_timezone => Sequel.application_timezone, :stream=>stream, :cast_booleans=>convert_tinyint_to_bool)
stmt.execute(*args)
else
conn.query(sql, :database_timezone => timezone, :application_timezone => Sequel.application_timezone, :stream=>stream)
end
end
if opts[:type] == :select
if r
if stream
begin
r2 = yield r
ensure
# If r2 is nil, it means the block did not exit normally,
# so the rest of the results must be drained to prevent
# "commands out of sync" errors.
r.each{} unless r2
end
else
yield r
end
end
elsif defined?(yield)
yield conn
end
rescue ::Mysql2::Error => e
raise_error(e)
ensure
if stmt
conn.query_options.replace(conn.instance_variable_get(:@sequel_default_query_options))
stmt.close if close_stmt
end
end
# Set the convert_tinyint_to_bool setting based on the default value.
def adapter_initialize
self.convert_tinyint_to_bool = true
end
if NativePreparedStatements
# Handle bound variable arguments that Mysql2 does not handle natively.
def bound_variable_value(arg)
case arg
when true
1
when false
0
when DateTime, Time
literal(arg)[1...-1]
else
arg
end
end
end
def connection_execute_method
:query
end
def database_error_classes
[::Mysql2::Error]
end
def database_exception_sqlstate(exception, opts)
state = exception.sql_state
state unless state == 'HY000'
end
def dataset_class_default
Dataset
end
# If a connection object is available, try pinging it. Otherwise, if the
# error is a Mysql2::Error, check the SQL state and exception message for
# disconnects.
def disconnect_error?(e, opts)
super ||
((conn = opts[:conn]) && !conn.ping) ||
(e.is_a?(::Mysql2::Error) &&
(e.sql_state =~ /\A08/ ||
MYSQL_DATABASE_DISCONNECT_ERRORS.match(e.message)))
end
# Convert tinyint(1) type to boolean if convert_tinyint_to_bool is true
def schema_column_type(db_type)
convert_tinyint_to_bool && db_type =~ /\Atinyint\(1\)/ ? :boolean : super
end
end
class Dataset < Sequel::Dataset
include Sequel::MySQL::DatasetMethods
include Sequel::MySQL::MysqlMysql2::DatasetMethods
include Sequel::MySQL::PreparedStatements::DatasetMethods unless NativePreparedStatements
STREAMING_SUPPORTED = ::Mysql2::VERSION >= '0.3.12'
if NativePreparedStatements
PreparedStatementMethods = prepared_statements_module(
"sql = self; opts = Hash[opts]; opts[:arguments] = bind_arguments",
Sequel::Dataset::UnnumberedArgumentMapper,
%w"execute execute_dui execute_insert")
end
def fetch_rows(sql)
execute(sql) do |r|
self.columns = r.fields.map!{|c| output_identifier(c.to_s)}
r.each(:cast_booleans=>convert_tinyint_to_bool?){|h| yield h}
end
self
end
# Use streaming to implement paging if Mysql2 supports it and
# it hasn't been disabled.
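#
# Usage sketch (hypothetical table):
#
#   DB[:huge_table].paged_each{|row| p row}                 # streams rows
#   DB[:huge_table].paged_each(stream: false){|row| p row}  # generic paging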
def paged_each(opts=OPTS, &block)
if STREAMING_SUPPORTED && opts[:stream] != false
unless defined?(yield)
return enum_for(:paged_each, opts)
end
stream.each(&block)
else
super
end
end
# Return a clone of the dataset that will stream rows when iterating
# over the result set, so it can handle large datasets that
# won't fit in memory (requires mysql2 0.3.12+ to have an effect).
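#
# Usage sketch (hypothetical table):
#
#   DB[:huge_table].stream.each{|row| p row}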
def stream
clone(:stream=>true)
end
private
# Whether to convert tinyint(1) columns to boolean for this dataset.
# By default, uses the database's convert_tinyint_to_bool
# setting. Exists for compatibility with the mysql adapter.
def convert_tinyint_to_bool?
@db.convert_tinyint_to_bool
end
def execute(sql, opts=OPTS)
opts = Hash[opts]
opts[:type] = :select
opts[:stream] = @opts[:stream]
super
end
if NativePreparedStatements
def bound_variable_modules
[PreparedStatementMethods]
end
def prepared_statement_modules
[PreparedStatementMethods]
end
end
# Handle correct quoting of strings using ::Mysql2::Client#escape.
def literal_string_append(sql, v)
sql << "'" << db.synchronize(@opts[:server]){|c| c.escape(v)} << "'"
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/odbc.rb 0000664 0000000 0000000 00000007647 14342141206 0020221 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'odbc'
module Sequel
module ODBC
# Contains procs keyed on subadapter type that extend the
# given database object so it supports the correct database type.
DATABASE_SETUP = {}
class Database < Sequel::Database
set_adapter_scheme :odbc
def connect(server)
opts = server_opts(server)
conn = if opts.include?(:drvconnect)
::ODBC::Database.new.drvconnect(opts[:drvconnect])
elsif opts.include?(:driver)
drv = ::ODBC::Driver.new
drv.name = 'Sequel ODBC Driver130'
opts.each do |param, value|
if :driver == param && value !~ /\A\{.+\}\z/
value = "{#{value}}"
end
drv.attrs[param.to_s.upcase] = value.to_s
end
::ODBC::Database.new.drvconnect(drv)
else
::ODBC::connect(opts[:database], opts[:user], opts[:password])
end
conn.autocommit = true
conn
end
def disconnect_connection(c)
c.disconnect
end
def execute(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
begin
r = log_connection_yield(sql, conn){conn.run(sql)}
yield(r) if defined?(yield)
rescue ::ODBC::Error, ArgumentError => e
raise_error(e)
ensure
r.drop if r
end
nil
end
end
def execute_dui(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
begin
log_connection_yield(sql, conn){conn.do(sql)}
rescue ::ODBC::Error, ArgumentError => e
raise_error(e)
end
end
end
private
def adapter_initialize
if (db_type = @opts[:db_type]) && (prok = Sequel::Database.load_adapter(db_type.to_sym, :map=>DATABASE_SETUP, :subdir=>'odbc'))
prok.call(self)
end
end
def connection_execute_method
:do
end
def database_error_classes
[::ODBC::Error]
end
def dataset_class_default
Dataset
end
def disconnect_error?(e, opts)
super || (e.is_a?(::ODBC::Error) && /\A08S01/.match(e.message))
end
end
class Dataset < Sequel::Dataset
def fetch_rows(sql)
execute(sql) do |s|
i = -1
cols = s.columns(true).map{|c| [output_identifier(c.name), c.type, i+=1]}
columns = cols.map{|c| c[0]}
self.columns = columns
s.each do |row|
hash = {}
cols.each do |n,t,j|
v = row[j]
# We can assume v is not false, so this shouldn't convert false to nil.
hash[n] = (convert_odbc_value(v, t) if v)
end
yield hash
end
end
self
end
private
def convert_odbc_value(v, t)
# When fetching a result set, the Ruby ODBC driver converts all ODBC
# SQL types to an equivalent Ruby type; with the exception of
# SQL_TYPE_DATE, SQL_TYPE_TIME and SQL_TYPE_TIMESTAMP.
#
# The conversions below are consistent with the mappings in
# ODBCColumn#mapSqlTypeToGenericType and Column#klass.
case v
when ::ODBC::TimeStamp
db.to_application_timestamp([v.year, v.month, v.day, v.hour, v.minute, v.second, v.fraction])
when ::ODBC::Time
Sequel::SQLTime.create(v.hour, v.minute, v.second)
when ::ODBC::Date
Date.new(v.year, v.month, v.day)
else
if t == ::ODBC::SQL_BIT
v == 1
else
v
end
end
end
def default_timestamp_format
"{ts '%Y-%m-%d %H:%M:%S'}"
end
def literal_date(v)
v.strftime("{d '%Y-%m-%d'}")
end
def literal_false
'0'
end
def literal_true
'1'
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/odbc/ 0000775 0000000 0000000 00000000000 14342141206 0017656 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/adapters/odbc/db2.rb 0000664 0000000 0000000 00000000370 14342141206 0020652 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/db2'
Sequel.synchronize do
Sequel::ODBC::DATABASE_SETUP[:db2] = proc do |db|
db.extend ::Sequel::DB2::DatabaseMethods
db.extend_datasets ::Sequel::DB2::DatasetMethods
end
end
sequel-5.63.0/lib/sequel/adapters/odbc/mssql.rb 0000664 0000000 0000000 00000003140 14342141206 0021340 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/mssql'
module Sequel
module ODBC
Sequel.synchronize do
DATABASE_SETUP[:mssql] = proc do |db|
db.extend Sequel::ODBC::MSSQL::DatabaseMethods
db.dataset_class = Sequel::ODBC::MSSQL::Dataset
db.send(:set_mssql_unicode_strings)
end
end
module MSSQL
module DatabaseMethods
include Sequel::MSSQL::DatabaseMethods
def execute_insert(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
begin
log_connection_yield(sql, conn){conn.do(sql)}
begin
last_insert_id_sql = 'SELECT SCOPE_IDENTITY()'
s = log_connection_yield(last_insert_id_sql, conn){conn.run(last_insert_id_sql)}
if (rows = s.fetch_all) and (row = rows.first) and (v = row.first)
Integer(v)
end
ensure
s.drop if s
end
rescue ::ODBC::Error => e
raise_error(e)
end
end
end
end
class Dataset < ODBC::Dataset
include Sequel::MSSQL::DatasetMethods
private
# Use ODBC format, not Microsoft format, as the ODBC layer does
# some translation, but allow for millisecond precision.
def default_timestamp_format
"{ts '%Y-%m-%d %H:%M:%S%N'}"
end
# Use ODBC format, not Microsoft format, as the ODBC layer does
# some translation.
def literal_date(v)
v.strftime("{d '%Y-%m-%d'}")
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/odbc/oracle.rb 0000664 0000000 0000000 00000000404 14342141206 0021446 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/oracle'
Sequel.synchronize do
Sequel::ODBC::DATABASE_SETUP[:oracle] = proc do |db|
db.extend ::Sequel::Oracle::DatabaseMethods
db.extend_datasets ::Sequel::Oracle::DatasetMethods
end
end
sequel-5.63.0/lib/sequel/adapters/oracle.rb 0000664 0000000 0000000 00000030205 14342141206 0020541 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'oci8'
require_relative 'shared/oracle'
module Sequel
module Oracle
class Database < Sequel::Database
include DatabaseMethods
set_adapter_scheme :oracle
# ORA-00028: your session has been killed
# ORA-01012: not logged on
# ORA-02396: exceeded maximum idle time, please connect again
# ORA-03113: end-of-file on communication channel
# ORA-03114: not connected to ORACLE
# ORA-03135: connection lost contact
CONNECTION_ERROR_CODES = [ 28, 1012, 2396, 3113, 3114, 3135 ].freeze
ORACLE_TYPES = {
:blob=>lambda{|b| Sequel::SQL::Blob.new(b.read)},
:clob=>:read.to_proc
}.freeze
# Hash of conversion procs for this database.
attr_reader :conversion_procs
def connect(server)
opts = server_opts(server)
if opts[:database]
dbname = opts[:host] ? \
"//#{opts[:host]}#{":#{opts[:port]}" if opts[:port]}/#{opts[:database]}" : opts[:database]
else
dbname = opts[:host]
end
conn = OCI8.new(opts[:user], opts[:password], dbname, opts[:privilege])
if prefetch_rows = opts.fetch(:prefetch_rows, 100)
conn.prefetch_rows = typecast_value_integer(prefetch_rows)
end
conn.autocommit = true
conn.non_blocking = true
# How the ruby-oci8 gem retrieves Oracle columns with a type of
# DATE, TIMESTAMP, or TIMESTAMP WITH TIME ZONE is complex, and depends
# on the ruby version and the Oracle version (9 or later).
# In the now standard case of Oracle 9 or later, the time zone
# is determined by the Oracle session time zone. Thus, if the user
# requests that Sequel provide UTC times to the application,
# we need to alter the session time zone to be UTC.
if Sequel.application_timezone == :utc
conn.exec("ALTER SESSION SET TIME_ZONE='-00:00'")
end
class << conn
attr_reader :prepared_statements
end
conn.instance_variable_set(:@prepared_statements, {})
conn
end
def disconnect_connection(c)
c.logoff
rescue OCIException
nil
end
def execute(sql, opts=OPTS, &block)
_execute(nil, sql, opts, &block)
end
def execute_insert(sql, opts=OPTS)
_execute(:insert, sql, opts)
end
def freeze
@conversion_procs.freeze
super
end
private
def _execute(type, sql, opts=OPTS, &block)
synchronize(opts[:server]) do |conn|
begin
return execute_prepared_statement(conn, type, sql, opts, &block) if sql.is_a?(Symbol)
if args = opts[:arguments]
r = conn.parse(sql)
args = cursor_bind_params(conn, r, args)
nr = log_connection_yield(sql, conn, args){r.exec}
r = nr unless defined?(yield)
else
r = log_connection_yield(sql, conn){conn.exec(sql)}
end
if defined?(yield)
yield(r)
elsif type == :insert
last_insert_id(conn, opts)
else
r
end
rescue OCIException, RuntimeError => e
# ruby-oci8 is naughty and raises strings in some places
raise_error(e)
ensure
r.close if r.is_a?(::OCI8::Cursor)
end
end
end
def adapter_initialize
@autosequence = @opts[:autosequence]
@primary_key_sequences = {}
@conversion_procs = ORACLE_TYPES.dup
end
PS_TYPES = {'string'=>String, 'integer'=>Integer, 'float'=>Float,
'decimal'=>Float, 'date'=>Time, 'datetime'=>Time,
'time'=>Time, 'boolean'=>String, 'blob'=>OCI8::BLOB, 'clob'=>OCI8::CLOB}.freeze
def cursor_bind_params(conn, cursor, args)
i = 0
args.map do |arg, type|
i += 1
case arg
when true
arg = 'Y'
when false
arg = 'N'
when BigDecimal
arg = arg.to_f
when ::Sequel::SQL::Blob
arg = ::OCI8::BLOB.new(conn, arg)
when String
if type == 'clob'
arg = ::OCI8::CLOB.new(conn, arg)
end
end
cursor.bind_param(i, arg, PS_TYPES[type] || arg.class)
arg
end
end
def connection_execute_method
:exec
end
def database_error_classes
[OCIException, RuntimeError]
end
def database_specific_error_class(exception, opts)
return super unless exception.respond_to?(:code)
case exception.code
when 1400, 1407
NotNullConstraintViolation
when 1
UniqueConstraintViolation
when 2291, 2292
ForeignKeyConstraintViolation
when 2290
CheckConstraintViolation
when 8177
SerializationFailure
else
super
end
end
def dataset_class_default
Dataset
end
def execute_prepared_statement(conn, type, name, opts)
ps = prepared_statement(name)
sql = ps.prepared_sql
if cursora = conn.prepared_statements[name]
cursor, cursor_sql = cursora
if cursor_sql != sql
cursor.close
cursor = nil
end
end
unless cursor
cursor = log_connection_yield("PREPARE #{name}: #{sql}", conn){conn.parse(sql)}
conn.prepared_statements[name] = [cursor, sql]
end
args = cursor_bind_params(conn, cursor, opts[:arguments])
log_sql = "EXECUTE #{name}"
if ps.log_sql
log_sql += " ("
log_sql << sql
log_sql << ")"
end
r = log_connection_yield(log_sql, conn, args){cursor.exec}
if defined?(yield)
yield(cursor)
elsif type == :insert
last_insert_id(conn, opts)
else
r
end
end
def last_insert_id(conn, opts)
unless sequence = opts[:sequence]
if t = opts[:table]
sequence = sequence_for_table(t)
end
end
if sequence
sql = "SELECT #{literal(sequence)}.currval FROM dual"
begin
cursor = log_connection_yield(sql, conn){conn.exec(sql)}
row = cursor.fetch
row.each{|v| return (v.to_i if v)}
rescue OCIError
nil
ensure
cursor.close if cursor
end
end
end
def begin_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.begin', conn){conn.autocommit = false}
set_transaction_isolation(conn, opts)
end
def commit_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.commit', conn){conn.commit}
end
def disconnect_error?(e, opts)
super || (e.is_a?(::OCIError) && CONNECTION_ERROR_CODES.include?(e.code))
end
def oracle_column_type(h)
case h[:oci8_type]
when :number
case h[:scale]
when 0
:integer
when -127
:float
else
:decimal
end
when :date
:datetime
else
schema_column_type(h[:db_type])
end
end
def remove_transaction(conn, committed)
conn.autocommit = true
ensure
super
end
def rollback_transaction(conn, opts=OPTS)
log_connection_yield('Transaction.rollback', conn){conn.rollback}
end
def schema_parse_table(table, opts=OPTS)
schema, table = schema_and_table(table)
schema ||= opts[:schema]
schema_and_table = if ds = opts[:dataset]
ds.literal(schema ? SQL::QualifiedIdentifier.new(schema, table) : SQL::Identifier.new(table))
else
"#{"#{quote_identifier(schema)}." if schema}#{quote_identifier(table)}"
end
table_schema = []
m = output_identifier_meth(ds)
im = input_identifier_meth(ds)
# Primary Keys
ds = metadata_dataset.
from{[all_constraints.as(:cons), all_cons_columns.as(:cols)]}.
where{{
cols[:table_name]=>im.call(table),
cons[:constraint_type]=>'P',
cons[:constraint_name]=>cols[:constraint_name],
cons[:owner]=>cols[:owner]}}
ds = ds.where{{cons[:owner]=>im.call(schema)}} if schema
pks = ds.select_map{cols[:column_name]}
# Default values
defaults = begin
metadata_dataset.from(:all_tab_cols).
where(:table_name=>im.call(table)).
as_hash(:column_name, :data_default)
rescue DatabaseError
{}
end
metadata = synchronize(opts[:server]) do |conn|
begin
log_connection_yield("Connection.describe_table", conn){conn.describe_table(schema_and_table)}
rescue OCIError => e
raise_error(e)
end
end
metadata.columns.each do |column|
h = {
:primary_key => pks.include?(column.name),
:default => defaults[column.name],
:oci8_type => column.data_type,
:db_type => column.type_string,
:type_string => column.type_string,
:charset_form => column.charset_form,
:char_used => column.char_used?,
:char_size => column.char_size,
:data_size => column.data_size,
:precision => column.precision,
:scale => column.scale,
:fsprecision => column.fsprecision,
:lfprecision => column.lfprecision,
:allow_null => column.nullable?
}
h[:type] = oracle_column_type(h)
h[:auto_increment] = h[:type] == :integer if h[:primary_key]
h[:max_length] = h[:char_size] if h[:type] == :string
table_schema << [m.call(column.name), h]
end
table_schema
end
end
class Dataset < Sequel::Dataset
include DatasetMethods
# Oracle already supports named bind arguments, so use them directly.
module ArgumentMapper
include Sequel::Dataset::ArgumentMapper
protected
# Return a hash with the same values as the given hash,
# but with the keys converted to strings.
def map_to_prepared_args(bind_vars)
prepared_args.map{|v, t| [bind_vars[v], t]}
end
private
# Oracle uses a : before the name of the argument for named
# arguments.
def prepared_arg(k)
y, type = k.to_s.split("__", 2)
prepared_args << [y.to_sym, type]
i = prepared_args.length
LiteralString.new(":#{i}")
end
end
BindArgumentMethods = prepared_statements_module(:bind, ArgumentMapper)
PreparedStatementMethods = prepared_statements_module(:prepare, BindArgumentMethods)
def fetch_rows(sql)
execute(sql) do |cursor|
cps = db.conversion_procs
cols = columns = cursor.get_col_names.map{|c| output_identifier(c)}
metadata = cursor.column_metadata
cm = cols.zip(metadata).map{|c, m| [c, cps[m.data_type]]}
self.columns = columns
while r = cursor.fetch
row = {}
r.zip(cm).each{|v, (c, cp)| row[c] = ((v && cp) ? cp.call(v) : v)}
yield row
end
end
self
end
# Oracle requires type specifiers for placeholders, at least
# if you ever want to use a nil/NULL value as the value for
# the placeholder.
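#
# A sketch of a typed placeholder (dataset and column are hypothetical;
# the type name follows a double underscore in the placeholder):
#
#   DB[:items].where(id: :$i__integer).call(:select, i: nil)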
def requires_placeholder_type_specifiers?
true
end
private
def literal_other_append(sql, v)
case v
when OraDate
literal_append(sql, db.to_application_timestamp(v))
when OCI8::CLOB
v.rewind
literal_append(sql, v.read)
else
super
end
end
def prepared_arg_placeholder
':'
end
def bound_variable_modules
[BindArgumentMethods]
end
def prepared_statement_modules
[PreparedStatementMethods]
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/postgres.rb 0000664 0000000 0000000 00000101361 14342141206 0021144 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative 'shared/postgres'
begin
require 'pg'
# :nocov:
Sequel::Postgres::PGError = PG::Error if defined?(PG::Error)
Sequel::Postgres::PGconn = PG::Connection if defined?(PG::Connection)
Sequel::Postgres::PGresult = PG::Result if defined?(PG::Result)
# Work around postgres-pr 0.7.0+ which ships with a pg.rb file
unless defined?(PG::Connection)
raise LoadError unless defined?(PGconn::CONNECTION_OK)
end
if defined?(PG::TypeMapByClass)
# :nocov:
type_map = Sequel::Postgres::PG_QUERY_TYPE_MAP = PG::TypeMapByClass.new
type_map[Integer] = PG::TextEncoder::Integer.new
type_map[FalseClass] = type_map[TrueClass] = PG::TextEncoder::Boolean.new
type_map[Float] = PG::TextEncoder::Float.new
end
Sequel::Postgres::USES_PG = true
rescue LoadError => e
# :nocov:
begin
require 'sequel/postgres-pr'
rescue LoadError
begin
require 'postgres-pr/postgres-compat'
rescue LoadError
raise e
end
end
Sequel::Postgres::USES_PG = false
# :nocov:
end
module Sequel
module Postgres
# :nocov:
if USES_PG
# Whether the given sequel_pg version integer is supported.
def self.sequel_pg_version_supported?(version)
version >= 10617
end
end
# :nocov:
# PGconn subclass for connection specific methods used with the
# pg or postgres-pr driver.
class Adapter < PGconn
# The underlying exception classes to reraise as disconnect errors
# instead of regular database errors.
DISCONNECT_ERROR_CLASSES = [IOError, Errno::EPIPE, Errno::ECONNRESET]
# :nocov:
if defined?(::PG::ConnectionBad)
# :nocov:
DISCONNECT_ERROR_CLASSES << ::PG::ConnectionBad
end
DISCONNECT_ERROR_CLASSES.freeze
disconnect_errors = [
'ERROR: cached plan must not change result type',
'could not receive data from server',
'no connection to the server',
'connection not open',
'connection is closed',
'terminating connection due to administrator command',
'PQconsumeInput() '
]
# Since exception class based disconnect checking may not work,
# also try parsing the exception message to look for disconnect
# errors.
DISCONNECT_ERROR_RE = /\A#{Regexp.union(disconnect_errors)}/
if USES_PG
# Hash of prepared statements for this connection. Keys are
# string names of the server side prepared statement, and values
# are SQL strings.
attr_reader :prepared_statements
# :nocov:
unless public_method_defined?(:async_exec_params)
alias async_exec_params async_exec
end
elsif !const_defined?(:CONNECTION_OK)
# Handle old postgres-pr
# sequel-postgres-pr already implements this API
CONNECTION_OK = -1
# Escape bytea values. Uses historical format instead of hex
# format for maximum compatibility.
def escape_bytea(str)
str.gsub(/[\000-\037\047\134\177-\377]/n){|b| "\\#{sprintf('%o', b.each_byte{|x| break x}).rjust(3, '0')}"}
end
# Escape strings by doubling apostrophes. This only works if standard
# conforming strings are used.
def escape_string(str)
str.gsub("'", "''")
end
alias finish close
def async_exec(sql)
PGresult.new(@conn.query(sql))
end
def block(timeout=nil)
end
def status
CONNECTION_OK
end
class PGresult < ::PGresult
alias nfields num_fields
alias ntuples num_tuples
alias ftype type
alias fname fieldname
alias cmd_tuples cmdtuples
end
end
# :nocov:
# Raise a Sequel::DatabaseDisconnectError if one of the disconnect
# error classes is raised, or a PGError is raised and the connection
# status cannot be determined or it is not OK.
def check_disconnect_errors
yield
rescue *DISCONNECT_ERROR_CLASSES => e
disconnect = true
raise(Sequel.convert_exception_class(e, Sequel::DatabaseDisconnectError))
rescue PGError => e
disconnect = false
begin
s = status
rescue PGError
disconnect = true
end
status_ok = (s == Adapter::CONNECTION_OK)
disconnect ||= !status_ok
disconnect ||= e.message =~ DISCONNECT_ERROR_RE
disconnect ? raise(Sequel.convert_exception_class(e, Sequel::DatabaseDisconnectError)) : raise
ensure
block if status_ok && !disconnect
end
# Execute the given SQL with this connection. If a block is given,
# yield the results, otherwise, return the number of changed rows.
def execute(sql, args=nil)
args = args.map{|v| @db.bound_variable_arg(v, self)} if args
q = check_disconnect_errors{execute_query(sql, args)}
begin
defined?(yield) ? yield(q) : q.cmd_tuples
ensure
q.clear if q && q.respond_to?(:clear)
end
end
private
# Return the PGResult containing the query results.
def execute_query(sql, args)
@db.log_connection_yield(sql, self, args){args ? async_exec_params(sql, args) : async_exec(sql)}
end
end
class Database < Sequel::Database
include Sequel::Postgres::DatabaseMethods
set_adapter_scheme :postgresql
set_adapter_scheme :postgres
# Convert given argument so that it can be used directly by pg. Currently, pg doesn't
# handle fractional seconds in Time/DateTime or blobs with "\0". Only public for use by
# the adapter, shouldn't be used by external code.
def bound_variable_arg(arg, conn)
case arg
when Sequel::SQL::Blob
{:value=>arg, :type=>17, :format=>1}
# :nocov:
# Not covered by tests as tests use pg_extended_date_support
# extension, which has basically the same code.
when DateTime, Time
literal(arg)
# :nocov:
else
arg
end
end
# Call a procedure with the given name and arguments. Returns a hash if the procedure
# returns a value, and nil otherwise. Example:
#
# DB.call_procedure(:foo, 1, 2)
# # CALL foo(1, 2)
def call_procedure(name, *args)
dataset.send(:call_procedure, name, args)
end
# Connects to the database. In addition to the standard database
# options, using the :encoding or :charset option changes the
# client encoding for the connection, :connect_timeout is a
# connection timeout in seconds, :sslmode sets postgres's
# sslmode, and :notice_receiver handles server notices in a proc.
# :connect_timeout, :driver_options, :sslmode, and :notice_receiver
# are only supported if the pg driver is used.
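#
# A connection sketch (host, database, and credentials are hypothetical):
#
#   DB = Sequel.connect(adapter: 'postgres', host: 'localhost',
#     database: 'my_db', user: 'postgres', connect_timeout: 10,
#     sslmode: 'require',
#     notice_receiver: proc{|result| puts result.result_error_message})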
def connect(server)
opts = server_opts(server)
if USES_PG
connection_params = {
:host => opts[:host],
:port => opts[:port],
:dbname => opts[:database],
:user => opts[:user],
:password => opts[:password],
:connect_timeout => opts[:connect_timeout] || 20,
:sslmode => opts[:sslmode],
:sslrootcert => opts[:sslrootcert]
}.delete_if { |key, value| blank_object?(value) }
# :nocov:
connection_params.merge!(opts[:driver_options]) if opts[:driver_options]
# :nocov:
conn = Adapter.connect(opts[:conn_str] || connection_params)
conn.instance_variable_set(:@prepared_statements, {})
if receiver = opts[:notice_receiver]
conn.set_notice_receiver(&receiver)
end
# :nocov:
if conn.respond_to?(:type_map_for_queries=) && defined?(PG_QUERY_TYPE_MAP)
# :nocov:
conn.type_map_for_queries = PG_QUERY_TYPE_MAP
end
# :nocov:
else
unless typecast_value_boolean(@opts.fetch(:force_standard_strings, true))
raise Error, "Cannot create connection using postgres-pr unless force_standard_strings is set"
end
conn = Adapter.connect(
(opts[:host] unless blank_object?(opts[:host])),
opts[:port] || 5432,
nil, '',
opts[:database],
opts[:user],
opts[:password]
)
end
# :nocov:
conn.instance_variable_set(:@db, self)
# :nocov:
if encoding = opts[:encoding] || opts[:charset]
if conn.respond_to?(:set_client_encoding)
conn.set_client_encoding(encoding)
else
conn.async_exec("set client_encoding to '#{encoding}'")
end
end
# :nocov:
connection_configuration_sqls(opts).each{|sql| conn.execute(sql)}
conn
end
# Always false, support was moved to pg_extended_date_support extension.
# Needs to stay defined here so that sequel_pg works.
def convert_infinite_timestamps
false
end
# Enable pg_extended_date_support extension if symbol or string is given.
def convert_infinite_timestamps=(v)
case v
when Symbol, String, true
extension(:pg_extended_date_support)
self.convert_infinite_timestamps = v
end
end
def disconnect_connection(conn)
conn.finish
rescue PGError, IOError
nil
end
# :nocov:
if USES_PG && Object.const_defined?(:PG) && ::PG.const_defined?(:Constants) && ::PG::Constants.const_defined?(:PG_DIAG_SCHEMA_NAME)
# :nocov:
# Return a hash of information about the related PGError (or Sequel::DatabaseError that
# wraps a PGError), with the following entries (any of which may be +nil+):
#
# :schema :: The schema name related to the error
# :table :: The table name related to the error
# :column :: the column name related to the error
# :constraint :: The constraint name related to the error
# :type :: The datatype name related to the error
# :severity :: The severity of the error (e.g. "ERROR")
# :sql_state :: The SQL state code related to the error
# :message_primary :: A single line message related to the error
# :message_detail :: Any detail supplementing the primary message
# :message_hint :: Possible suggestion about how to fix the problem
# :statement_position :: Character offset in statement submitted by client where error occurred (starting at 1)
# :internal_position :: Character offset in internal statement where error occurred (starting at 1)
# :internal_query :: Text of internally-generated statement where error occurred
# :source_file :: PostgreSQL source file where the error occurred
# :source_line :: Line number of PostgreSQL source file where the error occurred
# :source_function :: Function in PostgreSQL source file where the error occurred
#
# This requires a PostgreSQL 9.3+ server and 9.3+ client library,
# and ruby-pg 0.16.0+ to be supported.
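#
# A sketch of typical usage (table and constraint names are hypothetical):
#
#   begin
#     DB[:items].insert(id: 1)
#   rescue Sequel::DatabaseError => e
#     DB.error_info(e)[:constraint] # e.g. "items_pkey"
#   end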
def error_info(e)
e = e.wrapped_exception if e.is_a?(DatabaseError)
r = e.result
{
:schema => r.error_field(::PG::PG_DIAG_SCHEMA_NAME),
:table => r.error_field(::PG::PG_DIAG_TABLE_NAME),
:column => r.error_field(::PG::PG_DIAG_COLUMN_NAME),
:constraint => r.error_field(::PG::PG_DIAG_CONSTRAINT_NAME),
:type => r.error_field(::PG::PG_DIAG_DATATYPE_NAME),
:severity => r.error_field(::PG::PG_DIAG_SEVERITY),
:sql_state => r.error_field(::PG::PG_DIAG_SQLSTATE),
:message_primary => r.error_field(::PG::PG_DIAG_MESSAGE_PRIMARY),
:message_detail => r.error_field(::PG::PG_DIAG_MESSAGE_DETAIL),
:message_hint => r.error_field(::PG::PG_DIAG_MESSAGE_HINT),
:statement_position => r.error_field(::PG::PG_DIAG_STATEMENT_POSITION),
:internal_position => r.error_field(::PG::PG_DIAG_INTERNAL_POSITION),
:internal_query => r.error_field(::PG::PG_DIAG_INTERNAL_QUERY),
:source_file => r.error_field(::PG::PG_DIAG_SOURCE_FILE),
:source_line => r.error_field(::PG::PG_DIAG_SOURCE_LINE),
:source_function => r.error_field(::PG::PG_DIAG_SOURCE_FUNCTION)
}
end
end
def execute(sql, opts=OPTS, &block)
synchronize(opts[:server]){|conn| check_database_errors{_execute(conn, sql, opts, &block)}}
end
# :nocov:
if USES_PG
# :nocov:
# +copy_table+ uses PostgreSQL's +COPY TO STDOUT+ SQL statement to return formatted
# results directly to the caller. This method is only supported if pg is the
# underlying ruby driver. This method should only be called if you want
# results returned to the client. If you are using +COPY TO+
# with a filename, you should just use +run+ instead of this method.
#
# The table argument supports the following types:
#
# String :: Uses the first argument directly as literal SQL. If you are using
# a version of PostgreSQL before 9.0, you will probably want to
# use a string if you are using any options at all, as the syntax
# Sequel uses for options is only compatible with PostgreSQL 9.0+.
# This should be the full COPY statement passed to PostgreSQL, not
# just the SELECT query. If a string is given, the :format and
# :options options are ignored.
# Dataset :: Uses a query instead of a table name when copying.
# other :: Uses a table name (usually a symbol) when copying.
#
# The following options are respected:
#
# :format :: The format to use. text is the default, so this should be :csv or :binary.
# :options :: An options SQL string to use, which should contain comma separated options.
# :server :: The server on which to run the query.
#
# If a block is provided, the method continually yields to the block, one yield
# per row. If a block is not provided, a single string is returned with all
# of the data.
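#
# Usage sketches (table and data are hypothetical):
#
#   DB.copy_table(:items, format: :csv)                  # => "1,abc\n2,def\n"
#   DB.copy_table(:items, format: :csv){|row| print row} # one yield per row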
def copy_table(table, opts=OPTS)
synchronize(opts[:server]) do |conn|
conn.execute(copy_table_sql(table, opts))
begin
if defined?(yield)
while buf = conn.get_copy_data
yield buf
end
b = nil
else
b = String.new
b << buf while buf = conn.get_copy_data
end
res = conn.get_last_result
if !res || res.result_status != 1
raise PG::NotAllCopyDataRetrieved, "Not all COPY data retrieved"
end
b
rescue => e
raise_error(e, :disconnect=>true)
ensure
if buf && !e
raise DatabaseDisconnectError, "disconnecting as a partial COPY may leave the connection in an unusable state"
end
end
end
end
# +copy_into+ uses PostgreSQL's +COPY FROM STDIN+ SQL statement to do very fast inserts
# into a table using input preformatting in either CSV or PostgreSQL text format.
# This method is only supported if pg 0.14.0+ is the underlying ruby driver.
# This method should only be called if you want
# results returned to the client. If you are using +COPY FROM+
# with a filename, you should just use +run+ instead of this method.
#
# The following options are respected:
#
# :columns :: The columns to insert into, with the same order as the columns in the
# input data. If this isn't given, uses all columns in the table.
# :data :: The data to copy to PostgreSQL, which should already be in CSV or PostgreSQL
# text format. This can be either a string, or any object that responds to
# each and yields strings.
# :format :: The format to use. text is the default, so this should be :csv or :binary.
# :options :: An options SQL string to use, which should contain comma separated options.
# :server :: The server on which to run the query.
#
# If a block is provided and :data option is not, this will yield to the block repeatedly.
# The block should return a string, or nil to signal that it is finished.
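#
# Usage sketches (table and data are hypothetical):
#
#   DB.copy_into(:items, format: :csv, columns: [:id, :name],
#     data: "1,abc\n2,def\n")
#
#   rows = ["1\tabc\n", "2\tdef\n"]
#   DB.copy_into(:items){rows.shift} # block form; returning nil ends the copy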
def copy_into(table, opts=OPTS)
data = opts[:data]
data = Array(data) if data.is_a?(String)
if defined?(yield) && data
raise Error, "Cannot provide both a :data option and a block to copy_into"
elsif !defined?(yield) && !data
raise Error, "Must provide either a :data option or a block to copy_into"
end
synchronize(opts[:server]) do |conn|
conn.execute(copy_into_sql(table, opts))
begin
if defined?(yield)
while buf = yield
conn.put_copy_data(buf)
end
else
data.each{|buff| conn.put_copy_data(buff)}
end
rescue Exception => e
conn.put_copy_end("ruby exception occurred while copying data into PostgreSQL")
ensure
conn.put_copy_end unless e
while res = conn.get_result
raise e if e
check_database_errors{res.check}
end
end
end
end
# Listens on the given channel (or multiple channels if channel is an array), waiting for notifications.
# After a notification is received, or the timeout has passed, stops listening to the channel. Options:
#
# :after_listen :: An object that responds to +call+ that is called with the underlying connection after the LISTEN
# statement is sent, but before the connection starts waiting for notifications.
# :loop :: Whether to continually wait for notifications, instead of just waiting for a single
# notification. If this option is given, a block must be provided. If this object responds to +call+, it is
# called with the underlying connection after each notification is received (after the block is called).
# If a :timeout option is used, and a callable object is given, the object will also be called if the
# timeout expires. If :loop is used and you want to stop listening, you can either break from inside the
# block given to #listen, or you can throw :stop from inside the :loop object's call method or the block.
# :server :: The server on which to listen, if the sharding support is being used.
# :timeout :: How long to wait for a notification, in seconds (can provide a float value for fractional seconds).
# If this object responds to +call+, it will be called and should return the number of seconds to wait.
# If the loop option is also specified, the object will be called on each iteration to obtain a new
# timeout value. If not given or nil, waits indefinitely.
#
# This method is only supported if pg is used as the underlying ruby driver. It returns the
# channel the notification was sent to (as a string), unless :loop was used, in which case it returns nil.
# If a block is given, it is yielded 3 arguments:
# * the channel the notification was sent to (as a string)
# * the backend pid of the notifier (as an integer),
# * and the payload of the notification (as a string or nil).
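#
# Usage sketches (channel name is hypothetical):
#
#   DB.listen(:new_order, timeout: 30){|channel, pid, payload| p payload}
#   DB.listen(:new_order, loop: true){|channel, pid, payload| p payload}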
def listen(channels, opts=OPTS, &block)
check_database_errors do
synchronize(opts[:server]) do |conn|
begin
channels = Array(channels)
channels.each do |channel|
sql = "LISTEN ".dup
dataset.send(:identifier_append, sql, channel)
conn.execute(sql)
end
opts[:after_listen].call(conn) if opts[:after_listen]
timeout = opts[:timeout]
if timeout
timeout_block = timeout.respond_to?(:call) ? timeout : proc{timeout}
end
if l = opts[:loop]
raise Error, 'calling #listen with :loop requires a block' unless block
loop_call = l.respond_to?(:call)
catch(:stop) do
while true
t = timeout_block ? [timeout_block.call] : []
conn.wait_for_notify(*t, &block)
l.call(conn) if loop_call
end
end
nil
else
t = timeout_block ? [timeout_block.call] : []
conn.wait_for_notify(*t, &block)
end
ensure
conn.execute("UNLISTEN *")
end
end
end
end
end
private
# Execute the given SQL string or prepared statement on the connection object.
def _execute(conn, sql, opts, &block)
if sql.is_a?(Symbol)
execute_prepared_statement(conn, sql, opts, &block)
else
conn.execute(sql, opts[:arguments], &block)
end
end
# Execute the prepared statement name with the given arguments on the connection.
def _execute_prepared_statement(conn, ps_name, args, opts)
conn.exec_prepared(ps_name, args)
end
# Add the primary_keys and primary_key_sequences instance variables,
# so we can get the correct return values for inserted rows.
def adapter_initialize
@use_iso_date_format = typecast_value_boolean(@opts.fetch(:use_iso_date_format, true))
initialize_postgres_adapter
# :nocov:
add_conversion_proc(17, method(:unescape_bytea)) if USES_PG
add_conversion_proc(1082, TYPE_TRANSLATOR_DATE) if @use_iso_date_format
# :nocov:
self.convert_infinite_timestamps = @opts[:convert_infinite_timestamps]
end
# Convert exceptions raised from the block into DatabaseErrors.
def check_database_errors
yield
rescue => e
raise_error(e, :classes=>database_error_classes)
end
# Set the DateStyle to ISO if configured, for faster date parsing.
def connection_configuration_sqls(opts=@opts)
sqls = super
# :nocov:
sqls << "SET DateStyle = 'ISO'" if @use_iso_date_format
# :nocov:
sqls
end
# :nocov:
if USES_PG
def unescape_bytea(s)
::Sequel::SQL::Blob.new(Adapter.unescape_bytea(s))
end
end
# :nocov:
DATABASE_ERROR_CLASSES = [PGError].freeze
def database_error_classes
DATABASE_ERROR_CLASSES
end
def disconnect_error?(exception, opts)
super ||
Adapter::DISCONNECT_ERROR_CLASSES.any?{|klass| exception.is_a?(klass)} ||
exception.message =~ Adapter::DISCONNECT_ERROR_RE
end
def database_exception_sqlstate(exception, opts)
# :nocov:
if exception.respond_to?(:result) && (result = exception.result)
# :nocov:
result.error_field(PGresult::PG_DIAG_SQLSTATE)
end
end
def dataset_class_default
Dataset
end
# Execute the prepared statement with the given name on an available
# connection, using the given args. If the connection has not prepared
# a statement with the given name yet, prepare it. If the connection
# has prepared a statement with the same name and different SQL,
# deallocate that statement first and then prepare this statement.
# If a block is given, yield the result, otherwise, return the number
# of rows changed.
def execute_prepared_statement(conn, name, opts=OPTS, &block)
ps = prepared_statement(name)
sql = ps.prepared_sql
ps_name = name.to_s
if args = opts[:arguments]
args = args.map{|arg| bound_variable_arg(arg, conn)}
end
unless conn.prepared_statements[ps_name] == sql
conn.execute("DEALLOCATE #{ps_name}") if conn.prepared_statements.include?(ps_name)
conn.check_disconnect_errors{log_connection_yield("PREPARE #{ps_name} AS #{sql}", conn){conn.prepare(ps_name, sql)}}
conn.prepared_statements[ps_name] = sql
end
log_sql = "EXECUTE #{ps_name}"
if ps.log_sql
log_sql += " ("
log_sql << sql
log_sql << ")"
end
q = conn.check_disconnect_errors{log_connection_yield(log_sql, conn, args){_execute_prepared_statement(conn, ps_name, args, opts)}}
begin
defined?(yield) ? yield(q) : q.cmd_tuples
ensure
q.clear if q && q.respond_to?(:clear)
end
end
# Don't log, since logging is done by the underlying connection.
def log_connection_execute(conn, sql)
conn.execute(sql)
end
def rollback_transaction(conn, opts=OPTS)
super unless conn.transaction_status == 0
end
end
class Dataset < Sequel::Dataset
include Sequel::Postgres::DatasetMethods
def fetch_rows(sql)
return cursor_fetch_rows(sql){|h| yield h} if @opts[:cursor]
execute(sql){|res| yield_hash_rows(res, fetch_rows_set_cols(res)){|h| yield h}}
end
# Use a cursor for paging.
def paged_each(opts=OPTS, &block)
unless defined?(yield)
return enum_for(:paged_each, opts)
end
use_cursor(opts).each(&block)
end
# Uses a cursor for fetching records, instead of fetching the entire result
# set at once. Note this uses a transaction around the cursor usage by
# default and can be changed using `hold: true` as described below.
# Cursors can be used to process large datasets without holding all rows
# in memory (which is what the underlying drivers may do by default).
# Options:
#
# :cursor_name :: The name assigned to the cursor (default 'sequel_cursor').
# Nested cursors require different names.
# :hold :: Declare the cursor WITH HOLD and don't use transaction around the
# cursor usage.
# :rows_per_fetch :: The number of rows per fetch (default 1000). Higher
# numbers result in fewer queries but greater memory use.
#
# Usage:
#
# DB[:huge_table].use_cursor.each{|row| p row}
# DB[:huge_table].use_cursor(rows_per_fetch: 10000).each{|row| p row}
# DB[:huge_table].use_cursor(cursor_name: 'my_cursor').each{|row| p row}
#
# This is untested with the prepared statement/bound variable support,
# and unlikely to work with either.
def use_cursor(opts=OPTS)
clone(:cursor=>{:rows_per_fetch=>1000}.merge!(opts))
end
# Replace the WHERE clause with one that uses CURRENT OF with the given
# cursor name (or the default cursor name). This allows you to update a
# large dataset by updating individual rows while processing the dataset
# via a cursor:
#
# DB[:huge_table].use_cursor(rows_per_fetch: 1).each do |row|
# DB[:huge_table].where_current_of.update(column: ruby_method(row))
# end
def where_current_of(cursor_name='sequel_cursor')
clone(:where=>Sequel.lit(['CURRENT OF '], Sequel.identifier(cursor_name)))
end
# :nocov:
if USES_PG
# :nocov:
PREPARED_ARG_PLACEHOLDER = LiteralString.new('$').freeze
# PostgreSQL specific argument mapper used for mapping the named
# argument hash to an array with numbered arguments. Only used with
# the pg driver.
module ArgumentMapper
include Sequel::Dataset::ArgumentMapper
protected
# An array of bound variable values for this query, in the correct order.
def map_to_prepared_args(hash)
prepared_args.map{|k| hash[k.to_sym]}
end
private
def prepared_arg(k)
y = k
if i = prepared_args.index(y)
i += 1
else
prepared_args << y
i = prepared_args.length
end
LiteralString.new("#{prepared_arg_placeholder}#{i}")
end
end
BindArgumentMethods = prepared_statements_module(:bind, [ArgumentMapper], %w'execute execute_dui')
PreparedStatementMethods = prepared_statements_module(:prepare, BindArgumentMethods, %w'execute execute_dui')
private
def bound_variable_modules
[BindArgumentMethods]
end
def prepared_statement_modules
[PreparedStatementMethods]
end
# PostgreSQL uses $N for placeholders instead of ?, so use a $
# as the placeholder.
def prepared_arg_placeholder
PREPARED_ARG_PLACEHOLDER
end
end
private
# Generate and execute a procedure call.
def call_procedure(name, args)
sql = String.new
sql << "CALL "
identifier_append(sql, name)
sql << "("
expression_list_append(sql, args)
sql << ")"
with_sql_first(sql)
end
# Use a cursor to fetch groups of records at a time, yielding them to the block.
def cursor_fetch_rows(sql)
server_opts = {:server=>@opts[:server] || :read_only}
cursor = @opts[:cursor]
hold = cursor[:hold]
cursor_name = quote_identifier(cursor[:cursor_name] || 'sequel_cursor')
rows_per_fetch = cursor[:rows_per_fetch].to_i
db.public_send(*(hold ? [:synchronize, server_opts[:server]] : [:transaction, server_opts])) do
begin
execute_ddl("DECLARE #{cursor_name} NO SCROLL CURSOR WITH#{'OUT' unless hold} HOLD FOR #{sql}", server_opts)
rows_per_fetch = 1000 if rows_per_fetch <= 0
fetch_sql = "FETCH FORWARD #{rows_per_fetch} FROM #{cursor_name}"
cols = nil
# Load columns only in the first fetch, so subsequent fetches are faster
execute(fetch_sql) do |res|
cols = fetch_rows_set_cols(res)
yield_hash_rows(res, cols){|h| yield h}
return if res.ntuples < rows_per_fetch
end
while true
execute(fetch_sql) do |res|
yield_hash_rows(res, cols){|h| yield h}
return if res.ntuples < rows_per_fetch
end
end
rescue Exception => e
raise
ensure
begin
execute_ddl("CLOSE #{cursor_name}", server_opts)
rescue
raise e if e
raise
end
end
end
end
# Set the columns based on the result set, and return an array of
# [type conversion proc, column name symbol] pairs.
def fetch_rows_set_cols(res)
cols = []
procs = db.conversion_procs
res.nfields.times do |fieldnum|
cols << [procs[res.ftype(fieldnum)], output_identifier(res.fname(fieldnum))]
end
self.columns = cols.map{|c| c[1]}
cols
end
# Use the driver's escape_bytea
def literal_blob_append(sql, v)
sql << "'" << db.synchronize(@opts[:server]){|c| c.escape_bytea(v)} << "'"
end
# Use the driver's escape_string
def literal_string_append(sql, v)
sql << "'" << db.synchronize(@opts[:server]){|c| c.escape_string(v)} << "'"
end
# For each row in the result set, yield a hash with column name symbol
# keys and typecasted values.
def yield_hash_rows(res, cols)
ntuples = res.ntuples
recnum = 0
while recnum < ntuples
fieldnum = 0
nfields = cols.length
converted_rec = {}
while fieldnum < nfields
type_proc, fieldsym = cols[fieldnum]
value = res.getvalue(recnum, fieldnum)
converted_rec[fieldsym] = (value && type_proc) ? type_proc.call(value) : value
fieldnum += 1
end
yield converted_rec
recnum += 1
end
end
end
end
end
# :nocov:
if Sequel::Postgres::USES_PG && !ENV['NO_SEQUEL_PG']
begin
require 'sequel_pg'
if defined?(Gem) &&
(sequel_pg_spec = Gem.loaded_specs['sequel_pg'] rescue nil) &&
(sequel_pg_spec.version < Gem::Version.new('1.6.17'))
raise Sequel::Error, "the installed sequel_pg is too old, please update to at least sequel_pg-1.6.17"
end
rescue LoadError
end
end
# :nocov:
sequel-5.63.0/lib/sequel/adapters/postgresql.rb 0000664 0000000 0000000 00000000073 14342141206 0021477 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative 'postgres'
sequel-5.63.0/lib/sequel/adapters/shared/ 0000775 0000000 0000000 00000000000 14342141206 0020215 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/adapters/shared/access.rb 0000664 0000000 0000000 00000022121 14342141206 0022001 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/emulate_offset_with_reverse_and_count'
require_relative '../utils/unmodified_identifiers'
require_relative '../utils/columns_limit_1'
module Sequel
module Access
Sequel::Database.set_shared_adapter_scheme(:access, self)
module DatabaseMethods
include UnmodifiedIdentifiers::DatabaseMethods
def database_type
:access
end
# Doesn't work, due to security restrictions on MSysObjects
#def tables
# from(:MSysObjects).where(Type: 1, Flags: 0).select_map(:Name).map(&:to_sym)
#end
# Access doesn't support renaming tables via an SQL query,
# so create a copy of the table and then drop the original table.
def rename_table(from_table, to_table)
create_table(to_table, :as=>from(from_table))
drop_table(from_table)
end
# Access uses the Counter type for autoincrementing keys
def serial_primary_key_options
{:primary_key => true, :type=>:Counter}
end
private
def alter_table_set_column_type_sql(table, op)
"ALTER COLUMN #{quote_identifier(op[:name])} #{type_literal(op)}"
end
# Access doesn't support CREATE TABLE AS, it only supports SELECT INTO.
# Emulating CREATE TABLE AS using SELECT INTO is only possible if a dataset
# is given as the argument, it can't work with a string, so raise an
# Error if a string is given.
def create_table_as(name, ds, options)
raise(Error, "must provide dataset instance as value of create_table :as option on Access") unless ds.is_a?(Sequel::Dataset)
run(ds.into(name).sql)
end
DATABASE_ERROR_REGEXPS = {
/The changes you requested to the table were not successful because they would create duplicate values in the index, primary key, or relationship/ => UniqueConstraintViolation,
/You cannot add or change a record because a related record is required|The record cannot be deleted or changed because table/ => ForeignKeyConstraintViolation,
/One or more values are prohibited by the validation rule/ => CheckConstraintViolation,
/You must enter a value in the .+ field|cannot contain a Null value because the Required property for this field is set to True/ => NotNullConstraintViolation,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# Access's Byte type will accept much larger values,
# even though it only stores 0-255. Do not set min/max
# values for the Byte type.
def column_schema_integer_min_max_values(db_type)
return if /byte/i =~ db_type
super
end
def drop_index_sql(table, op)
"DROP INDEX #{quote_identifier(op[:name] || default_index_name(table, op[:columns]))} ON #{quote_schema_table(table)}"
end
# Access doesn't have a 64-bit integer type, so use integer and hope
# the user isn't using more than 32 bits.
def type_literal_generic_bignum_symbol(column)
:integer
end
# Access doesn't have a true boolean class, so it uses bit
def type_literal_generic_trueclass(column)
:bit
end
# Access uses image type for blobs
def type_literal_generic_file(column)
:image
end
end
module DatasetMethods
include(Module.new do
Dataset.def_sql_method(self, :select, %w'select distinct limit columns into from join where group order having compounds')
end)
include EmulateOffsetWithReverseAndCount
include UnmodifiedIdentifiers::DatasetMethods
include ::Sequel::Dataset::ColumnsLimit1
EXTRACT_MAP = {:year=>"'yyyy'", :month=>"'m'", :day=>"'d'", :hour=>"'h'", :minute=>"'n'", :second=>"'s'"}.freeze
EXTRACT_MAP.each_value(&:freeze)
OPS = {:'%'=>' Mod '.freeze, :'||'=>' & '.freeze}.freeze
CAST_TYPES = {String=>:CStr, Integer=>:CLng, Date=>:CDate, Time=>:CDate, DateTime=>:CDate, Numeric=>:CDec, BigDecimal=>:CDec, File=>:CStr, Float=>:CDbl, TrueClass=>:CBool, FalseClass=>:CBool}.freeze
# Access doesn't support CASE, so emulate it with nested IIF function calls.
def case_expression_sql_append(sql, ce)
literal_append(sql, ce.with_merged_expression.conditions.reverse.inject(ce.default){|exp,(cond,val)| Sequel::SQL::Function.new(:IIF, cond, val, exp)})
end
# Access doesn't support CAST, it uses separate functions for
# type conversion
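# (for example, Sequel.cast(:a, Integer) on a hypothetical column a is
# emitted as CLng(a) instead of CAST(a AS integer))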
def cast_sql_append(sql, expr, type)
sql << CAST_TYPES.fetch(type, type).to_s
sql << '('
literal_append(sql, expr)
sql << ')'
end
def complex_expression_sql_append(sql, op, args)
case op
when :ILIKE
complex_expression_sql_append(sql, :LIKE, args)
when :'NOT ILIKE'
complex_expression_sql_append(sql, :'NOT LIKE', args)
when :'!='
sql << '('
literal_append(sql, args[0])
sql << ' <> '
literal_append(sql, args[1])
sql << ')'
when :'%', :'||'
sql << '('
c = false
op_str = OPS[op]
args.each do |a|
sql << op_str if c
literal_append(sql, a)
c ||= true
end
sql << ')'
when :**
sql << '('
literal_append(sql, args[0])
sql << ' ^ '
literal_append(sql, args[1])
sql << ')'
when :extract
part = args[0]
raise(Sequel::Error, "unsupported extract argument: #{part.inspect}") unless format = EXTRACT_MAP[part]
sql << "datepart(" << format.to_s << ', '
literal_append(sql, args[1])
sql << ')'
else
super
end
end
# Use Date(), Now(), and Time() for CURRENT_DATE, CURRENT_TIMESTAMP, and CURRENT_TIME
def constant_sql_append(sql, constant)
case constant
when :CURRENT_DATE
sql << 'Date()'
when :CURRENT_TIMESTAMP
sql << 'Now()'
when :CURRENT_TIME
sql << 'Time()'
else
super
end
end
# Emulate cross join by using multiple tables in the FROM clause.
def cross_join(table)
clone(:from=>@opts[:from] + [table])
end
# Access uses [] to escape metacharacters, instead of backslashes.
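#
#   DB[:items].escape_like("a*b") # => "a[*]b" (hypothetical dataset)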
def escape_like(string)
string.gsub(/[\\*#?\[]/){|m| "[#{m}]"}
end
# Specify a table for a SELECT ... INTO query.
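# For example (hypothetical tables; output shown approximately):
#
#   DB[:items].into(:new_items).sql
#   # SELECT * INTO [new_items] FROM [items]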
def into(table)
clone(:into => table)
end
# Access uses [] for quoting identifiers, and can't handle
# ] inside identifiers.
def quoted_identifier_append(sql, v)
sql << '[' << v.to_s << ']'
end
# Access does not support derived column lists.
def supports_derived_column_lists?
false
end
# Access doesn't support INTERSECT or EXCEPT
def supports_intersect_except?
false
end
# Access does not support IS TRUE
def supports_is_true?
false
end
# Access doesn't support JOIN USING
def supports_join_using?
false
end
# Access does not support multiple columns for the IN/NOT IN operators
def supports_multiple_column_in?
false
end
# Access doesn't support truncate, so do a delete instead.
def truncate
delete
nil
end
private
# Access uses # to quote dates
def literal_date(d)
d.strftime('#%Y-%m-%d#')
end
# Access uses # to quote datetimes
def literal_datetime(t)
t.strftime('#%Y-%m-%d %H:%M:%S#')
end
alias literal_time literal_datetime
# Use 0 for false on Access
def literal_false
'0'
end
# Use -1 for true on Access
def literal_true
'-1'
end
# Emulate the char_length function with len
def native_function_name(emulated_function)
if emulated_function == :char_length
'len'
else
super
end
end
# Access does not natively support NULLS FIRST/LAST.
def requires_emulating_nulls_first?
true
end
# Access doesn't support ESCAPE for LIKE.
def requires_like_escape?
false
end
# Access requires parentheses when joining more than one table
def select_from_sql(sql)
if f = @opts[:from]
sql << ' FROM '
if (j = @opts[:join]) && !j.empty?
sql << ('(' * j.length)
end
source_list_append(sql, f)
end
end
def select_into_sql(sql)
if i = @opts[:into]
sql << " INTO "
identifier_append(sql, i)
end
end
# Access requires parentheses when joining more than one table
def select_join_sql(sql)
if js = @opts[:join]
js.each do |j|
literal_append(sql, j)
sql << ')'
end
end
end
# Access uses TOP for limits
def select_limit_sql(sql)
if l = @opts[:limit]
sql << " TOP "
literal_append(sql, l)
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/shared/db2.rb 0000664 0000000 0000000 00000040462 14342141206 0021217 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/emulate_offset_with_row_number'
require_relative '../utils/columns_limit_1'
module Sequel
module DB2
Sequel::Database.set_shared_adapter_scheme(:db2, self)
module DatabaseMethods
# Whether to use clob as the generic File type, false by default.
attr_accessor :use_clob_as_blob
def database_type
:db2
end
# Return the database version as a string. Don't rely on this,
# it may return an integer in the future.
def db2_version
return @db2_version if defined?(@db2_version)
@db2_version = metadata_dataset.with_sql("select service_level from sysibmadm.env_inst_info").first[:service_level]
end
alias_method :server_version, :db2_version
def freeze
db2_version
offset_strategy
super
end
# Use SYSIBM.SYSCOLUMNS to get the information on the tables.
def schema_parse_table(table, opts = OPTS)
m = output_identifier_meth(opts[:dataset])
im = input_identifier_meth(opts[:dataset])
metadata_dataset.with_sql("SELECT * FROM SYSIBM.SYSCOLUMNS WHERE TBNAME = #{literal(im.call(table))} ORDER BY COLNO").
collect do |column|
column[:db_type] = column.delete(:typename)
if column[:db_type] =~ /\A(VAR)?CHAR\z/
column[:db_type] << "(#{column[:length]})"
end
if column[:db_type] == "DECIMAL"
column[:db_type] << "(#{column[:longlength]},#{column[:scale]})"
end
column[:allow_null] = column.delete(:nulls) == 'Y'
identity = column.delete(:identity) == 'Y'
if column[:primary_key] = identity || !column[:keyseq].nil?
column[:auto_increment] = identity
end
column[:type] = schema_column_type(column[:db_type])
column[:max_length] = column[:longlength] if column[:type] == :string
[ m.call(column.delete(:name)), column]
end
end
# Use SYSCAT.TABLES to get the tables for the database
def tables
metadata_dataset.
with_sql("SELECT TABNAME FROM SYSCAT.TABLES WHERE TYPE='T' AND OWNER = #{literal(input_identifier_meth.call(opts[:user]))}").
all.map{|h| output_identifier_meth.call(h[:tabname]) }
end
# Use SYSCAT.TABLES to get the views for the database
def views
metadata_dataset.
with_sql("SELECT TABNAME FROM SYSCAT.TABLES WHERE TYPE='V' AND OWNER = #{literal(input_identifier_meth.call(opts[:user]))}").
all.map{|h| output_identifier_meth.call(h[:tabname]) }
end
# Use SYSCAT.INDEXES to get the indexes for the table
def indexes(table, opts = OPTS)
m = output_identifier_meth
table = table.value if table.is_a?(Sequel::SQL::Identifier)
indexes = {}
metadata_dataset.
from(Sequel[:syscat][:indexes]).
select(:indname, :uniquerule, :colnames).
where(:tabname=>input_identifier_meth.call(table), :system_required=>0).
each do |r|
indexes[m.call(r[:indname])] = {:unique=>(r[:uniquerule]=='U'), :columns=>r[:colnames][1..-1].split('+').map{|v| m.call(v)}}
end
indexes
end
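# Return the strategy to use for supporting OFFSET, based on the
# :offset_strategy Database option: :limit_offset, :offset_fetch, or
# :emulate (the default). A rough sketch of setting it (the connection
# URL is illustrative):
#
#   DB = Sequel.connect('ibmdb://user:password@host/db', offset_strategy: :offset_fetch)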
def offset_strategy
return @offset_strategy if defined?(@offset_strategy)
@offset_strategy = case strategy = opts[:offset_strategy].to_s
when "limit_offset", "offset_fetch"
opts[:offset_strategy] = strategy.to_sym
else
opts[:offset_strategy] = :emulate
end
end
# DB2 supports transaction isolation levels.
def supports_transaction_isolation_levels?
true
end
# On DB2, a table might need to be REORGed when testing for its
# existence. This REORGs automatically if the database raises a
# specific error indicating the table should be REORGed.
def table_exists?(name)
v ||= false # only retry once
sch, table_name = schema_and_table(name)
name = SQL::QualifiedIdentifier.new(sch, table_name) if sch
from(name).first
true
rescue DatabaseError => e
if e.to_s =~ /Operation not allowed for reason code "7" on table/ && v == false
# table probably needs reorg
reorg(name)
v = true
retry
end
false
end
private
def alter_table_sql(table, op)
case op[:op]
when :add_column
if op[:primary_key] && op[:auto_increment] && op[:type] == Integer
[
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op.merge(:auto_increment=>false, :primary_key=>false, :default=>0, :null=>false))}",
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{literal(op[:name])} DROP DEFAULT",
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{literal(op[:name])} SET #{auto_increment_sql}"
]
else
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}"
end
when :drop_column
"ALTER TABLE #{quote_schema_table(table)} DROP #{column_definition_sql(op)}"
when :rename_column # renaming is only possible after db2 v9.7
"ALTER TABLE #{quote_schema_table(table)} RENAME COLUMN #{quote_identifier(op[:name])} TO #{quote_identifier(op[:new_name])}"
when :set_column_type
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET DATA TYPE #{type_literal(op)}"
when :set_column_default
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} SET DEFAULT #{literal(op[:default])}"
when :add_constraint
if op[:type] == :unique
sqls = op[:columns].map{|c| ["ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(c)} SET NOT NULL", reorg_sql(table)]}
sqls << super
sqls.flatten
else
super
end
else
super
end
end
# REORG the related table whenever it is altered. This is not always
# required, but it is necessary for compatibility with other Sequel
# code in many cases.
def apply_alter_table(name, ops)
alter_table_sql_list(name, ops).each do |sql|
execute_ddl(sql)
reorg(name)
end
end
# DB2 uses an identity column for autoincrement.
def auto_increment_sql
'GENERATED ALWAYS AS IDENTITY'
end
# DB2 does not allow adding primary key constraints to NULLable columns.
def can_add_primary_key_constraint_on_nullable_columns?
false
end
# Supply columns with NOT NULL if they are part of a composite
# primary key or unique constraint
def column_list_sql(g)
ks = []
g.constraints.each{|c| ks = c[:columns] if [:primary_key, :unique].include?(c[:type])}
g.columns.each{|c| c[:null] = false if ks.include?(c[:name]) }
super
end
# Insert data from the current table into the new table after
# creating the table, since it is not possible to do it in one step.
def create_table_as(name, sql, options)
super
from(name).insert(sql.is_a?(Dataset) ? sql : dataset.with_sql(sql))
end
# DB2 requires parens around the SELECT, and DEFINITION ONLY at the end.
def create_table_as_sql(name, sql, options)
"#{create_table_prefix_sql(name, options)} AS (#{sql}) DEFINITION ONLY"
end
# Here we use DGTT, which has the most backward compatibility and uses
# DECLARE instead of CREATE. CGTT can only be used after version 9.7.
# http://www.ibm.com/developerworks/data/library/techarticle/dm-0912globaltemptable/
def create_table_prefix_sql(name, options)
if options[:temp]
"DECLARE GLOBAL TEMPORARY TABLE #{quote_identifier(name)}"
else
super
end
end
DATABASE_ERROR_REGEXPS = {
/DB2 SQL Error: SQLCODE=-803, SQLSTATE=23505|One or more values in the INSERT statement, UPDATE statement, or foreign key update caused by a DELETE statement are not valid because the primary key, unique constraint or unique index/ => UniqueConstraintViolation,
/DB2 SQL Error: (SQLCODE=-530, SQLSTATE=23503|SQLCODE=-532, SQLSTATE=23504)|The insert or update value of the FOREIGN KEY .+ is not equal to any value of the parent key of the parent table|A parent row cannot be deleted because the relationship .+ restricts the deletion/ => ForeignKeyConstraintViolation,
/DB2 SQL Error: SQLCODE=-545, SQLSTATE=23513|The requested operation is not allowed because a row does not satisfy the check constraint/ => CheckConstraintViolation,
/DB2 SQL Error: SQLCODE=-407, SQLSTATE=23502|Assignment of a NULL value to a NOT NULL column/ => NotNullConstraintViolation,
/DB2 SQL Error: SQLCODE=-911, SQLSTATE=40001|The current transaction has been rolled back because of a deadlock or timeout/ => SerializationFailure,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# DB2 has issues with quoted identifiers, so
# turn off database quoting by default.
def quote_identifiers_default
false
end
# DB2 uses RENAME TABLE to rename tables.
def rename_table_sql(name, new_name)
"RENAME TABLE #{quote_schema_table(name)} TO #{quote_schema_table(new_name)}"
end
# Run the REORG TABLE command for the table, necessary when
# the table has been altered.
def reorg(table)
execute_ddl(reorg_sql(table))
end
# The SQL to use for REORGing a table.
def reorg_sql(table)
"CALL SYSPROC.ADMIN_CMD(#{literal("REORG TABLE #{quote_schema_table(table)}")})"
end
# Treat clob as blob if use_clob_as_blob is true
def schema_column_type(db_type)
(use_clob_as_blob && db_type.downcase == 'clob') ? :blob : super
end
# SQL to set the transaction isolation level
def set_transaction_isolation_sql(level)
"SET CURRENT ISOLATION #{Database::TRANSACTION_ISOLATION_LEVELS[level]}"
end
# Use the clob type by default for Files.
# Note: if the user selects to use blob, then the insert statement
# should use this for the blob value:
# cast(X'fffefdfcfbfa' as blob(2G))
def type_literal_generic_file(column)
use_clob_as_blob ? :clob : :blob
end
# DB2 uses smallint to store booleans.
def type_literal_generic_trueclass(column)
:smallint
end
alias type_literal_generic_falseclass type_literal_generic_trueclass
# DB2 uses clob for text types.
def uses_clob_for_text?
true
end
# DB2 supports views with check option.
def view_with_check_option_support
:local
end
end
module DatasetMethods
include EmulateOffsetWithRowNumber
include ::Sequel::Dataset::ColumnsLimit1
BITWISE_METHOD_MAP = {:& =>:BITAND, :| => :BITOR, :^ => :BITXOR, :'B~'=>:BITNOT}.freeze
# DB2 casts strings using RTRIM and CHAR instead of VARCHAR.
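# A rough sketch of the resulting SQL fragment:
#
#   Sequel.cast(:a, String)
#   # RTRIM(CHAR(a))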
def cast_sql_append(sql, expr, type)
if(type == String)
sql << "RTRIM(CHAR("
literal_append(sql, expr)
sql << "))"
else
super
end
end
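# Emulate bitwise operators using the BITAND/BITOR/BITXOR/BITNOT
# functions, and handle EXTRACT by calling the date part as a
# function. A rough sketch (the column name is illustrative):
#
#   Sequel[:a].sql_number & 3
#   # BITAND(a, 3)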
def complex_expression_sql_append(sql, op, args)
case op
when :&, :|, :^, :%, :<<, :>>
complex_expression_emulate_append(sql, op, args)
when :'B~'
literal_append(sql, SQL::Function.new(:BITNOT, *args))
when :extract
sql << args[0].to_s
sql << '('
literal_append(sql, args[1])
sql << ')'
else
super
end
end
def quote_identifiers?
@opts.fetch(:quote_identifiers, false)
end
def supports_cte?(type=:select)
type == :select
end
# DB2 supports GROUP BY CUBE
def supports_group_cube?
true
end
# DB2 supports GROUP BY ROLLUP
def supports_group_rollup?
true
end
# DB2 supports GROUPING SETS
def supports_grouping_sets?
true
end
# DB2 does not support IS TRUE.
def supports_is_true?
false
end
# DB2 supports lateral subqueries
def supports_lateral_subqueries?
true
end
# DB2 supports MERGE
def supports_merge?
true
end
# DB2 does not support multiple columns in IN.
def supports_multiple_column_in?
false
end
# DB2 only allows * in SELECT if it is the only thing being selected.
def supports_select_all_and_column?
false
end
# DB2 supports window functions
def supports_window_functions?
true
end
# DB2 does not support WHERE 1.
def supports_where_true?
false
end
private
# Normalize conditions for MERGE WHEN.
def _merge_when_conditions_sql(sql, data)
if data.has_key?(:conditions)
sql << " AND "
literal_append(sql, _normalize_merge_when_conditions(data[:conditions]))
end
end
# Handle nil, false, and true MERGE WHEN conditions to avoid non-boolean
# type error.
def _normalize_merge_when_conditions(conditions)
case conditions
when nil, false
{1=>0}
when true
{1=>1}
when Sequel::SQL::DelayedEvaluation
Sequel.delay{_normalize_merge_when_conditions(conditions.call(self))}
else
conditions
end
end
def empty_from_sql
' FROM "SYSIBM"."SYSDUMMY1"'
end
# Emulate offset with row number by default, and also when the limit_offset
# strategy is used without a limit, as DB2 doesn't support that syntax with
# no limit.
def emulate_offset_with_row_number?
super && (db.offset_strategy == :emulate || (db.offset_strategy == :limit_offset && !@opts[:limit]))
end
# DB2 needs the standard workaround to insert all default values into
# a table with more than one column.
def insert_supports_empty_values?
false
end
# Use 0 for false on DB2
def literal_false
'0'
end
# DB2 doesn't support fractional seconds in times, only fractional seconds in timestamps.
def literal_sqltime(v)
v.strftime("'%H:%M:%S'")
end
# Use 1 for true on DB2
def literal_true
'1'
end
# DB2 uses a literal hexadecimal number for blob strings
def literal_blob_append(sql, v)
if db.use_clob_as_blob
super
else
sql << "BLOB(X'" << v.unpack("H*").first << "')"
end
end
# DB2 can insert multiple rows using a UNION
def multi_insert_sql_strategy
:union
end
# Emulate the char_length function with length
def native_function_name(emulated_function)
if emulated_function == :char_length
'length'
else
super
end
end
# DB2 does not require that ROW_NUMBER be ordered.
def require_offset_order?
false
end
# At least some versions of DB2 do not support NULLS FIRST/LAST.
def requires_emulating_nulls_first?
true
end
# Modify the sql to limit the number of rows returned.
# Uses :offset_strategy Database option to determine how to format the
# limit and offset.
def select_limit_sql(sql)
strategy = db.offset_strategy
return super if strategy == :limit_offset
if strategy == :offset_fetch && (o = @opts[:offset])
sql << " OFFSET "
literal_append(sql, o)
sql << " ROWS"
end
if l = @opts[:limit]
if l == 1
sql << " FETCH FIRST ROW ONLY"
else
sql << " FETCH FIRST "
literal_append(sql, l)
sql << " ROWS ONLY"
end
end
end
# DB2 supports quoted function names.
def supports_quoted_function_names?
true
end
def _truncate_sql(table)
# "TRUNCATE #{table} IMMEDIATE" is only for newer version of db2, so we
# use the following one
"ALTER TABLE #{quote_schema_table(table)} ACTIVATE NOT LOGGED INITIALLY WITH EMPTY TABLE"
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/shared/mssql.rb 0000664 0000000 0000000 00000123265 14342141206 0021712 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/emulate_offset_with_row_number'
require_relative '../utils/split_alter_table'
module Sequel
module MSSQL
Sequel::Database.set_shared_adapter_scheme(:mssql, self)
def self.mock_adapter_setup(db)
db.instance_exec do
@server_version = 11000000
end
end
module DatabaseMethods
FOREIGN_KEY_ACTION_MAP = {0 => :no_action, 1 => :cascade, 2 => :set_null, 3 => :set_default}.freeze
include Sequel::Database::SplitAlterTable
# Whether to use N'' to quote strings, which allows unicode characters inside the
# strings. True by default for compatibility, can be set to false for a possible
# performance increase. This sets the default for all datasets created from this
# Database object.
attr_accessor :mssql_unicode_strings
# Whether to use LIKE without COLLATE Latin1_General_CS_AS. Skipping the COLLATE
# can significantly increase performance in some cases.
attr_accessor :like_without_collate
# Execute the given stored procedure with the given name.
#
# Options:
# :args :: Arguments to stored procedure. For named arguments, this should be a
# hash keyed by argument name. For unnamed arguments, this should be an
# array. Output parameters to the function are specified using :output.
# You can also name output parameters and provide a type by using an
# array containing :output, the type name, and the parameter name.
# :server :: The server/shard on which to execute the procedure.
#
# This method returns a single hash with the following keys:
#
# :result :: The result code of the stored procedure
# :numrows :: The number of rows affected by the stored procedure
# output params :: Values for any output parameters, using the name given for the output parameter
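#
# A hypothetical return value (the output parameter key depends on the
# name given for the output parameter):
#
#   {:result=>0, :numrows=>1, :varname=>'output value'}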
#
# Because Sequel datasets only support a single result set per query, and retrieving
# the result code and number of rows requires a query, this does not support
# stored procedures which also return result sets. To handle such stored procedures,
# you should drop down to the connection/driver level by using Sequel::Database#synchronize
# to get access to the underlying connection object.
#
# Examples:
#
# DB.call_mssql_sproc(:SequelTest, {args: ['input arg', :output]})
# DB.call_mssql_sproc(:SequelTest, {args: ['input arg', [:output, 'int', 'varname']]})
#
# named params:
# DB.call_mssql_sproc(:SequelTest, args: {
# 'input_arg1_name' => 'input arg1 value',
# 'input_arg2_name' => 'input arg2 value',
# 'output_arg_name' => [:output, 'int', 'varname']
# })
def call_mssql_sproc(name, opts=OPTS)
args = opts[:args] || []
names = ['@RC AS RESULT', '@@ROWCOUNT AS NUMROWS']
declarations = ['@RC int']
values = []
if args.is_a?(Hash)
named_args = true
args = args.to_a
method = :each
else
method = :each_with_index
end
args.public_send(method) do |v, i|
if named_args
k = v
v, type, select = i
raise Error, "must provide output parameter name when using output parameters with named arguments" if v == :output && !select
else
v, type, select = v
end
if v == :output
type ||= "nvarchar(max)"
if named_args
varname = select
else
varname = "var#{i}"
select ||= varname
end
names << "@#{varname} AS #{quote_identifier(select)}"
declarations << "@#{varname} #{type}"
value = "@#{varname} OUTPUT"
else
value = literal(v)
end
if named_args
value = "@#{k}=#{value}"
end
values << value
end
sql = "DECLARE #{declarations.join(', ')}; EXECUTE @RC = #{name} #{values.join(', ')}; SELECT #{names.join(', ')}"
ds = dataset.with_sql(sql)
ds = ds.server(opts[:server]) if opts[:server]
ds.first
end
def database_type
:mssql
end
# Microsoft SQL Server namespaces indexes per table.
def global_index_namespace?
false
end
# Return foreign key information using the system views, including
# :name, :on_delete, and :on_update entries in the hashes.
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
im = input_identifier_meth
schema, table = schema_and_table(table)
current_schema = m.call(get(Sequel.function('schema_name')))
fk_action_map = FOREIGN_KEY_ACTION_MAP
fk = Sequel[:fk]
fkc = Sequel[:fkc]
ds = metadata_dataset.from(Sequel.lit('[sys].[foreign_keys]').as(:fk)).
join(Sequel.lit('[sys].[foreign_key_columns]').as(:fkc), :constraint_object_id => :object_id).
join(Sequel.lit('[sys].[all_columns]').as(:pc), :object_id => fkc[:parent_object_id], :column_id => fkc[:parent_column_id]).
join(Sequel.lit('[sys].[all_columns]').as(:rc), :object_id => fkc[:referenced_object_id], :column_id => fkc[:referenced_column_id]).
where{{object_schema_name(fk[:parent_object_id]) => im.call(schema || current_schema)}}.
where{{object_name(fk[:parent_object_id]) => im.call(table)}}.
select{[fk[:name],
fk[:delete_referential_action],
fk[:update_referential_action],
pc[:name].as(:column),
rc[:name].as(:referenced_column),
object_schema_name(fk[:referenced_object_id]).as(:schema),
object_name(fk[:referenced_object_id]).as(:table)]}.
order(fk[:name], fkc[:constraint_column_id])
h = {}
ds.each do |row|
if r = h[row[:name]]
r[:columns] << m.call(row[:column])
r[:key] << m.call(row[:referenced_column])
else
referenced_schema = m.call(row[:schema])
referenced_table = m.call(row[:table])
h[row[:name]] = { :name => m.call(row[:name]),
:table => (referenced_schema == current_schema) ? referenced_table : Sequel.qualify(referenced_schema, referenced_table),
:columns => [m.call(row[:column])],
:key => [m.call(row[:referenced_column])],
:on_update => fk_action_map[row[:update_referential_action]],
:on_delete => fk_action_map[row[:delete_referential_action]] }
end
end
h.values
end
def freeze
server_version
super
end
# Use the system tables to get index information
def indexes(table, opts=OPTS)
m = output_identifier_meth
im = input_identifier_meth
indexes = {}
table = table.value if table.is_a?(Sequel::SQL::Identifier)
i = Sequel[:i]
ds = metadata_dataset.from(Sequel.lit('[sys].[tables]').as(:t)).
join(Sequel.lit('[sys].[indexes]').as(:i), :object_id=>:object_id).
join(Sequel.lit('[sys].[index_columns]').as(:ic), :object_id=>:object_id, :index_id=>:index_id).
join(Sequel.lit('[sys].[columns]').as(:c), :object_id=>:object_id, :column_id=>:column_id).
select(i[:name], i[:is_unique], Sequel[:c][:name].as(:column)).
where{{t[:name]=>im.call(table)}}.
where(i[:is_primary_key]=>0, i[:is_disabled]=>0).
order(i[:name], Sequel[:ic][:index_column_id])
if supports_partial_indexes?
ds = ds.where(i[:has_filter]=>0)
end
ds.each do |r|
index = indexes[m.call(r[:name])] ||= {:columns=>[], :unique=>(r[:is_unique] && r[:is_unique]!=0)}
index[:columns] << m.call(r[:column])
end
indexes
end
# The version of the MSSQL server, as an integer (e.g. 10001600 for
# SQL Server 2008 Express).
def server_version(server=nil)
return @server_version if @server_version
if @opts[:server_version]
return @server_version = Integer(@opts[:server_version])
end
@server_version = synchronize(server) do |conn|
(conn.server_version rescue nil) if conn.respond_to?(:server_version)
end
unless @server_version
m = /^(\d+)\.(\d+)\.(\d+)/.match(fetch("SELECT CAST(SERVERPROPERTY('ProductVersion') AS varchar)").single_value.to_s)
@server_version = (m[1].to_i * 1000000) + (m[2].to_i * 10000) + m[3].to_i
end
@server_version
end
# MSSQL 2008+ supports partial indexes.
def supports_partial_indexes?
dataset.send(:is_2008_or_later?)
end
# MSSQL supports savepoints, though it doesn't support releasing them
def supports_savepoints?
true
end
# MSSQL supports transaction isolation levels
def supports_transaction_isolation_levels?
true
end
# MSSQL supports transaction DDL statements.
def supports_transactional_ddl?
true
end
# Microsoft SQL Server supports using the INFORMATION_SCHEMA to get
# information on tables.
def tables(opts=OPTS)
information_schema_tables('BASE TABLE', opts)
end
# Microsoft SQL Server supports using the INFORMATION_SCHEMA to get
# information on views.
def views(opts=OPTS)
information_schema_tables('VIEW', opts)
end
private
# Add CLUSTERED or NONCLUSTERED as needed
def add_clustered_sql_fragment(sql, opts)
clustered = opts[:clustered]
unless clustered.nil?
sql += " #{'NON' unless clustered}CLUSTERED"
end
sql
end
# Add dropping of the default constraint to the list of SQL queries.
# This is necessary before dropping the column or changing its type.
def add_drop_default_constraint_sql(sqls, table, column)
if constraint = default_constraint_name(table, column)
sqls << "ALTER TABLE #{quote_schema_table(table)} DROP CONSTRAINT #{constraint}"
end
end
# MSSQL uses the IDENTITY(1,1) column for autoincrementing columns.
def auto_increment_sql
'IDENTITY(1,1)'
end
def alter_table_sql(table, op)
case op[:op]
when :add_column
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}"
when :drop_column
sqls = []
add_drop_default_constraint_sql(sqls, table, op[:name])
sqls << super
when :rename_column
"sp_rename #{literal("#{quote_schema_table(table)}.#{quote_identifier(op[:name])}")}, #{literal(metadata_dataset.with_quote_identifiers(false).quote_identifier(op[:new_name]))}, 'COLUMN'"
when :set_column_type
sqls = []
if sch = schema(table)
if cs = sch.each{|k, v| break v if k == op[:name]; nil}
cs = cs.dup
add_drop_default_constraint_sql(sqls, table, op[:name])
cs[:default] = cs[:ruby_default]
op = cs.merge!(op)
default = op.delete(:default)
end
end
sqls << "ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{column_definition_sql(op)}"
sqls << alter_table_sql(table, op.merge(:op=>:set_column_default, :default=>default, :skip_drop_default=>true)) if default
sqls
when :set_column_null
sch = schema(table).find{|k,v| k.to_s == op[:name].to_s}.last
type = sch[:db_type]
if [:string, :decimal, :blob].include?(sch[:type]) && !["text", "ntext"].include?(type) && (size = (sch[:max_chars] || sch[:column_size]))
size = "MAX" if size == -1
type += "(#{size}#{", #{sch[:scale]}" if sch[:scale] && sch[:scale].to_i > 0})"
end
"ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(op[:name])} #{type_literal(:type=>type)} #{'NOT ' unless op[:null]}NULL"
when :set_column_default
sqls = []
add_drop_default_constraint_sql(sqls, table, op[:name]) unless op[:skip_drop_default]
sqls << "ALTER TABLE #{quote_schema_table(table)} ADD CONSTRAINT #{quote_identifier("sequel_#{table}_#{op[:name]}_def")} DEFAULT #{literal(op[:default])} FOR #{quote_identifier(op[:name])}"
else
super(table, op)
end
end
def begin_savepoint_sql(depth)
"SAVE TRANSACTION autopoint_#{depth}"
end
def begin_transaction_sql
"BEGIN TRANSACTION"
end
# MSSQL does not allow adding primary key constraints to NULLable columns.
def can_add_primary_key_constraint_on_nullable_columns?
false
end
# MSSQL tinyint types are unsigned.
def column_schema_tinyint_type_is_unsigned?
true
end
# Handle MSSQL specific default format.
def column_schema_normalize_default(default, type)
if m = /\A(?:\(N?('.*')\)|\(\((-?\d+(?:\.\d+)?)\)\))\z/.match(default)
default = m[1] || m[2]
end
super(default, type)
end
# Commit the active transaction on the connection, does not release savepoints.
def commit_transaction(conn, opts=OPTS)
log_connection_execute(conn, commit_transaction_sql) unless savepoint_level(conn) > 1
end
def commit_transaction_sql
"COMMIT TRANSACTION"
end
# MSSQL uses the name of the table to decide the difference between
# a regular and temporary table, with temporary table names starting with
# a #.
def create_table_prefix_sql(name, options)
"CREATE TABLE #{quote_schema_table(options[:temp] ? "##{name}" : name)}"
end
# MSSQL doesn't support CREATE TABLE AS, it only supports SELECT INTO.
# Emulating CREATE TABLE AS using SELECT INTO is only possible if a dataset
# is given as the argument, it can't work with a string, so raise an
# Error if a string is given.
def create_table_as(name, ds, options)
raise(Error, "must provide dataset instance as value of create_table :as option on MSSQL") unless ds.is_a?(Sequel::Dataset)
run(ds.into(name).sql)
end
DATABASE_ERROR_REGEXPS = {
/Violation of UNIQUE KEY constraint|(Violation of PRIMARY KEY constraint.+)?Cannot insert duplicate key/ => UniqueConstraintViolation,
/conflicted with the (FOREIGN KEY.*|REFERENCE) constraint/ => ForeignKeyConstraintViolation,
/conflicted with the CHECK constraint/ => CheckConstraintViolation,
/column does not allow nulls/ => NotNullConstraintViolation,
/was deadlocked on lock resources with another process and has been chosen as the deadlock victim/ => SerializationFailure,
/Lock request time out period exceeded\./ => DatabaseLockTimeout,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# The name of the constraint for setting the default value on the table and column.
# The SQL used to select default constraints utilizes MSSQL catalog views which were introduced in 2005.
# This method intentionally does not support MSSQL 2000.
def default_constraint_name(table, column_name)
if server_version >= 9000000
table_name = schema_and_table(table).compact.join('.')
self[Sequel[:sys][:default_constraints]].
where{{:parent_object_id => Sequel::SQL::Function.new(:object_id, table_name), col_name(:parent_object_id, :parent_column_id) => column_name.to_s}}.
get(:name)
end
end
def drop_index_sql(table, op)
"DROP INDEX #{quote_identifier(op[:name] || default_index_name(table, op[:columns]))} ON #{quote_schema_table(table)}"
end
def index_definition_sql(table_name, index)
index_name = index[:name] || default_index_name(table_name, index[:columns])
raise Error, "Partial indexes are not supported for this database" if index[:where] && !supports_partial_indexes?
if index[:type] == :full_text
"CREATE FULLTEXT INDEX ON #{quote_schema_table(table_name)} #{literal(index[:columns])} KEY INDEX #{literal(index[:key_index])}"
else
"CREATE #{'UNIQUE ' if index[:unique]}#{'CLUSTERED ' if index[:type] == :clustered}INDEX #{quote_identifier(index_name)} ON #{quote_schema_table(table_name)} #{literal(index[:columns])}#{" INCLUDE #{literal(index[:include])}" if index[:include]}#{" WHERE #{filter_expr(index[:where])}" if index[:where]}"
end
end
# Backbone of the tables and views support.
def information_schema_tables(type, opts)
m = output_identifier_meth
metadata_dataset.from(Sequel[:information_schema][:tables].as(:t)).
select(:table_name).
where(:table_type=>type, :table_schema=>(opts[:schema]||'dbo').to_s).
map{|x| m.call(x[:table_name])}
end
# Always quote identifiers in the metadata_dataset, so schema parsing works.
def _metadata_dataset
super.with_quote_identifiers(true)
end
# Handle clustered and nonclustered primary keys
def primary_key_constraint_sql_fragment(opts)
add_clustered_sql_fragment(super, opts)
end
# Use sp_rename to rename the table
def rename_table_sql(name, new_name)
"sp_rename #{literal(quote_schema_table(name))}, #{quote_identifier(schema_and_table(new_name).pop)}"
end
def rollback_savepoint_sql(depth)
"IF @@TRANCOUNT > 0 ROLLBACK TRANSACTION autopoint_#{depth}"
end
def rollback_transaction_sql
"IF @@TRANCOUNT > 0 ROLLBACK TRANSACTION"
end
def schema_column_type(db_type)
case db_type
when /\A(?:bit)\z/io
:boolean
when /\A(?:(?:small)?money)\z/io
:decimal
when /\A(timestamp|rowversion)\z/io
:blob
else
super
end
end
# MSSQL uses the INFORMATION_SCHEMA to hold column information, and
# parses primary key information from the sysindexes, sysindexkeys,
# and syscolumns system tables.
def schema_parse_table(table_name, opts)
m = output_identifier_meth(opts[:dataset])
m2 = input_identifier_meth(opts[:dataset])
tn = m2.call(table_name.to_s)
info_sch_sch = opts[:information_schema_schema]
inf_sch_qual = lambda{|s| info_sch_sch ? Sequel.qualify(info_sch_sch, s) : Sequel[s]}
table_id = metadata_dataset.from(inf_sch_qual.call(Sequel[:sys][:objects])).where(:name => tn).select_map(:object_id).first
identity_cols = metadata_dataset.from(inf_sch_qual.call(Sequel[:sys][:columns])).
where(:object_id=>table_id, :is_identity=>true).
select_map(:name)
pk_index_id = metadata_dataset.from(inf_sch_qual.call(Sequel[:sys][:sysindexes])).
where(:id=>table_id, :indid=>1..254){{(status & 2048)=>2048}}.
get(:indid)
pk_cols = metadata_dataset.from(inf_sch_qual.call(Sequel[:sys][:sysindexkeys]).as(:sik)).
join(inf_sch_qual.call(Sequel[:sys][:syscolumns]).as(:sc), :id=>:id, :colid=>:colid).
where{{sik[:id]=>table_id, sik[:indid]=>pk_index_id}}.
select_order_map{sc[:name]}
ds = metadata_dataset.from(inf_sch_qual.call(Sequel[:information_schema][:tables]).as(:t)).
join(inf_sch_qual.call(Sequel[:information_schema][:columns]).as(:c), :table_catalog=>:table_catalog,
:table_schema => :table_schema, :table_name => :table_name).
select{[column_name.as(:column), data_type.as(:db_type), character_maximum_length.as(:max_chars), column_default.as(:default), is_nullable.as(:allow_null), numeric_precision.as(:column_size), numeric_scale.as(:scale)]}.
where{{c[:table_name]=>tn}}
if schema = opts[:schema]
ds = ds.where{{c[:table_schema]=>schema}}
end
ds.map do |row|
if row[:primary_key] = pk_cols.include?(row[:column])
row[:auto_increment] = identity_cols.include?(row[:column])
end
row[:allow_null] = row[:allow_null] == 'YES' ? true : false
row[:default] = nil if blank_object?(row[:default])
row[:type] = if row[:db_type] =~ /number|numeric|decimal/i && row[:scale] == 0
:integer
else
schema_column_type(row[:db_type])
end
row[:max_length] = row[:max_chars] if row[:type] == :string && row[:max_chars] >= 0
[m.call(row.delete(:column)), row]
end
end
# Set the mssql_unicode_strings settings from the given options.
def set_mssql_unicode_strings
@mssql_unicode_strings = typecast_value_boolean(@opts.fetch(:mssql_unicode_strings, true))
end
# MSSQL has both datetime and timestamp classes; most people are going
# to want datetime
def type_literal_generic_datetime(column)
:datetime
end
# MSSQL doesn't have a true boolean class, so it uses bit
def type_literal_generic_trueclass(column)
:bit
end
# MSSQL uses varbinary(max) type for blobs
def type_literal_generic_file(column)
:'varbinary(max)'
end
# Handle clustered and nonclustered unique constraints
def unique_constraint_sql_fragment(opts)
add_clustered_sql_fragment(super, opts)
end
# MSSQL supports views with check option, but not local.
def view_with_check_option_support
true
end
end
module DatasetMethods
include(Module.new do
Dataset.def_sql_method(self, :select, %w'with select distinct limit columns into from lock join where group having compounds order')
end)
include EmulateOffsetWithRowNumber
CONSTANT_MAP = {:CURRENT_DATE=>'CAST(CURRENT_TIMESTAMP AS DATE)'.freeze, :CURRENT_TIME=>'CAST(CURRENT_TIMESTAMP AS TIME)'.freeze}.freeze
EXTRACT_MAP = {:year=>"yy", :month=>"m", :day=>"d", :hour=>"hh", :minute=>"n", :second=>"s"}.freeze
EXTRACT_MAP.each_value(&:freeze)
LIMIT_ALL = Object.new.freeze
Dataset.def_sql_method(self, :delete, %w'with delete limit from output from2 where')
Dataset.def_sql_method(self, :insert, %w'with insert into columns output values')
Dataset.def_sql_method(self, :update, [['if is_2005_or_later?', %w'with update limit table set output from where'], ['else', %w'update table set output from where']])
# Use the database's mssql_unicode_strings setting if the dataset hasn't overridden it.
def mssql_unicode_strings
opts.has_key?(:mssql_unicode_strings) ? opts[:mssql_unicode_strings] : db.mssql_unicode_strings
end
# Return a cloned dataset with the mssql_unicode_strings option set.
def with_mssql_unicode_strings(v)
clone(:mssql_unicode_strings=>v)
end
def complex_expression_sql_append(sql, op, args)
case op
when :'||'
super(sql, :+, args)
when :LIKE, :"NOT LIKE"
super(sql, op, complex_expression_sql_like_args(args, " COLLATE Latin1_General_CS_AS)"))
when :ILIKE, :"NOT ILIKE"
super(sql, (op == :ILIKE ? :LIKE : :"NOT LIKE"), complex_expression_sql_like_args(args, " COLLATE Latin1_General_CI_AS)"))
when :<<, :>>
complex_expression_emulate_append(sql, op, args)
when :extract
part = args[0]
raise(Sequel::Error, "unsupported extract argument: #{part.inspect}") unless format = EXTRACT_MAP[part]
if part == :second
expr = args[1]
sql << "CAST((datepart(" << format.to_s << ', '
literal_append(sql, expr)
sql << ') + datepart(ns, '
literal_append(sql, expr)
sql << ")/1000000000.0) AS double precision)"
else
sql << "datepart(" << format.to_s << ', '
literal_append(sql, args[1])
sql << ')'
end
else
super
end
end
# MSSQL doesn't support the SQL standard CURRENT_DATE or CURRENT_TIME
def constant_sql_append(sql, constant)
if c = CONSTANT_MAP[constant]
sql << c
else
super
end
end
# Uses CROSS APPLY to join the given table into the current dataset.
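# A rough sketch (table and column names are illustrative, and
# identifier quoting is elided):
#
#   DB.from(:a).cross_apply(DB[:b].where(Sequel[:a][:c] => Sequel[:b][:d]))
#   # SELECT * FROM a CROSS APPLY (SELECT * FROM b WHERE (a.c = b.d))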
def cross_apply(table)
join_table(:cross_apply, table)
end
# Disable the use of INSERT OUTPUT
def disable_insert_output
clone(:disable_insert_output=>true)
end
# MSSQL treats [] as a metacharacter in LIKE expressions.
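# A sketch of the escaping this performs:
#
#   dataset.escape_like("50%_[x]")
#   # => "50\\%\\_\\[x\\]"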
def escape_like(string)
string.gsub(/[\\%_\[\]]/){|m| "\\#{m}"}
end
# MSSQL uses the CONTAINS keyword for full text search
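# A rough sketch (table and column names are illustrative; the term is
# literalized as a unicode string by default):
#
#   DB[:posts].full_text_search(:title, 'ruby')
#   # WHERE CONTAINS ([title], N'ruby')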
def full_text_search(cols, terms, opts = OPTS)
terms = "\"#{terms.join('" OR "')}\"" if terms.is_a?(Array)
where(Sequel.lit("CONTAINS (?, ?)", cols, terms))
end
# Insert a record, returning the record inserted, using OUTPUT. Always returns nil without
# running an INSERT statement if disable_insert_output is used. If the query runs
# but returns no values, returns false.
def insert_select(*values)
return unless supports_insert_select?
with_sql_first(insert_select_sql(*values)) || false
end
# Add OUTPUT clause unless there is already an existing output clause, then return
# the SQL to insert.
def insert_select_sql(*values)
ds = (opts[:output] || opts[:returning]) ? self : output(nil, [SQL::ColumnAll.new(:inserted)])
ds.insert_sql(*values)
end
# Specify a table for a SELECT ... INTO query.
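# A rough sketch (table names are illustrative):
#
#   DB[:items].into(:new_items).sql
#   # => "SELECT * INTO [new_items] FROM [items]"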
def into(table)
clone(:into => table)
end
# Allows you to do a dirty read of uncommitted data using WITH (NOLOCK).
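# A rough sketch (the table name is illustrative):
#
#   DB[:items].nolock
#   # SELECT * FROM [items] WITH (NOLOCK)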
def nolock
lock_style(:dirty)
end
# Uses OUTER APPLY to join the given table into the current dataset.
def outer_apply(table)
join_table(:outer_apply, table)
end
# Include an OUTPUT clause in the eventual INSERT, UPDATE, or DELETE query.
#
# The first argument is the table to output into, and the second argument
# is either an Array of column values to select, or a Hash which maps output
# column names to selected values, in the style of #insert or #update.
#
# Output into a returned result set is not currently supported.
#
# Examples:
#
# dataset.output(:output_table, [Sequel[:deleted][:id], Sequel[:deleted][:name]])
# dataset.output(:output_table, id: Sequel[:inserted][:id], name: Sequel[:inserted][:name])
def output(into, values)
raise(Error, "SQL Server versions 2000 and earlier do not support the OUTPUT clause") unless supports_output_clause?
output = {}
case values
when Hash
output[:column_list], output[:select_list] = values.keys, values.values
when Array
output[:select_list] = values
end
output[:into] = into
clone(:output => output)
end
# MSSQL uses [] to quote identifiers.
def quoted_identifier_append(sql, name)
sql << '[' << name.to_s.gsub(/\]/, ']]') << ']'
end
# Emulate RETURNING using the output clause. This only handles values that are simple column references.
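# A hedged sketch (table and column names are illustrative):
#
#   DB[:items].returning(:id).insert(name: 'a')
#   # INSERT INTO [items] ([name]) OUTPUT [INSERTED].[id] VALUES (N'a')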
def returning(*values)
values = values.map do |v|
unless r = unqualified_column_for(v)
raise(Error, "cannot emulate RETURNING via OUTPUT for value: #{v.inspect}")
end
r
end
clone(:returning=>values)
end
# On MSSQL 2012+ add a default order to the current dataset if an offset is used.
# The default offset emulation using a subquery would be used in the unordered
# case by default, and that also adds a default order, so it's better to just
# avoid the subquery.
def select_sql
if @opts[:offset]
raise(Error, "Using with_ties is not supported with an offset on Microsoft SQL Server") if @opts[:limit_with_ties]
return order(1).select_sql if is_2012_or_later? && !@opts[:order]
end
super
end
# The version of the database server.
def server_version
db.server_version(@opts[:server])
end
def supports_cte?(type=:select)
is_2005_or_later?
end
# MSSQL 2005+ supports GROUP BY CUBE.
def supports_group_cube?
is_2005_or_later?
end
# MSSQL 2005+ supports GROUP BY ROLLUP
def supports_group_rollup?
is_2005_or_later?
end
# MSSQL 2008+ supports GROUPING SETS
def supports_grouping_sets?
is_2008_or_later?
end
# MSSQL supports insert_select via the OUTPUT clause.
def supports_insert_select?
supports_output_clause? && !opts[:disable_insert_output]
end
# MSSQL 2005+ supports INTERSECT and EXCEPT
def supports_intersect_except?
is_2005_or_later?
end
# MSSQL does not support IS TRUE
def supports_is_true?
false
end
# MSSQL doesn't support JOIN USING
def supports_join_using?
false
end
# MSSQL 2008+ supports MERGE
def supports_merge?
is_2008_or_later?
end
# MSSQL 2005+ supports modifying joined datasets
def supports_modifying_joins?
is_2005_or_later?
end
# MSSQL does not support multiple columns for the IN/NOT IN operators
def supports_multiple_column_in?
false
end
# MSSQL supports NOWAIT.
def supports_nowait?
true
end
# MSSQL 2012+ supports offsets in correlated subqueries.
def supports_offsets_in_correlated_subqueries?
is_2012_or_later?
end
# MSSQL 2005+ supports the OUTPUT clause.
def supports_output_clause?
is_2005_or_later?
end
# MSSQL 2005+ can emulate RETURNING via the OUTPUT clause.
def supports_returning?(type)
supports_insert_select?
end
# MSSQL uses READPAST to skip locked rows.
def supports_skip_locked?
true
end
# MSSQL 2005+ supports window functions
def supports_window_functions?
true
end
# MSSQL cannot use WHERE 1.
def supports_where_true?
false
end
# Use WITH TIES when limiting the result set to also include additional
# rows matching the last row.
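# A rough sketch (names are illustrative; this form requires 2005+):
#
#   DB[:items].order(:score).limit(3).with_ties
#   # SELECT TOP (3) WITH TIES * FROM [items] ORDER BY [score]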
def with_ties
clone(:limit_with_ties=>true)
end
protected
# If returned primary keys are requested, use OUTPUT unless already set on the
# dataset. If OUTPUT is already set, use existing returning values. If OUTPUT
# is only set to return a single columns, return an array of just that column.
# Otherwise, return an array of hashes.
def _import(columns, values, opts=OPTS)
if opts[:return] == :primary_key && !@opts[:output]
output(nil, [SQL::QualifiedIdentifier.new(:inserted, first_primary_key)])._import(columns, values, opts)
elsif @opts[:output]
# no transaction: our multi_insert_sql_strategy should guarantee
# that there's only ever a single statement.
sql = multi_insert_sql(columns, values)[0]
naked.with_sql(sql).map{|v| v.length == 1 ? v.values.first : v}
else
super
end
end
# If the dataset uses an order without a limit, offset, or custom SQL,
# remove the order. Compounds on Microsoft SQL Server have undefined
# order unless the result is specifically ordered. Applying the current
# order before the compound doesn't work in all cases, such as when
# qualified identifiers are used. If you want to ensure an order
# for a compound dataset, apply the order after all compounds have been
# added.
def compound_from_self
if @opts[:offset] && !@opts[:limit] && !is_2012_or_later?
clone(:limit=>LIMIT_ALL).from_self
elsif @opts[:order] && !(@opts[:sql] || @opts[:limit] || @opts[:offset])
unordered
else
super
end
end
private
# Normalize conditions for MERGE WHEN.
def _merge_when_conditions_sql(sql, data)
if data.has_key?(:conditions)
sql << " AND "
literal_append(sql, _normalize_merge_when_conditions(data[:conditions]))
end
end
# Handle nil, false, and true MERGE WHEN conditions to avoid non-boolean
# type error.
def _normalize_merge_when_conditions(conditions)
case conditions
when nil, false
{1=>0}
when true
{1=>1}
when Sequel::SQL::DelayedEvaluation
Sequel.delay{_normalize_merge_when_conditions(conditions.call(self))}
else
conditions
end
end
# MSSQL requires a semicolon at the end of MERGE.
def _merge_when_sql(sql)
super
sql << ';'
end
# MSSQL does not allow ordering in sub-clauses unless TOP (limit) is specified
def aggregate_dataset
(options_overlap(Sequel::Dataset::COUNT_FROM_SELF_OPTS) && !options_overlap([:limit])) ? unordered.from_self : super
end
# Allow update and delete for unordered, limited datasets only.
def check_not_limited!(type)
return if @opts[:skip_limit_check] && type != :truncate
raise Sequel::InvalidOperation, "Dataset##{type} not supported on ordered, limited datasets" if opts[:order] && opts[:limit]
super if type == :truncate || @opts[:offset]
end
# Whether we are using SQL Server 2005 or later.
def is_2005_or_later?
server_version >= 9000000
end
# Whether we are using SQL Server 2008 or later.
def is_2008_or_later?
server_version >= 10000000
end
# Whether we are using SQL Server 2012 or later.
def is_2012_or_later?
server_version >= 11000000
end
# Determine whether to add the COLLATE for LIKE arguments, based on the Database setting.
def complex_expression_sql_like_args(args, collation)
if db.like_without_collate
args
else
args.map{|a| Sequel.lit(["(", collation], a)}
end
end
# Use strict ISO-8601 format with T between date and time,
# since that is the format that is multilanguage and not
# DATEFORMAT dependent.
def default_timestamp_format
"'%Y-%m-%dT%H:%M:%S%N%z'"
end
# Only include the primary table in the main delete clause
def delete_from_sql(sql)
sql << ' FROM '
source_list_append(sql, @opts[:from][0..0])
end
# MSSQL supports FROM clauses in DELETE and UPDATE statements.
def delete_from2_sql(sql)
if joined_dataset?
select_from_sql(sql)
select_join_sql(sql)
end
end
alias update_from_sql delete_from2_sql
def delete_output_sql(sql)
output_sql(sql, :DELETED)
end
# There is no function on Microsoft SQL Server that does character length
# and respects trailing spaces (datalength respects trailing spaces, but
# counts bytes instead of characters). Use a hack to work around the
# trailing spaces issue.
def emulate_function?(name)
name == :char_length || name == :trim
end
def emulate_function_sql_append(sql, f)
case f.name
when :char_length
literal_append(sql, SQL::Function.new(:len, Sequel.join([f.args.first, 'x'])) - 1)
when :trim
literal_append(sql, SQL::Function.new(:ltrim, SQL::Function.new(:rtrim, f.args.first)))
end
end
# Microsoft SQL Server 2012+ has native support for offsets, but only for ordered datasets.
def emulate_offset_with_row_number?
super && !(is_2012_or_later? && @opts[:order])
end
# Return the first primary key for the current table. If this table has
# multiple primary keys, this will only return one of them. Used by #_import.
def first_primary_key
@db.schema(self).map{|k, v| k if v[:primary_key] == true}.compact.first
end
def insert_output_sql(sql)
output_sql(sql, :INSERTED)
end
alias update_output_sql insert_output_sql
# Handle CROSS APPLY and OUTER APPLY JOIN types
def join_type_sql(join_type)
case join_type
when :cross_apply
'CROSS APPLY'
when :outer_apply
'OUTER APPLY'
else
super
end
end
# MSSQL uses a literal hexadecimal number for blob strings
def literal_blob_append(sql, v)
sql << '0x' << v.unpack("H*").first
end
# Use YYYYmmdd format, since that's the only format that is
# multilanguage and not DATEFORMAT dependent.
def literal_date(v)
v.strftime("'%Y%m%d'")
end
# Use 0 for false on MSSQL
def literal_false
'0'
end
# Optionally use unicode string syntax for all strings. Don't double
# backslashes.
def literal_string_append(sql, v)
sql << (mssql_unicode_strings ? "N'" : "'")
sql << v.gsub("'", "''").gsub(/\\((?:\r\n)|\n)/, '\\\\\\\\\\1\\1') << "'"
end
# Use 1 for true on MSSQL
def literal_true
'1'
end
# MSSQL 2008+ supports multiple rows in the VALUES clause, older versions
# can use UNION.
def multi_insert_sql_strategy
is_2008_or_later? ? :values : :union
end
def non_sql_option?(key)
super || key == :disable_insert_output || key == :mssql_unicode_strings
end
def select_into_sql(sql)
if i = @opts[:into]
sql << " INTO "
identifier_append(sql, i)
end
end
# MSSQL 2000 uses TOP N for limit. For MSSQL 2005+ TOP (N) is used
# to allow the limit to be a bound variable.
def select_limit_sql(sql)
if l = @opts[:limit]
return if is_2012_or_later? && @opts[:order] && @opts[:offset]
shared_limit_sql(sql, l)
end
end
def shared_limit_sql(sql, l)
if is_2005_or_later?
if l == LIMIT_ALL
sql << " TOP (100) PERCENT"
else
sql << " TOP ("
literal_append(sql, l)
sql << ')'
end
else
sql << " TOP "
literal_append(sql, l)
end
if @opts[:limit_with_ties]
sql << " WITH TIES"
end
end
def update_limit_sql(sql)
if l = @opts[:limit]
shared_limit_sql(sql, l)
end
end
alias delete_limit_sql update_limit_sql
# Handle dirty, skip locked, and for update locking
def select_lock_sql(sql)
lock = @opts[:lock]
skip_locked = @opts[:skip_locked]
nowait = @opts[:nowait]
for_update = lock == :update
dirty = lock == :dirty
lock_hint = for_update || dirty
if lock_hint || skip_locked
sql << " WITH ("
if lock_hint
sql << (for_update ? 'UPDLOCK' : 'NOLOCK')
end
if skip_locked || nowait
sql << ', ' if lock_hint
sql << (skip_locked ? "READPAST" : "NOWAIT")
end
sql << ')'
else
super
end
end
# On 2012+ when there is an order with an offset, append the offset (and possible
# limit) at the end of the order clause.
def select_order_sql(sql)
super
if is_2012_or_later? && @opts[:order]
if o = @opts[:offset]
sql << " OFFSET "
literal_append(sql, o)
sql << " ROWS"
if l = @opts[:limit]
sql << " FETCH NEXT "
literal_append(sql, l)
sql << " ROWS ONLY"
end
end
end
end
def output_sql(sql, type)
return unless supports_output_clause?
if output = @opts[:output]
output_list_sql(sql, output)
elsif values = @opts[:returning]
output_returning_sql(sql, type, values)
end
end
def output_list_sql(sql, output)
sql << " OUTPUT "
column_list_append(sql, output[:select_list])
if into = output[:into]
sql << " INTO "
identifier_append(sql, into)
if column_list = output[:column_list]
sql << ' ('
source_list_append(sql, column_list)
sql << ')'
end
end
end
def output_returning_sql(sql, type, values)
sql << " OUTPUT "
if values.empty?
literal_append(sql, SQL::ColumnAll.new(type))
else
values = values.map do |v|
case v
when SQL::AliasedExpression
Sequel.qualify(type, v.expression).as(v.alias)
else
Sequel.qualify(type, v)
end
end
column_list_append(sql, values)
end
end
# MSSQL does not natively support NULLS FIRST/LAST.
def requires_emulating_nulls_first?
true
end
# MSSQL supports 100-nsec precision for time columns, but ruby by
# default only supports usec precision.
def sqltime_precision
6
end
# MSSQL supports millisecond timestamp precision for datetime columns.
# 100-nsec precision is supported for datetime2 columns, but Sequel does
# not know what the column type is when formatting values.
def timestamp_precision
3
end
# Only include the primary table in the main update clause
def update_table_sql(sql)
sql << ' '
source_list_append(sql, @opts[:from][0..0])
end
def uses_with_rollup?
!is_2008_or_later?
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/shared/mysql.rb 0000664 0000000 0000000 00000112672 14342141206 0021720 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/replace'
require_relative '../utils/split_alter_table'
require_relative '../utils/unmodified_identifiers'
module Sequel
module MySQL
Sequel::Database.set_shared_adapter_scheme(:mysql, self)
def self.mock_adapter_setup(db)
db.instance_exec do
@server_version = 50617
end
end
module DatabaseMethods
include UnmodifiedIdentifiers::DatabaseMethods
include Sequel::Database::SplitAlterTable
CAST_TYPES = {String=>:CHAR, Integer=>:SIGNED, Time=>:DATETIME, DateTime=>:DATETIME, Numeric=>:DECIMAL, BigDecimal=>:DECIMAL, File=>:BINARY}.freeze
COLUMN_DEFINITION_ORDER = [:generated, :collate, :null, :default, :unique, :primary_key, :auto_increment, :references].freeze
# Set the default charset used for CREATE TABLE. You can pass the
# :charset option to create_table to override this setting.
attr_accessor :default_charset
# Set the default collation used for CREATE TABLE. You can pass the
# :collate option to create_table to override this setting.
attr_accessor :default_collate
# Set the default engine used for CREATE TABLE. You can pass the
# :engine option to create_table to override this setting.
attr_accessor :default_engine
# MySQL's cast rules are restrictive in that you can't just cast to any possible
# database type.
def cast_type_literal(type)
CAST_TYPES[type] || super
end
def commit_prepared_transaction(transaction_id, opts=OPTS)
run("XA COMMIT #{literal(transaction_id)}", opts)
end
def database_type
:mysql
end
# Use the Information Schema's KEY_COLUMN_USAGE table to get
# basic information on foreign key columns, but include the
# constraint name.
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
im = input_identifier_meth
ds = metadata_dataset.
from(Sequel[:INFORMATION_SCHEMA][:KEY_COLUMN_USAGE]).
where(:TABLE_NAME=>im.call(table), :TABLE_SCHEMA=>Sequel.function(:DATABASE)).
exclude(:CONSTRAINT_NAME=>'PRIMARY').
exclude(:REFERENCED_TABLE_NAME=>nil).
order(:CONSTRAINT_NAME, :POSITION_IN_UNIQUE_CONSTRAINT).
select(Sequel[:CONSTRAINT_NAME].as(:name), Sequel[:COLUMN_NAME].as(:column), Sequel[:REFERENCED_TABLE_NAME].as(:table), Sequel[:REFERENCED_COLUMN_NAME].as(:key))
h = {}
ds.each do |row|
if r = h[row[:name]]
r[:columns] << m.call(row[:column])
r[:key] << m.call(row[:key])
else
h[row[:name]] = {:name=>m.call(row[:name]), :columns=>[m.call(row[:column])], :table=>m.call(row[:table]), :key=>[m.call(row[:key])]}
end
end
h.values
end
def freeze
server_version
mariadb?
supports_timestamp_usecs?
super
end
# MySQL namespaces indexes per table.
def global_index_namespace?
false
end
# Use SHOW INDEX FROM to get the index information for the
# table.
#
# By default partial indexes are not included, you can use the
# option :partial to override this.
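# (e.g. DB.indexes(:items, partial: true), with an illustrative table name)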
def indexes(table, opts=OPTS)
indexes = {}
remove_indexes = []
m = output_identifier_meth
schema, table = schema_and_table(table)
table = Sequel::SQL::Identifier.new(table)
sql = "SHOW INDEX FROM #{literal(table)}"
if schema
schema = Sequel::SQL::Identifier.new(schema)
sql += " FROM #{literal(schema)}"
end
metadata_dataset.with_sql(sql).each do |r|
name = r[:Key_name]
next if name == 'PRIMARY'
name = m.call(name)
remove_indexes << name if r[:Sub_part] && ! opts[:partial]
i = indexes[name] ||= {:columns=>[], :unique=>r[:Non_unique] != 1}
i[:columns] << m.call(r[:Column_name])
end
indexes.reject{|k,v| remove_indexes.include?(k)}
end
def rollback_prepared_transaction(transaction_id, opts=OPTS)
run("XA ROLLBACK #{literal(transaction_id)}", opts)
end
# Whether the database is MariaDB and not MySQL
def mariadb?
return @is_mariadb if defined?(@is_mariadb)
@is_mariadb = !!(fetch('SELECT version()').single_value! =~ /mariadb/i)
end
# Get the version of the MySQL server, used for determining capabilities.
def server_version
@server_version ||= begin
m = /(\d+)\.(\d+)\.(\d+)/.match(fetch('SELECT version()').single_value!)
(m[1].to_i * 10000) + (m[2].to_i * 100) + m[3].to_i
end
end
# MySQL supports CREATE TABLE IF NOT EXISTS syntax.
def supports_create_table_if_not_exists?
true
end
# Generated columns are supported in MariaDB 5.2.0+ and MySQL 5.7.6+.
def supports_generated_columns?
server_version >= (mariadb? ? 50200 : 50706)
end
# MySQL 5+ supports prepared transactions (two-phase commit) using XA
def supports_prepared_transactions?
server_version >= 50000
end
# MySQL 5+ supports savepoints
def supports_savepoints?
server_version >= 50000
end
# MySQL doesn't support savepoints inside prepared transactions from
# 5.5.12 to 5.5.23, see http://bugs.mysql.com/bug.php?id=64374
def supports_savepoints_in_prepared_transactions?
super && (server_version <= 50512 || server_version >= 50523)
end
# Support fractional timestamps on MySQL 5.6.5+ if the :fractional_seconds
# Database option is used. Technically, MySQL 5.6.4+ supports them, but
# automatic initialization of datetime values wasn't supported until 5.6.5,
# and this is related to that.
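# A rough sketch of enabling it (the connection URL is illustrative):
#
#   DB = Sequel.connect('mysql2://user:password@host/db', fractional_seconds: true)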
def supports_timestamp_usecs?
return @supports_timestamp_usecs if defined?(@supports_timestamp_usecs)
@supports_timestamp_usecs = server_version >= 50605 && typecast_value_boolean(opts[:fractional_seconds])
end
# MySQL supports transaction isolation levels
def supports_transaction_isolation_levels?
true
end
# Return an array of symbols specifying table names in the current database.
#
# Options:
# :server :: Set the server to use
def tables(opts=OPTS)
full_tables('BASE TABLE', opts)
end
# Return an array of symbols specifying view names in the current database.
#
# Options:
# :server :: Set the server to use
def views(opts=OPTS)
full_tables('VIEW', opts)
end
# Renames multiple tables in a single call.
#
# DB.rename_tables [:items, :old_items], [:other_items, :old_other_items]
# # RENAME TABLE items TO old_items, other_items TO old_other_items
def rename_tables(*renames)
execute_ddl(rename_tables_sql(renames))
renames.each{|from,| remove_cached_schema(from)}
end
private
def alter_table_add_column_sql(table, op)
pos = if after_col = op[:after]
" AFTER #{quote_identifier(after_col)}"
elsif op[:first]
" FIRST"
end
sql = if related = op.delete(:table)
sql = super + "#{pos}, ADD "
op[:table] = related
op[:key] ||= primary_key_from_schema(related)
if constraint_name = op.delete(:foreign_key_constraint_name)
sql << "CONSTRAINT #{quote_identifier(constraint_name)} "
end
sql << "FOREIGN KEY (#{quote_identifier(op[:name])})#{column_references_sql(op)}"
else
"#{super}#{pos}"
end
end
def alter_table_change_column_sql(table, op)
o = op[:op]
opts = schema(table).find{|x| x.first == op[:name]}
opts = opts ? opts.last.dup : {}
opts[:name] = o == :rename_column ? op[:new_name] : op[:name]
opts[:type] = o == :set_column_type ? op[:type] : opts[:db_type]
opts[:null] = o == :set_column_null ? op[:null] : opts[:allow_null]
opts[:default] = o == :set_column_default ? op[:default] : opts[:ruby_default]
opts.delete(:default) if opts[:default] == nil
opts.delete(:primary_key)
unless op[:type] || opts[:type]
raise Error, "cannot determine database type to use for CHANGE COLUMN operation"
end
opts = op.merge(opts)
if op.has_key?(:auto_increment)
opts[:auto_increment] = op[:auto_increment]
end
"CHANGE COLUMN #{quote_identifier(op[:name])} #{column_definition_sql(opts)}"
end
alias alter_table_rename_column_sql alter_table_change_column_sql
alias alter_table_set_column_type_sql alter_table_change_column_sql
alias alter_table_set_column_null_sql alter_table_change_column_sql
def alter_table_set_column_default_sql(table, op)
return super unless op[:default].nil?
opts = schema(table).find{|x| x[0] == op[:name]}
if opts && opts[1][:allow_null] == false
"ALTER COLUMN #{quote_identifier(op[:name])} DROP DEFAULT"
else
super
end
end
def alter_table_add_constraint_sql(table, op)
if op[:type] == :foreign_key
op[:key] ||= primary_key_from_schema(op[:table])
end
super
end
def alter_table_drop_constraint_sql(table, op)
case op[:type]
when :primary_key
"DROP PRIMARY KEY"
when :foreign_key
name = op[:name] || foreign_key_name(table, op[:columns])
"DROP FOREIGN KEY #{quote_identifier(name)}"
when :unique
"DROP INDEX #{quote_identifier(op[:name])}"
when :check, nil
if supports_check_constraints?
"DROP CONSTRAINT #{quote_identifier(op[:name])}"
end
end
end
def alter_table_sql(table, op)
case op[:op]
when :drop_index
"#{drop_index_sql(table, op)} ON #{quote_schema_table(table)}"
when :drop_constraint
if op[:type] == :primary_key
if (pk = primary_key_from_schema(table)).length == 1
return [alter_table_sql(table, {:op=>:rename_column, :name=>pk.first, :new_name=>pk.first, :auto_increment=>false}), super]
end
end
super
else
super
end
end
# Handle MySQL specific default format.
def column_schema_normalize_default(default, type)
if column_schema_default_string_type?(type)
return if [:date, :datetime, :time].include?(type) && /\ACURRENT_(?:DATE|TIMESTAMP)?\z/.match(default)
default = "'#{default.gsub("'", "''").gsub('\\', '\\\\')}'"
end
super(default, type)
end
def column_schema_to_ruby_default(default, type)
return Sequel::CURRENT_DATE if mariadb? && server_version >= 100200 && default == 'curdate()'
super
end
# Don't allow combining adding foreign key operations with other
# operations, since in some cases adding a foreign key constraint in
# the same query as other operations results in MySQL error 150.
def combinable_alter_table_op?(op)
super && !(op[:op] == :add_constraint && op[:type] == :foreign_key) && !(op[:op] == :drop_constraint && op[:type] == :primary_key)
end
# The SQL queries to execute on initial connection
def mysql_connection_setting_sqls
sqls = []
if wait_timeout = opts.fetch(:timeout, 2147483)
# Increase timeout so mysql server doesn't disconnect us
# Value used by default is maximum allowed value on Windows.
sqls << "SET @@wait_timeout = #{wait_timeout}"
end
# By default, MySQL 'where id is null' selects the last inserted id
sqls << "SET SQL_AUTO_IS_NULL=0" unless opts[:auto_is_null]
# If the user has specified one or more sql modes, enable them
if sql_mode = opts[:sql_mode]
sql_mode = Array(sql_mode).join(',').upcase
sqls << "SET sql_mode = '#{sql_mode}'"
end
# Disable the use of split_materialized in the optimizer. This is
# needed to pass association tests on MariaDB 10.5+.
if opts[:disable_split_materialized] && typecast_value_boolean(opts[:disable_split_materialized])
sqls << "SET SESSION optimizer_switch='split_materialized=off'"
end
sqls
end
def auto_increment_sql
'AUTO_INCREMENT'
end
# MySQL needs to set transaction isolation before beginning a transaction
def begin_new_transaction(conn, opts)
set_transaction_isolation(conn, opts)
log_connection_execute(conn, begin_transaction_sql)
end
# Use XA START to start a new prepared transaction if the :prepare
# option is given.
def begin_transaction(conn, opts=OPTS)
if (s = opts[:prepare]) && savepoint_level(conn) == 1
log_connection_execute(conn, "XA START #{literal(s)}")
else
super
end
end
# Support :on_update_current_timestamp option.
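# A usage sketch (table and column names are hypothetical):
#
#   DB.create_table(:posts) do
#     column :updated_at, :timestamp, on_update_current_timestamp: true
#   end
#   # updated_at timestamp ... ON UPDATE CURRENT_TIMESTAMP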
def column_definition_default_sql(sql, column)
super
sql << " ON UPDATE CURRENT_TIMESTAMP" if column[:on_update_current_timestamp]
end
# Add generation clause SQL fragment to column creation SQL.
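# A sketch of the supported options (table and column names are hypothetical):
#
#   DB.create_table(:items) do
#     String :name
#     String :lower_name, generated_always_as: Sequel.function(:lower, :name), generated_type: :stored
#   end
#   # lower_name varchar(255) GENERATED ALWAYS AS (lower(name)) STORED
#   # (PERSISTENT instead of STORED on MariaDB)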
def column_definition_generated_sql(sql, column)
if (generated_expression = column[:generated_always_as])
sql << " GENERATED ALWAYS AS (#{literal(generated_expression)})"
case (type = column[:generated_type])
when nil
# none, database default
when :virtual
sql << " VIRTUAL"
when :stored
sql << (mariadb? ? " PERSISTENT" : " STORED")
else
raise Error, "unsupported :generated_type option: #{type.inspect}"
end
end
end
def column_definition_order
COLUMN_DEFINITION_ORDER
end
# MySQL doesn't allow default values on blob/text columns, so ignore the
# default if the generic File type or the generic text String type is used.
def column_definition_sql(column)
column.delete(:default) if column[:type] == File || (column[:type] == String && column[:text] == true)
super
end
# Prepare the XA transaction for a two-phase commit if the
# :prepare option is given.
def commit_transaction(conn, opts=OPTS)
if (s = opts[:prepare]) && savepoint_level(conn) <= 1
log_connection_execute(conn, "XA END #{literal(s)}")
log_connection_execute(conn, "XA PREPARE #{literal(s)}")
else
super
end
end
# Use MySQL specific syntax for engine type and character encoding
def create_table_sql(name, generator, options = OPTS)
engine = options.fetch(:engine, default_engine)
charset = options.fetch(:charset, default_charset)
collate = options.fetch(:collate, default_collate)
generator.constraints.sort_by{|c| (c[:type] == :primary_key) ? -1 : 1}
# Proc for figuring out the primary key for a given table.
key_proc = lambda do |t|
if t == name
if pk = generator.primary_key_name
[pk]
elsif !(pkc = generator.constraints.select{|con| con[:type] == :primary_key}).empty?
pkc.first[:columns]
elsif !(pkc = generator.columns.select{|con| con[:primary_key] == true}).empty?
pkc.map{|c| c[:name]}
end
else
primary_key_from_schema(t)
end
end
# Manually set the keys, since MySQL requires them; it doesn't fall back
# to the referenced table's primary key if none are specified.
generator.constraints.each do |c|
if c[:type] == :foreign_key
c[:key] ||= key_proc.call(c[:table])
end
end
# Split column constraints into table constraints in some cases:
# foreign key - Always
# unique, primary_key - Only if constraint has a name
generator.columns.each do |c|
if t = c.delete(:table)
same_table = t == name
key = c[:key] || key_proc.call(t)
if same_table && !key.nil?
generator.constraints.unshift(:type=>:unique, :columns=>Array(key))
end
generator.foreign_key([c[:name]], t, c.merge(:name=>c[:foreign_key_constraint_name], :type=>:foreign_key, :key=>key))
end
end
"#{super}#{" ENGINE=#{engine}" if engine}#{" DEFAULT CHARSET=#{charset}" if charset}#{" DEFAULT COLLATE=#{collate}" if collate}"
end
DATABASE_ERROR_REGEXPS = {
/Duplicate entry .+ for key/ => UniqueConstraintViolation,
/foreign key constraint fails/ => ForeignKeyConstraintViolation,
/cannot be null/ => NotNullConstraintViolation,
/Deadlock found when trying to get lock; try restarting transaction/ => SerializationFailure,
/CONSTRAINT .+ failed for/ => CheckConstraintViolation,
/\A(Statement aborted because lock\(s\) could not be acquired immediately and NOWAIT is set\.|Lock wait timeout exceeded; try restarting transaction)/ => DatabaseLockTimeout,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# Backbone of the tables and views support using SHOW FULL TABLES.
def full_tables(type, opts)
m = output_identifier_meth
metadata_dataset.with_sql('SHOW FULL TABLES').server(opts[:server]).map{|r| m.call(r.values.first) if r.delete(:Table_type) == type}.compact
end
def index_definition_sql(table_name, index)
index_name = quote_identifier(index[:name] || default_index_name(table_name, index[:columns]))
raise Error, "Partial indexes are not supported for this database" if index[:where] && !supports_partial_indexes?
index_type = case index[:type]
when :full_text
"FULLTEXT "
when :spatial
"SPATIAL "
else
using = " USING #{index[:type]}" unless index[:type] == nil
"UNIQUE " if index[:unique]
end
"CREATE #{index_type}INDEX #{index_name}#{using} ON #{quote_schema_table(table_name)} #{literal(index[:columns])}"
end
# Parse the schema for the given table to get an array of primary key columns
def primary_key_from_schema(table)
schema(table).select{|a| a[1][:primary_key]}.map{|a| a[0]}
end
# SQL statement for renaming multiple tables.
def rename_tables_sql(renames)
rename_tos = renames.map do |from, to|
"#{quote_schema_table(from)} TO #{quote_schema_table(to)}"
end.join(', ')
"RENAME TABLE #{rename_tos}"
end
# Rollback the currently open XA transaction
def rollback_transaction(conn, opts=OPTS)
if (s = opts[:prepare]) && savepoint_level(conn) <= 1
log_connection_execute(conn, "XA END #{literal(s)}")
log_connection_execute(conn, "XA PREPARE #{literal(s)}")
log_connection_execute(conn, "XA ROLLBACK #{literal(s)}")
else
super
end
end
def schema_column_type(db_type)
case db_type
when /\Aset/io
:set
when /\Amediumint/io
:integer
when /\Amediumtext/io
:string
else
super
end
end
# Use the MySQL specific DESCRIBE syntax to get a table description.
def schema_parse_table(table_name, opts)
m = output_identifier_meth(opts[:dataset])
im = input_identifier_meth(opts[:dataset])
table = SQL::Identifier.new(im.call(table_name))
table = SQL::QualifiedIdentifier.new(im.call(opts[:schema]), table) if opts[:schema]
metadata_dataset.with_sql("DESCRIBE ?", table).map do |row|
extra = row.delete(:Extra)
if row[:primary_key] = row.delete(:Key) == 'PRI'
row[:auto_increment] = !!(extra.to_s =~ /auto_increment/i)
end
if supports_generated_columns?
# Extra field contains VIRTUAL, STORED, or PERSISTENT for generated columns
row[:generated] = !!(extra.to_s =~ /VIRTUAL|STORED|PERSISTENT/i)
end
row[:allow_null] = row.delete(:Null) == 'YES'
row[:default] = row.delete(:Default)
row[:db_type] = row.delete(:Type)
row[:type] = schema_column_type(row[:db_type])
row[:extra] = extra
[m.call(row.delete(:Field)), row]
end
end
# Return nil if CHECK constraints are not supported, because
# versions that don't support check constraints don't raise
# errors for values outside of range.
def column_schema_integer_min_max_values(db_type)
super if supports_check_constraints?
end
# Split DROP INDEX ops on MySQL 5.6+, as dropping them in the same
# statement as dropping a related foreign key causes an error.
def split_alter_table_op?(op)
server_version >= 50600 && (op[:op] == :drop_index || (op[:op] == :drop_constraint && op[:type] == :unique))
end
# CHECK constraints only supported on MariaDB 10.2+ and MySQL 8.0.19+
# (at least MySQL documents DROP CONSTRAINT was supported in 8.0.19+).
def supports_check_constraints?
server_version >= (mariadb? ? 100200 : 80019)
end
# MySQL can combine multiple alter table ops into a single query.
def supports_combining_alter_table_ops?
true
end
# MySQL supports CREATE OR REPLACE VIEW.
def supports_create_or_replace_view?
true
end
# MySQL does not support named column constraints.
def supports_named_column_constraints?
false
end
# Respect the :size option if given to produce
# tinyblob, mediumblob, and longblob if :tiny,
# :medium, or :long is given.
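#
# A sketch (table and column names are hypothetical):
#
#   DB.create_table(:docs){File :body, size: :medium}
#   # body mediumblob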
def type_literal_generic_file(column)
case column[:size]
when :tiny # < 2^8 bytes
:tinyblob
when :medium # < 2^24 bytes
:mediumblob
when :long # < 2^32 bytes
:longblob
else # 2^16 bytes
:blob
end
end
# MySQL has both datetime and timestamp classes; most people are going
# to want datetime.
def type_literal_generic_datetime(column)
if supports_timestamp_usecs?
:'datetime(6)'
elsif column[:default] == Sequel::CURRENT_TIMESTAMP
:timestamp
else
:datetime
end
end
# Use the time type, with fractional seconds (time(6)) if the
# database supports fractional timestamps.
def type_literal_generic_only_time(column)
if supports_timestamp_usecs?
:'time(6)'
else
:time
end
end
# MySQL doesn't have a true boolean class, so it uses tinyint(1)
def type_literal_generic_trueclass(column)
:'tinyint(1)'
end
# MySQL 5.0.2+ supports views with check option.
def view_with_check_option_support
:local if server_version >= 50002
end
end
# Dataset methods shared by datasets that use MySQL databases.
module DatasetMethods
MATCH_AGAINST = ["MATCH ".freeze, " AGAINST (".freeze, ")".freeze].freeze
MATCH_AGAINST_BOOLEAN = ["MATCH ".freeze, " AGAINST (".freeze, " IN BOOLEAN MODE)".freeze].freeze
Dataset.def_sql_method(self, :delete, %w'with delete from where order limit')
Dataset.def_sql_method(self, :insert, %w'insert ignore into columns values on_duplicate_key_update')
Dataset.def_sql_method(self, :select, %w'with select distinct calc_found_rows columns from join where group having window compounds order limit lock')
Dataset.def_sql_method(self, :update, %w'with update ignore table set where order limit')
include Sequel::Dataset::Replace
include UnmodifiedIdentifiers::DatasetMethods
def complex_expression_sql_append(sql, op, args)
case op
when :IN, :"NOT IN"
ds = args[1]
if ds.is_a?(Sequel::Dataset) && ds.opts[:limit]
super(sql, op, [args[0], ds.from_self])
else
super
end
when :~, :'!~', :'~*', :'!~*', :LIKE, :'NOT LIKE', :ILIKE, :'NOT ILIKE'
if !db.mariadb? && db.server_version >= 80000 && [:~, :'!~'].include?(op)
func = Sequel.function(:REGEXP_LIKE, args[0], args[1], 'c')
func = ~func if op == :'!~'
return literal_append(sql, func)
end
sql << '('
literal_append(sql, args[0])
sql << ' '
sql << 'NOT ' if [:'NOT LIKE', :'NOT ILIKE', :'!~', :'!~*'].include?(op)
sql << ([:~, :'!~', :'~*', :'!~*'].include?(op) ? 'REGEXP' : 'LIKE')
sql << ' '
sql << 'BINARY ' if [:~, :'!~', :LIKE, :'NOT LIKE'].include?(op)
literal_append(sql, args[1])
if [:LIKE, :'NOT LIKE', :ILIKE, :'NOT ILIKE'].include?(op)
sql << " ESCAPE "
literal_append(sql, "\\")
end
sql << ')'
when :'||'
if args.length > 1
sql << "CONCAT"
array_sql_append(sql, args)
else
literal_append(sql, args[0])
end
when :'B~'
sql << "CAST(~"
literal_append(sql, args[0])
sql << " AS SIGNED INTEGER)"
else
super
end
end
# MySQL's CURRENT_TIMESTAMP does not use fractional seconds,
# even if the database itself supports fractional seconds. If
# MySQL 5.6.4+ is being used, use a value that will return
# fractional seconds.
def constant_sql_append(sql, constant)
if constant == :CURRENT_TIMESTAMP && supports_timestamp_usecs?
sql << 'CURRENT_TIMESTAMP(6)'
else
super
end
end
# Use GROUP BY instead of DISTINCT ON if arguments are provided.
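#
# For example (using a hypothetical table a):
#
#   DB[:a].distinct.select(:id) # SELECT DISTINCT id FROM a
#   DB[:a].distinct(:id) # SELECT * FROM a GROUP BY id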
def distinct(*args)
args.empty? ? super : group(*args)
end
# Sets up the select methods to use the SQL_CALC_FOUND_ROWS option.
#
# dataset.calc_found_rows.limit(10)
# # SELECT SQL_CALC_FOUND_ROWS * FROM table LIMIT 10
def calc_found_rows
clone(:calc_found_rows => true)
end
# Sets the tables to delete from when deleting from a
# joined dataset:
#
# DB[:a].join(:b, a_id: :id).delete
# # DELETE a FROM a INNER JOIN b ON (b.a_id = a.id)
#
# DB[:a].join(:b, a_id: :id).delete_from(:a, :b).delete
# # DELETE a, b FROM a INNER JOIN b ON (b.a_id = a.id)
def delete_from(*tables)
clone(:delete_from=>tables)
end
# Return the results of an EXPLAIN query as a string. Options:
# :extended :: Use EXPLAIN EXTENDED instead of EXPLAIN if true.
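#
# A usage sketch (table name is hypothetical; the output is a string
# formatted by Sequel::PrettyTable and varies by server version):
#
#   puts DB[:items].explain
#   puts DB[:items].explain(extended: true)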
def explain(opts=OPTS)
# Load the PrettyTable class, needed for explain output
Sequel.extension(:_pretty_table) unless defined?(Sequel::PrettyTable)
ds = db.send(:metadata_dataset).with_sql(((opts[:extended] && (db.mariadb? || db.server_version < 50700)) ? 'EXPLAIN EXTENDED ' : 'EXPLAIN ') + select_sql).naked
rows = ds.all
Sequel::PrettyTable.string(rows, ds.columns)
end
# Return a cloned dataset which will use LOCK IN SHARE MODE to lock returned rows.
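#
#   DB[:items].for_share.all
#   # SELECT * FROM items LOCK IN SHARE MODE
#
# (table name is hypothetical; FOR SHARE is used on MySQL 8+)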
def for_share
lock_style(:share)
end
# Adds a full text search filter to the dataset:
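#
#   DB[:posts].full_text_search(:title, 'ruby')
#   # SELECT * FROM posts WHERE (MATCH (title) AGAINST ('ruby'))
#
#   DB[:posts].full_text_search([:title, :body], ['ruby', 'sequel'], boolean: true)
#   # SELECT * FROM posts WHERE (MATCH (title, body) AGAINST ('ruby sequel' IN BOOLEAN MODE))
#
# (table and column names above are hypothetical)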
def full_text_search(cols, terms, opts = OPTS)
where(full_text_sql(cols, terms, opts))
end
# MySQL specific full text search syntax.
def full_text_sql(cols, terms, opts = OPTS)
terms = terms.join(' ') if terms.is_a?(Array)
SQL::PlaceholderLiteralString.new((opts[:boolean] ? MATCH_AGAINST_BOOLEAN : MATCH_AGAINST), [Array(cols), terms])
end
# Sets up the insert methods to use INSERT IGNORE.
# Useful if you have a unique key and want to just skip
# inserting rows that violate the unique key restriction.
#
# dataset.insert_ignore.multi_insert(
# [{name: 'a', value: 1}, {name: 'b', value: 2}]
# )
# # INSERT IGNORE INTO tablename (name, value) VALUES (a, 1), (b, 2)
def insert_ignore
clone(:insert_ignore=>true)
end
# Sets up the insert methods to use ON DUPLICATE KEY UPDATE.
# If you pass no arguments, ALL fields will be
# updated with the new values. If you pass the fields you
# want, then ONLY those fields will be updated. If you pass a
# hash, you can customize the values (for example, to increment
# a numeric field).
#
# Useful if you have a unique key and want to update rows that
# would violate the unique key restriction when inserted.
#
# dataset.on_duplicate_key_update.multi_insert(
# [{name: 'a', value: 1}, {name: 'b', value: 2}]
# )
# # INSERT INTO tablename (name, value) VALUES (a, 1), (b, 2)
# # ON DUPLICATE KEY UPDATE name=VALUES(name), value=VALUES(value)
#
# dataset.on_duplicate_key_update(:value).multi_insert(
# [{name: 'a', value: 1}, {name: 'b', value: 2}]
# )
# # INSERT INTO tablename (name, value) VALUES (a, 1), (b, 2)
# # ON DUPLICATE KEY UPDATE value=VALUES(value)
#
# dataset.on_duplicate_key_update(
# value: Sequel.lit('value + VALUES(value)')
# ).multi_insert(
# [{name: 'a', value: 1}, {name: 'b', value: 2}]
# )
# # INSERT INTO tablename (name, value) VALUES (a, 1), (b, 2)
# # ON DUPLICATE KEY UPDATE value=value + VALUES(value)
def on_duplicate_key_update(*args)
clone(:on_duplicate_key_update => args)
end
# MySQL uses the nonstandard ` (backtick) for quoting identifiers.
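# For example (hypothetical table/column using reserved words):
#
#   DB[:order].select(:group).sql
#   # SELECT `group` FROM `order`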
def quoted_identifier_append(sql, c)
sql << '`' << c.to_s.gsub('`', '``') << '`'
end
# MariaDB 10.2+ and MySQL 8+ support CTEs
def supports_cte?(type=:select)
if db.mariadb?
type == :select && db.server_version >= 100200
else
case type
when :select, :update, :delete
db.server_version >= 80000
end
end
end
# MySQL does not support derived column lists
def supports_derived_column_lists?
false
end
# MySQL can emulate DISTINCT ON with its non-standard GROUP BY implementation,
# though the rows returned cannot be made deterministic through ordering.
def supports_distinct_on?
true
end
# MySQL supports GROUP BY WITH ROLLUP (but not CUBE)
def supports_group_rollup?
true
end
# MariaDB 10.3+ supports INTERSECT or EXCEPT
def supports_intersect_except?
db.mariadb? && db.server_version >= 100300
end
# MySQL does not support limits in correlated subqueries (or any subqueries that use IN).
def supports_limits_in_correlated_subqueries?
false
end
# MySQL supports modifying joined datasets
def supports_modifying_joins?
true
end
# MySQL 8+ and MariaDB 10.3+ support NOWAIT.
def supports_nowait?
db.server_version >= (db.mariadb? ? 100300 : 80000)
end
# MySQL's DISTINCT ON emulation using GROUP BY does not respect the
# query's ORDER BY clause.
def supports_ordered_distinct_on?
false
end
# MySQL supports pattern matching via regular expressions
def supports_regexp?
true
end
# MySQL 8+ supports SKIP LOCKED.
def supports_skip_locked?
!db.mariadb? && db.server_version >= 80000
end
# Check the database setting for whether fractional timestamps
# are supported.
def supports_timestamp_usecs?
db.supports_timestamp_usecs?
end
# MySQL 8+ supports WINDOW clause.
def supports_window_clause?
!db.mariadb? && db.server_version >= 80000
end
# MariaDB 10.2+ and MySQL 8+ support window functions
def supports_window_functions?
db.server_version >= (db.mariadb? ? 100200 : 80000)
end
# Sets up the update methods to use UPDATE IGNORE.
# Useful if you have a unique key and want to just skip
# updating rows that violate the unique key restriction.
#
# dataset.update_ignore.update(name: 'a', value: 1)
# # UPDATE IGNORE tablename SET name = 'a', value = 1
def update_ignore
clone(:update_ignore=>true)
end
private
# Allow update and delete for limited datasets, unless there is an offset.
def check_not_limited!(type)
super if type == :truncate || @opts[:offset]
end
# Consider the first table in the joined dataset to be the table to delete
# from, but include the others for the purposes of selecting rows.
def delete_from_sql(sql)
if joined_dataset?
sql << ' '
tables = @opts[:delete_from] || @opts[:from][0..0]
source_list_append(sql, tables)
sql << ' FROM '
source_list_append(sql, @opts[:from])
select_join_sql(sql)
else
super
end
end
# MySQL doesn't use the SQL standard DEFAULT VALUES.
def insert_columns_sql(sql)
values = opts[:values]
if values.is_a?(Array) && values.empty?
sql << " ()"
else
super
end
end
# MySQL supports INSERT IGNORE INTO
def insert_ignore_sql(sql)
sql << " IGNORE" if opts[:insert_ignore]
end
# MySQL supports UPDATE IGNORE
def update_ignore_sql(sql)
sql << " IGNORE" if opts[:update_ignore]
end
# MySQL supports INSERT ... ON DUPLICATE KEY UPDATE
def insert_on_duplicate_key_update_sql(sql)
if update_cols = opts[:on_duplicate_key_update]
update_vals = nil
if update_cols.empty?
update_cols = columns
elsif update_cols.last.is_a?(Hash)
update_vals = update_cols.last
update_cols = update_cols[0..-2]
end
sql << " ON DUPLICATE KEY UPDATE "
c = false
co = ', '
values = '=VALUES('
endp = ')'
update_cols.each do |col|
sql << co if c
quote_identifier_append(sql, col)
sql << values
quote_identifier_append(sql, col)
sql << endp
c ||= true
end
if update_vals
eq = '='
update_vals.map do |col,v|
sql << co if c
quote_identifier_append(sql, col)
sql << eq
literal_append(sql, v)
c ||= true
end
end
end
end
# MySQL doesn't use the standard DEFAULT VALUES for empty values.
def insert_values_sql(sql)
values = opts[:values]
if values.is_a?(Array) && values.empty?
sql << " VALUES ()"
else
super
end
end
# Transforms :straight to STRAIGHT_JOIN.
def join_type_sql(join_type)
if join_type == :straight
'STRAIGHT_JOIN'
else
super
end
end
# MySQL allows a LIMIT in DELETE and UPDATE statements.
def limit_sql(sql)
if l = @opts[:limit]
sql << " LIMIT "
literal_append(sql, l)
end
end
alias delete_limit_sql limit_sql
alias update_limit_sql limit_sql
# MySQL uses a preceding 0x for hex escaping blob strings
def literal_blob_append(sql, v)
if v.empty?
sql << "''"
else
sql << "0x" << v.unpack("H*").first
end
end
# Use 0 for false on MySQL
def literal_false
'0'
end
# Raise an error for infinite and NaN values
def literal_float(v)
if v.infinite? || v.nan?
raise InvalidValue, "Infinite floats and NaN values are not valid on MySQL"
else
super
end
end
# SQL fragment for String. Doubles \ and ' by default.
def literal_string_append(sql, v)
sql << "'" << v.gsub("\\", "\\\\\\\\").gsub("'", "''") << "'"
end
# Use 1 for true on MySQL
def literal_true
'1'
end
# MySQL supports multiple rows in VALUES in INSERT.
def multi_insert_sql_strategy
:values
end
def non_sql_option?(key)
super || key == :insert_ignore || key == :update_ignore || key == :on_duplicate_key_update
end
# MySQL does not natively support NULLS FIRST/LAST.
def requires_emulating_nulls_first?
true
end
def select_only_offset_sql(sql)
sql << " LIMIT "
literal_append(sql, @opts[:offset])
sql << ",18446744073709551615"
end
# Support FOR SHARE locking when using the :share lock style.
# Use SKIP LOCKED if skipping locked rows.
def select_lock_sql(sql)
lock = @opts[:lock]
if lock == :share
if !db.mariadb? && db.server_version >= 80000
sql << ' FOR SHARE'
else
sql << ' LOCK IN SHARE MODE'
end
else
super
end
if lock
if @opts[:skip_locked]
sql << " SKIP LOCKED"
elsif @opts[:nowait]
sql << " NOWAIT"
end
end
end
# MySQL specific SQL_CALC_FOUND_ROWS option
def select_calc_found_rows_sql(sql)
sql << ' SQL_CALC_FOUND_ROWS' if opts[:calc_found_rows]
end
# Use WITH RECURSIVE instead of WITH if any of the CTEs is recursive
def select_with_sql_base
opts[:with].any?{|w| w[:recursive]} ? "WITH RECURSIVE " : super
end
# MySQL uses WITH ROLLUP syntax.
def uses_with_rollup?
true
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/shared/oracle.rb 0000664 0000000 0000000 00000056740 14342141206 0022023 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/emulate_offset_with_row_number'
module Sequel
module Oracle
Sequel::Database.set_shared_adapter_scheme(:oracle, self)
def self.mock_adapter_setup(db)
db.instance_exec do
@server_version = 11000000
@primary_key_sequences = {}
end
end
module DatabaseMethods
attr_accessor :autosequence
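# Create a sequence in the database, using the options supported by
# create_sequence_sql below. A sketch (sequence name is hypothetical):
#
#   DB.create_sequence(:seq_items_id, start_with: 100)
#   # CREATE SEQUENCE "seq_items_id" start with 100 increment by 1 nomaxvalue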
def create_sequence(name, opts=OPTS)
self << create_sequence_sql(name, opts)
end
def create_trigger(*args)
self << create_trigger_sql(*args)
end
def current_user
@current_user ||= metadata_dataset.get{sys_context('USERENV', 'CURRENT_USER')}
end
def drop_sequence(name)
self << drop_sequence_sql(name)
end
def database_type
:oracle
end
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
im = input_identifier_meth
schema, table = schema_and_table(table)
ds = metadata_dataset.
from{[all_cons_columns.as(:pc), all_constraints.as(:p), all_cons_columns.as(:fc), all_constraints.as(:f)]}.
where{{
f[:table_name]=>im.call(table),
f[:constraint_type]=>'R',
p[:owner]=>f[:r_owner],
p[:constraint_name]=>f[:r_constraint_name],
pc[:owner]=>p[:owner],
pc[:constraint_name]=>p[:constraint_name],
pc[:table_name]=>p[:table_name],
fc[:owner]=>f[:owner],
fc[:constraint_name]=>f[:constraint_name],
fc[:table_name]=>f[:table_name],
fc[:position]=>pc[:position]}}.
select{[p[:table_name].as(:table), pc[:column_name].as(:key), fc[:column_name].as(:column), f[:constraint_name].as(:name)]}.
order{[:table, fc[:position]]}
ds = ds.where{{f[:schema_name]=>im.call(schema)}} if schema
fks = {}
ds.each do |r|
if fk = fks[r[:name]]
fk[:columns] << m.call(r[:column])
fk[:key] << m.call(r[:key])
else
fks[r[:name]] = {:name=>m.call(r[:name]), :columns=>[m.call(r[:column])], :table=>m.call(r[:table]), :key=>[m.call(r[:key])]}
end
end
fks.values
end
def freeze
current_user
server_version
@conversion_procs.freeze
super
end
# Oracle namespaces indexes per table.
def global_index_namespace?
false
end
IGNORE_OWNERS = %w'APEX_040000 CTXSYS EXFSYS MDSYS OLAPSYS ORDDATA ORDSYS SYS SYSTEM XDB XDBMETADATA XDBPM XFILES WMSYS'.freeze
def tables(opts=OPTS)
m = output_identifier_meth
metadata_dataset.from(:all_tables).
server(opts[:server]).
where(:dropped=>'NO').
exclude(:owner=>IGNORE_OWNERS).
select(:table_name).
map{|r| m.call(r[:table_name])}
end
def views(opts=OPTS)
m = output_identifier_meth
metadata_dataset.from(:all_views).
server(opts[:server]).
exclude(:owner=>IGNORE_OWNERS).
select(:view_name).
map{|r| m.call(r[:view_name])}
end
# Whether a view with a given name exists. By default, looks in all schemas other than system
# schemas. If the :current_schema option is given, looks in the schema for the current user.
def view_exists?(name, opts=OPTS)
ds = metadata_dataset.from(:all_views).where(:view_name=>input_identifier_meth.call(name))
if opts[:current_schema]
ds = ds.where(:owner=>Sequel.function(:SYS_CONTEXT, 'userenv', 'current_schema'))
else
ds = ds.exclude(:owner=>IGNORE_OWNERS)
end
ds.count > 0
end
# The version of the Oracle server, used for determining capability.
def server_version(server=nil)
return @server_version if @server_version
@server_version = synchronize(server) do |conn|
(conn.server_version rescue nil) if conn.respond_to?(:server_version)
end
unless @server_version
@server_version = if m = /(\d+)\.(\d+)\.?(\d+)?\.?(\d+)?/.match(fetch("select version from PRODUCT_COMPONENT_VERSION where lower(product) like 'oracle%'").single_value)
(m[1].to_i*1000000) + (m[2].to_i*10000) + (m[3].to_i*100) + m[4].to_i
else
0
end
end
@server_version
end
# Oracle supports deferrable constraints.
def supports_deferrable_constraints?
true
end
# Oracle supports transaction isolation levels.
def supports_transaction_isolation_levels?
true
end
private
def alter_table_sql(table, op)
case op[:op]
when :add_column
if op[:primary_key]
sqls = []
sqls << alter_table_sql(table, op.merge(:primary_key=>nil))
if op[:auto_increment]
seq_name = default_sequence_name(table, op[:name])
sqls << drop_sequence_sql(seq_name)
sqls << create_sequence_sql(seq_name, op)
sqls << "UPDATE #{quote_schema_table(table)} SET #{quote_identifier(op[:name])} = #{seq_name}.nextval"
end
sqls << "ALTER TABLE #{quote_schema_table(table)} ADD PRIMARY KEY (#{quote_identifier(op[:name])})"
sqls
else
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}"
end
when :set_column_null
"ALTER TABLE #{quote_schema_table(table)} MODIFY #{quote_identifier(op[:name])} #{op[:null] ? 'NULL' : 'NOT NULL'}"
when :set_column_type
"ALTER TABLE #{quote_schema_table(table)} MODIFY #{quote_identifier(op[:name])} #{type_literal(op)}"
when :set_column_default
"ALTER TABLE #{quote_schema_table(table)} MODIFY #{quote_identifier(op[:name])} DEFAULT #{literal(op[:default])}"
else
super(table, op)
end
end
def auto_increment_sql
''
end
# Do not support min/max integer values on Oracle, since
# Oracle uses a number type, and integer just adds a
# constraint on the number type.
def column_schema_integer_min_max_values(db_type)
nil
end
def create_sequence_sql(name, opts=OPTS)
"CREATE SEQUENCE #{quote_identifier(name)} start with #{opts [:start_with]||1} increment by #{opts[:increment_by]||1} nomaxvalue"
end
def create_table_from_generator(name, generator, options)
drop_statement, create_statements = create_table_sql_list(name, generator, options)
swallow_database_error{execute_ddl(drop_statement)} if drop_statement
create_statements.each{|sql| execute_ddl(sql)}
end
def create_table_sql_list(name, generator, options=OPTS)
statements = [create_table_sql(name, generator, options)]
drop_seq_statement = nil
generator.columns.each do |c|
if c[:auto_increment]
c[:sequence_name] ||= default_sequence_name(name, c[:name])
unless c[:create_sequence] == false
drop_seq_statement = drop_sequence_sql(c[:sequence_name])
statements << create_sequence_sql(c[:sequence_name], c)
end
unless c[:create_trigger] == false
c[:trigger_name] ||= "BI_#{name}_#{c[:name]}"
trigger_definition = <<-end_sql
BEGIN
IF :NEW.#{quote_identifier(c[:name])} IS NULL THEN
SELECT #{c[:sequence_name]}.nextval INTO :NEW.#{quote_identifier(c[:name])} FROM dual;
END IF;
END;
end_sql
statements << create_trigger_sql(name, c[:trigger_name], trigger_definition, {:events => [:insert]})
end
end
end
[drop_seq_statement, statements]
end
def create_trigger_sql(table, name, definition, opts=OPTS)
events = opts[:events] ? Array(opts[:events]) : [:insert, :update, :delete]
sql = <<-end_sql
CREATE#{' OR REPLACE' if opts[:replace]} TRIGGER #{quote_identifier(name)}
#{opts[:after] ? 'AFTER' : 'BEFORE'} #{events.map{|e| e.to_s.upcase}.join(' OR ')} ON #{quote_schema_table(table)}
REFERENCING NEW AS NEW FOR EACH ROW
#{definition}
end_sql
sql
end
DATABASE_ERROR_REGEXPS = {
/unique constraint .+ violated/ => UniqueConstraintViolation,
/integrity constraint .+ violated/ => ForeignKeyConstraintViolation,
/check constraint .+ violated/ => CheckConstraintViolation,
/cannot insert NULL into|cannot update .+ to NULL/ => NotNullConstraintViolation,
/can't serialize access for this transaction/ => SerializationFailure,
/resource busy and acquire with NOWAIT specified or timeout/ => DatabaseLockTimeout,
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
def default_sequence_name(table, column)
"seq_#{table}_#{column}"
end
def drop_sequence_sql(name)
"DROP SEQUENCE #{quote_identifier(name)}"
end
def remove_cached_schema(table)
Sequel.synchronize{@primary_key_sequences.delete(table)}
super
end
TRANSACTION_ISOLATION_LEVELS = {:uncommitted=>'READ COMMITTED'.freeze,
:committed=>'READ COMMITTED'.freeze,
:repeatable=>'SERIALIZABLE'.freeze,
:serializable=>'SERIALIZABLE'.freeze}.freeze
# Oracle doesn't support READ UNCOMMITTED or REPEATABLE READ transaction
# isolation levels, so upgrade to the next highest level in those cases.
def set_transaction_isolation_sql(level)
"SET TRANSACTION ISOLATION LEVEL #{TRANSACTION_ISOLATION_LEVELS[level]}"
end
def sequence_for_table(table)
return nil unless autosequence
Sequel.synchronize{return @primary_key_sequences[table] if @primary_key_sequences.has_key?(table)}
begin
sch = schema(table)
rescue Sequel::Error
return nil
end
pk = sch.select{|k, v| v[:primary_key]}
pks = if pk.length == 1
seq = "seq_#{table}_#{pk.first.first}"
seq.to_sym unless from(:user_sequences).where(:sequence_name=>input_identifier_meth.call(seq)).empty?
end
Sequel.synchronize{@primary_key_sequences[table] = pks}
end
# Oracle supports CREATE OR REPLACE VIEW.
def supports_create_or_replace_view?
true
end
# Oracle's integer/:number type handles larger values than
# most other databases' bigint types, so it should be
# safe to use for Bignum.
def type_literal_generic_bignum_symbol(column)
:integer
end
# Oracle doesn't have a time type, so use timestamp for all
# time columns.
def type_literal_generic_only_time(column)
:timestamp
end
# Oracle doesn't have a boolean type or even a reasonable
# facsimile. Using a char(1) seems to be the recommended way.
def type_literal_generic_trueclass(column)
:'char(1)'
end
# SQL fragment for showing a table is temporary
def temporary_table_sql
'GLOBAL TEMPORARY '
end
# Oracle uses clob for text types.
def uses_clob_for_text?
true
end
# Oracle supports views with check option, but not local.
def view_with_check_option_support
true
end
end
module DatasetMethods
ROW_NUMBER_EXPRESSION = LiteralString.new('ROWNUM').freeze
BITAND_PROC = lambda{|a, b| Sequel.lit(["CAST(BITAND(", ", ", ") AS INTEGER)"], a, b)}
include(Module.new do
Dataset.def_sql_method(self, :select, %w'with select distinct columns from join where group having compounds order limit lock')
end)
def complex_expression_sql_append(sql, op, args)
case op
when :&
complex_expression_arg_pairs_append(sql, args, &BITAND_PROC)
when :|
complex_expression_arg_pairs_append(sql, args){|a, b| Sequel.lit(["(", " - ", " + ", ")"], a, complex_expression_arg_pairs([a, b], &BITAND_PROC), b)}
when :^
complex_expression_arg_pairs_append(sql, args) do |*x|
s1 = complex_expression_arg_pairs(x){|a, b| Sequel.lit(["(", " - ", " + ", ")"], a, complex_expression_arg_pairs([a, b], &BITAND_PROC), b)}
s2 = complex_expression_arg_pairs(x, &BITAND_PROC)
Sequel.lit(["(", " - ", ")"], s1, s2)
end
when :~, :'!~', :'~*', :'!~*'
raise InvalidOperation, "Pattern matching via regular expressions is not supported in this Oracle version" unless supports_regexp?
if op == :'!~' || op == :'!~*'
sql << 'NOT '
end
sql << 'REGEXP_LIKE('
literal_append(sql, args[0])
sql << ','
literal_append(sql, args[1])
if op == :'~*' || op == :'!~*'
sql << ", 'i'"
end
sql << ')'
when :%, :<<, :>>, :'B~'
complex_expression_emulate_append(sql, op, args)
else
super
end
end
# Oracle doesn't support CURRENT_TIME, as it doesn't have
# a type for storing just time values without a date, so
# use CURRENT_TIMESTAMP in its place.
def constant_sql_append(sql, c)
if c == :CURRENT_TIME
super(sql, :CURRENT_TIMESTAMP)
else
super
end
end
# Oracle uses MINUS instead of EXCEPT, and doesn't support EXCEPT ALL
def except(dataset, opts=OPTS)
raise(Sequel::Error, "EXCEPT ALL not supported") if opts[:all]
compound_clone(:minus, dataset, opts)
end
# Use a custom expression with EXISTS to determine whether a dataset
# is empty.
def empty?
db[:dual].where(@opts[:offset] ? exists : unordered.exists).get(1) == nil
end
# Oracle requires SQL standard datetimes
def requires_sql_standard_datetimes?
true
end
# Create a copy of this dataset associated to the given sequence name,
# which will be used when calling insert to find the most recently
# inserted value for the sequence.
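# A sketch (table and sequence names are hypothetical):
#
#   DB[:items].sequence(:seq_items_id).insert(name: 'a')
#   # the adapter then consults seq_items_id to determine the
#   # value inserted for the primary key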
def sequence(s)
clone(:sequence=>s)
end
# Handle LIMIT by using an unlimited subselect filtered with ROWNUM,
# unless FETCH NEXT is supported (Oracle 12+).
def select_sql
return super if @opts[:sql]
return super if supports_fetch_next_rows?
o = @opts[:offset]
if o && o != 0
columns = clone(:append_sql=>String.new, :placeholder_literal_null=>true).columns
dsa1 = dataset_alias(1)
rn = row_number_column
limit = @opts[:limit]
ds = unlimited.
from_self(:alias=>dsa1).
select_append(ROW_NUMBER_EXPRESSION.as(rn)).
from_self(:alias=>dsa1).
select(*columns).
where(SQL::Identifier.new(rn) > o)
ds = ds.where(SQL::Identifier.new(rn) <= Sequel.+(o, limit)) if limit
sql = @opts[:append_sql] || String.new
subselect_sql_append(sql, ds)
sql
elsif limit = @opts[:limit]
ds = unlimited
# Lock doesn't work in subselects, so don't use a subselect when locking.
# Don't use a subselect if custom SQL is used, as it breaks some things.
ds = ds.from_self unless @opts[:lock]
sql = @opts[:append_sql] || String.new
subselect_sql_append(sql, ds.where(SQL::ComplexExpression.new(:<=, ROW_NUMBER_EXPRESSION, limit)))
sql
else
super
end
end
# Oracle requires recursive CTEs to have column aliases.
def recursive_cte_requires_column_aliases?
true
end
def supports_cte?(type=:select)
type == :select
end
# Oracle does not support derived column lists
def supports_derived_column_lists?
false
end
# Oracle supports FETCH NEXT ROWS since 12c, but it doesn't work when
# locking or when skipping locked rows.
def supports_fetch_next_rows?
server_version >= 12000000 && !(@opts[:lock] || @opts[:skip_locked])
end
# Oracle supports GROUP BY CUBE
def supports_group_cube?
true
end
# Oracle supports GROUP BY ROLLUP
def supports_group_rollup?
true
end
# Oracle supports GROUPING SETS
def supports_grouping_sets?
true
end
# Oracle does not support INTERSECT ALL or EXCEPT ALL
def supports_intersect_except_all?
false
end
# Oracle does not support IS TRUE.
def supports_is_true?
false
end
# Oracle does not support limits in correlated subqueries.
def supports_limits_in_correlated_subqueries?
false
end
# Oracle supports MERGE
def supports_merge?
true
end
# Oracle supports NOWAIT.
def supports_nowait?
true
end
# Oracle does not support offsets in correlated subqueries.
def supports_offsets_in_correlated_subqueries?
false
end
# Oracle does not support SELECT *, column
def supports_select_all_and_column?
false
end
# Oracle supports SKIP LOCKED.
def supports_skip_locked?
true
end
# Oracle supports timezones in literal timestamps.
def supports_timestamp_timezones?
true
end
# Oracle does not support WHERE 'Y' for WHERE TRUE.
def supports_where_true?
false
end
# Oracle supports window functions
def supports_window_functions?
true
end
# The version of the database server
def server_version
db.server_version(@opts[:server])
end
# Oracle 10+ supports pattern matching via regular expressions
def supports_regexp?
server_version >= 10010002
end
private
# Handle nil, false, and true MERGE WHEN conditions to avoid non-boolean
# type error.
def _normalize_merge_when_conditions(conditions)
case conditions
when nil, false
{1=>0}
when true
{1=>1}
when Sequel::SQL::DelayedEvaluation
Sequel.delay{_normalize_merge_when_conditions(conditions.call(self))}
else
conditions
end
end
# Handle Oracle's non standard MERGE syntax
def _merge_when_sql(sql)
raise Error, "no WHEN [NOT] MATCHED clauses provided for MERGE" unless merge_when = @opts[:merge_when]
insert = update = delete = nil
types = merge_when.map{|d| d[:type]}
raise Error, "Oracle does not support multiple INSERT, UPDATE, or DELETE clauses in MERGE" if types != types.uniq
merge_when.each do |data|
case data[:type]
when :insert
insert = data
when :update
update = data
else # when :delete
delete = data
end
end
if delete
raise Error, "Oracle does not support DELETE without UPDATE clause in MERGE" unless update
raise Error, "Oracle does not support DELETE without conditions clause in MERGE" unless delete.has_key?(:conditions)
end
if update
sql << " WHEN MATCHED"
_merge_update_sql(sql, update)
_merge_when_conditions_sql(sql, update)
if delete
sql << " DELETE"
_merge_when_conditions_sql(sql, delete)
end
end
if insert
sql << " WHEN NOT MATCHED"
_merge_insert_sql(sql, insert)
_merge_when_conditions_sql(sql, insert)
end
end
# Handle Oracle's non-standard MERGE WHEN condition syntax.
def _merge_when_conditions_sql(sql, data)
if data.has_key?(:conditions)
sql << " WHERE "
literal_append(sql, _normalize_merge_when_conditions(data[:conditions]))
end
end
# Allow preparing prepared statements, since determining the prepared sql to use for
# a prepared statement requires calling prepare on that statement.
def allow_preparing_prepared_statements?
true
end
# Oracle doesn't support the use of AS when aliasing a dataset. It doesn't require
# the use of AS anywhere, so this disables it in all cases. Oracle also does not support
# derived column lists in aliases.
def as_sql_append(sql, aliaz, column_aliases=nil)
raise Error, "oracle does not support derived column lists" if column_aliases
sql << ' '
quote_identifier_append(sql, aliaz)
end
# The strftime format to use when literalizing the time.
def default_timestamp_format
"TIMESTAMP '%Y-%m-%d %H:%M:%S%N %z'"
end
def empty_from_sql
' FROM DUAL'
end
# There is no function on Oracle that does character length
# and respects trailing spaces (datalength respects trailing spaces, but
# counts bytes instead of characters). Use a hack to work around the
# trailing spaces issue.
def emulate_function?(name)
name == :char_length
end
# Oracle treats empty strings like NULL values, and doesn't support
# char_length, so make char_length use length with a nonempty string.
# Unfortunately, as Oracle treats the empty string as NULL, there is
# no way to get trim to return an empty string instead of nil if
# the string only contains spaces.
def emulate_function_sql_append(sql, f)
if f.name == :char_length
literal_append(sql, Sequel::SQL::Function.new(:length, Sequel.join([f.args.first, 'x'])) - 1)
end
end
# If this dataset is associated with a sequence, return the most recently
# inserted sequence value.
def execute_insert(sql, opts=OPTS)
opts = Hash[opts]
if f = @opts[:from]
opts[:table] = f.first
end
opts[:sequence] = @opts[:sequence]
super
end
# Use a colon for the timestamp offset, since Oracle appears to require it.
def format_timestamp_offset(hour, minute)
sprintf("%+03i:%02i", hour, minute)
end
# Oracle doesn't support empty values when inserting.
def insert_supports_empty_values?
false
end
# Use string in hex format for blob data.
def literal_blob_append(sql, v)
sql << "'" << v.unpack("H*").first << "'"
end
# Oracle uses 'N' for false values.
def literal_false
"'N'"
end
# Oracle uses the SQL standard of only doubling ' inside strings.
def literal_string_append(sql, v)
sql << "'" << v.gsub("'", "''") << "'"
end
# Oracle uses 'Y' for true values.
def literal_true
"'Y'"
end
# Oracle can insert multiple rows using a UNION
def multi_insert_sql_strategy
:union
end
def select_limit_sql(sql)
return unless supports_fetch_next_rows?
if offset = @opts[:offset]
sql << " OFFSET "
literal_append(sql, offset)
sql << " ROWS"
end
if limit = @opts[:limit]
sql << " FETCH NEXT "
literal_append(sql, limit)
sql << " ROWS ONLY"
end
end
# Use SKIP LOCKED if skipping locked rows.
def select_lock_sql(sql)
super
if @opts[:lock]
if @opts[:skip_locked]
sql << " SKIP LOCKED"
elsif @opts[:nowait]
sql << " NOWAIT"
end
end
end
# Oracle supports quoted function names.
def supports_quoted_function_names?
true
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/shared/postgres.rb 0000664 0000000 0000000 00000272206 14342141206 0022421 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/unmodified_identifiers'
module Sequel
# Top level module for holding all PostgreSQL-related modules and classes
# for Sequel. All adapters that connect to PostgreSQL support the following options:
#
# :client_min_messages :: Change the minimum level of messages that PostgreSQL will send to the
# client. The PostgreSQL default is NOTICE, the Sequel default is
# WARNING. Set to nil to not change the server default. Overridable on
# a per instance basis via the :client_min_messages option.
# :force_standard_strings :: Set to false to not force the use of standard strings. Overridable
# on a per instance basis via the :force_standard_strings option.
# :search_path :: Set the schema search_path for this Database's connections.
# Allows you to set which schemas do not need explicit
# qualification, and in which order to check the schemas when
# an unqualified object is referenced.
module Postgres
Sequel::Database.set_shared_adapter_scheme(:postgres, self)
# Exception class raised when literalizing integers outside the bigint/int8 range.
class IntegerOutsideBigintRange < InvalidValue; end
NAN = 0.0/0.0
PLUS_INFINITY = 1.0/0.0
MINUS_INFINITY = -1.0/0.0
boolean = Object.new
def boolean.call(s) s == 't' end
integer = Object.new
def integer.call(s) s.to_i end
float = Object.new
def float.call(s)
case s
when 'NaN'
NAN
when 'Infinity'
PLUS_INFINITY
when '-Infinity'
MINUS_INFINITY
else
s.to_f
end
end
date = Object.new
def date.call(s) ::Date.new(*s.split('-').map(&:to_i)) end
TYPE_TRANSLATOR_DATE = date.freeze
bytea = Object.new
def bytea.call(str)
str = if str =~ /\A\\x/
# PostgreSQL 9.0+ bytea hex format
str[2..-1].gsub(/(..)/){|s| s.to_i(16).chr}
else
# Historical PostgreSQL bytea escape format
str.gsub(/\\(\\|'|[0-3][0-7][0-7])/) {|s|
if s.size == 2 then s[1,1] else s[1,3].oct.chr end
}
end
::Sequel::SQL::Blob.new(str)
end
CONVERSION_PROCS = {}
{
[16] => boolean,
[17] => bytea,
[20, 21, 23, 26] => integer,
[700, 701] => float,
[1700] => ::Kernel.method(:BigDecimal),
[1083, 1266] => ::Sequel.method(:string_to_time),
[1082] => ::Sequel.method(:string_to_date),
[1184, 1114] => ::Sequel.method(:database_to_application_timestamp),
}.each do |k,v|
k.each do |n|
CONVERSION_PROCS[n] = v
end
end
CONVERSION_PROCS.freeze
module MockAdapterDatabaseMethods
def bound_variable_arg(arg, conn)
arg
end
def primary_key(table)
:id
end
private
# Handle NoMethodErrors when parsing schema due to output_identifier
# being called with nil when the Database fetch results are not set
# to what schema parsing expects.
def schema_parse_table(table, opts=OPTS)
super
rescue NoMethodError
[]
end
end
def self.mock_adapter_setup(db)
db.instance_exec do
@server_version = 150000
initialize_postgres_adapter
extend(MockAdapterDatabaseMethods)
end
end
class CreateTableGenerator < Sequel::Schema::CreateTableGenerator
# Add an exclusion constraint when creating the table. Elements should be
# an array of 2 element arrays, with the first element being the column or
# expression the exclusion constraint is applied to, and the second element
# being the operator to use for the column/expression to check for exclusion:
#
# exclude([[:col1, '&&'], [:col2, '=']])
# # EXCLUDE USING gist (col1 WITH &&, col2 WITH =)
#
# To use a custom operator class, you need to use Sequel.lit with the expression
# and operator class:
#
# exclude([[Sequel.lit('col1 inet_ops'), '&&'], [:col2, '=']])
# # EXCLUDE USING gist (col1 inet_ops WITH &&, col2 WITH =)
#
# Options supported:
#
# :name :: Name the constraint with the given name (useful if you may
# need to drop the constraint later)
# :using :: Override the index_method for the exclusion constraint (defaults to gist).
# :where :: Create a partial exclusion constraint, which only affects
# a subset of table rows, value should be a filter expression.
def exclude(elements, opts=OPTS)
constraints << {:type => :exclude, :elements => elements}.merge!(opts)
end
end
class AlterTableGenerator < Sequel::Schema::AlterTableGenerator
# Adds an exclusion constraint to an existing table, see
# CreateTableGenerator#exclude.
def add_exclusion_constraint(elements, opts=OPTS)
@operations << {:op => :add_constraint, :type => :exclude, :elements => elements}.merge!(opts)
end
# Validate the constraint with the given name, which should have
# been added previously with NOT VALID.
def validate_constraint(name)
@operations << {:op => :validate_constraint, :name => name}
end
end
# Generator used for creating tables that are partitions of other tables.
class CreatePartitionOfTableGenerator
MINVALUE = Sequel.lit('MINVALUE').freeze
MAXVALUE = Sequel.lit('MAXVALUE').freeze
def initialize(&block)
instance_exec(&block)
end
# The minimum value of the data type used in range partitions, useful
# as an argument to #from.
def minvalue
MINVALUE
end
# The maximum value of the data type used in range partitions, useful
# as an argument to #to.
def maxvalue
MAXVALUE
end
# Assumes range partitioning, sets the inclusive minimum value of the range for
# this partition.
def from(*v)
@from = v
end
# Assumes range partitioning, sets the exclusive maximum value of the range for
# this partition.
def to(*v)
@to = v
end
# Assumes list partitioning, sets the values to be included in this partition.
def values_in(*v)
@in = v
end
# Assumes hash partitioning, sets the modulus for this partition.
def modulus(v)
@modulus = v
end
# Assumes hash partitioning, sets the remainder for this partition.
def remainder(v)
@remainder = v
end
# Sets that this is a default partition, where values not in other partitions
# are stored.
def default
@default = true
end
# The from and to values of this partition for a range partition.
def range
[@from, @to]
end
# The values to include in this partition for a list partition.
def list
@in
end
# The modulus and remainder to use for this partition for a hash partition.
def hash_values
[@modulus, @remainder]
end
# Determine the appropriate partition type for this partition based on
# which methods were called on it.
def partition_type
raise Error, "Unable to determine partition type, multiple different partitioning methods called" if [@from || @to, @list, @modulus || @remainder, @default].compact.length > 1
if @from || @to
raise Error, "must call both from and to when creating a partition of a table if calling either" unless @from && @to
:range
elsif @in
:list
elsif @modulus || @remainder
raise Error, "must call both modulus and remainder when creating a partition of a table if calling either" unless @modulus && @remainder
:hash
elsif @default
:default
else
raise Error, "unable to determine partition type, no partitioning methods called"
end
end
end
# Error raised when Sequel determines a PostgreSQL exclusion constraint has been violated.
class ExclusionConstraintViolation < Sequel::ConstraintViolation; end
module DatabaseMethods
include UnmodifiedIdentifiers::DatabaseMethods
FOREIGN_KEY_LIST_ON_DELETE_MAP = {'a'=>:no_action, 'r'=>:restrict, 'c'=>:cascade, 'n'=>:set_null, 'd'=>:set_default}.freeze
ON_COMMIT = {:drop => 'DROP', :delete_rows => 'DELETE ROWS', :preserve_rows => 'PRESERVE ROWS'}.freeze
ON_COMMIT.each_value(&:freeze)
# SQL fragment for custom sequences (ones not created by serial primary key),
# returning the schema and literal form of the sequence name, by parsing
# the column defaults table.
SELECT_CUSTOM_SEQUENCE_SQL = (<<-end_sql
SELECT name.nspname AS "schema",
CASE
WHEN split_part(pg_get_expr(def.adbin, attr.attrelid), '''', 2) ~ '.' THEN
substr(split_part(pg_get_expr(def.adbin, attr.attrelid), '''', 2),
strpos(split_part(pg_get_expr(def.adbin, attr.attrelid), '''', 2), '.')+1)
ELSE split_part(pg_get_expr(def.adbin, attr.attrelid), '''', 2)
END AS "sequence"
FROM pg_class t
JOIN pg_namespace name ON (t.relnamespace = name.oid)
JOIN pg_attribute attr ON (t.oid = attrelid)
JOIN pg_attrdef def ON (adrelid = attrelid AND adnum = attnum)
JOIN pg_constraint cons ON (conrelid = adrelid AND adnum = conkey[1])
WHERE cons.contype = 'p'
AND pg_get_expr(def.adbin, attr.attrelid) ~* 'nextval'
end_sql
).strip.gsub(/\s+/, ' ').freeze # SEQUEL6: Remove
# SQL fragment for determining primary key column for the given table. Only
# returns the first primary key if the table has a composite primary key.
SELECT_PK_SQL = (<<-end_sql
SELECT pg_attribute.attname AS pk
FROM pg_class, pg_attribute, pg_index, pg_namespace
WHERE pg_class.oid = pg_attribute.attrelid
AND pg_class.relnamespace = pg_namespace.oid
AND pg_class.oid = pg_index.indrelid
AND pg_index.indkey[0] = pg_attribute.attnum
AND pg_index.indisprimary = 't'
end_sql
).strip.gsub(/\s+/, ' ').freeze # SEQUEL6: Remove
# SQL fragment for getting sequence associated with table's
# primary key, assuming it was a serial primary key column.
SELECT_SERIAL_SEQUENCE_SQL = (<<-end_sql
SELECT name.nspname AS "schema", seq.relname AS "sequence"
FROM pg_class seq, pg_attribute attr, pg_depend dep,
pg_namespace name, pg_constraint cons, pg_class t
WHERE seq.oid = dep.objid
AND seq.relnamespace = name.oid
AND seq.relkind = 'S'
AND attr.attrelid = dep.refobjid
AND attr.attnum = dep.refobjsubid
AND attr.attrelid = cons.conrelid
AND attr.attnum = cons.conkey[1]
AND attr.attrelid = t.oid
AND cons.contype = 'p'
end_sql
).strip.gsub(/\s+/, ' ').freeze # SEQUEL6: Remove
# A hash of conversion procs, keyed by type integer (oid) and
# having callable values for the conversion proc for that type.
attr_reader :conversion_procs
# Set a conversion proc for the given oid. The callable can
# be passed either as an argument or a block.
def add_conversion_proc(oid, callable=nil, &block)
conversion_procs[oid] = callable || block
end
# Add a conversion proc for a named type, using the given block.
# This should be used for types without fixed OIDs, which includes all types that
# are not included in a default PostgreSQL installation.
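# A sketch, assuming a hypothetical enum type mood has already been
# created in the database:
#
#   DB.add_named_conversion_proc(:mood){|string| string.to_sym}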
def add_named_conversion_proc(name, &block)
unless oid = from(:pg_type).where(:typtype=>['b', 'e'], :typname=>name.to_s).get(:oid)
raise Error, "No matching type in pg_type for #{name.inspect}"
end
add_conversion_proc(oid, block)
end
def commit_prepared_transaction(transaction_id, opts=OPTS)
run("COMMIT PREPARED #{literal(transaction_id)}", opts)
end
# A hash of metadata for CHECK constraints on the table.
# Keys are CHECK constraint name symbols. Values are hashes with the following keys:
# :definition :: An SQL fragment for the definition of the constraint
# :columns :: An array of column symbols for the columns referenced in the constraint,
# can be an empty array if the database cannot determine the column symbols.
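#
# A sketch of the return value for a hypothetical table with a single
# CHECK constraint on its price column:
#
#   DB.check_constraints(:items)
#   # => {:items_price_check=>{:definition=>"CHECK ((price > 0))", :columns=>[:price]}}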
def check_constraints(table)
m = output_identifier_meth
hash = {}
_check_constraints_ds.where_each(:conrelid=>regclass_oid(table)) do |row|
constraint = m.call(row[:constraint])
entry = hash[constraint] ||= {:definition=>row[:definition], :columns=>[]}
entry[:columns] << m.call(row[:column]) if row[:column]
end
hash
end
# Convert the first primary key column in the +table+ from being a serial column to being an identity column.
# If the column is already an identity column, assume it was already converted and make no changes.
#
# Only supported on PostgreSQL 10.2+, since on those versions Sequel will use identity columns
# instead of serial columns for auto incrementing primary keys. Only supported when running as
# a superuser, since regular users cannot modify system tables, and there is no way to keep an
# existing sequence when changing an existing column to be an identity column.
#
# This method can raise an exception in at least the following cases where it may otherwise succeed
# (there may be additional cases not listed here):
#
# * The serial column was added after table creation using PostgreSQL <7.3
# * A regular index also exists on the column (such an index can probably be dropped as the
# primary key index should suffice)
#
# Options:
# :column :: Specify the column to convert instead of using the first primary key column
# :server :: Run the SQL on the given server
def convert_serial_to_identity(table, opts=OPTS)
raise Error, "convert_serial_to_identity is only supported on PostgreSQL 10.2+" unless server_version >= 100002
server = opts[:server]
server_hash = server ? {:server=>server} : OPTS
ds = dataset
ds = ds.server(server) if server
raise Error, "convert_serial_to_identity requires superuser permissions" unless ds.get{current_setting('is_superuser')} == 'on'
table_oid = regclass_oid(table)
im = input_identifier_meth
unless column = (opts[:column] || ((sch = schema(table).find{|_, sc| sc[:primary_key] && sc[:auto_increment]}) && sch[0]))
raise Error, "could not determine column to convert from serial to identity automatically"
end
column = im.call(column)
column_num = ds.from(:pg_attribute).
where(:attrelid=>table_oid, :attname=>column).
get(:attnum)
pg_class = Sequel.cast('pg_class', :regclass)
res = ds.from(:pg_depend).
where(:refclassid=>pg_class, :refobjid=>table_oid, :refobjsubid=>column_num, :classid=>pg_class, :objsubid=>0, :deptype=>%w'a i').
select_map([:objid, Sequel.as({:deptype=>'i'}, :v)])
case res.length
when 0
raise Error, "unable to find related sequence when converting serial to identity"
when 1
seq_oid, already_identity = res.first
else
raise Error, "more than one linked sequence found when converting serial to identity"
end
return if already_identity
transaction(server_hash) do
run("ALTER TABLE #{quote_schema_table(table)} ALTER COLUMN #{quote_identifier(column)} DROP DEFAULT", server_hash)
ds.from(:pg_depend).
where(:classid=>pg_class, :objid=>seq_oid, :objsubid=>0, :deptype=>'a').
update(:deptype=>'i')
ds.from(:pg_attribute).
where(:attrelid=>table_oid, :attname=>column).
update(:attidentity=>'d')
end
remove_cached_schema(table)
nil
end
# Creates the function in the database. Arguments:
# name :: name of the function to create
# definition :: string definition of the function, or object file for a dynamically loaded C function.
# opts :: options hash:
# :args :: function arguments, can be either a symbol or string specifying a type or an array of 1-3 elements:
# 1 :: argument data type
# 2 :: argument name
# 3 :: argument mode (e.g. in, out, inout)
# :behavior :: Should be IMMUTABLE, STABLE, or VOLATILE. PostgreSQL assumes VOLATILE by default.
# :parallel :: The thread safety attribute of the function. Should be SAFE, UNSAFE, RESTRICTED. PostgreSQL assumes UNSAFE by default.
# :cost :: The estimated cost of the function, used by the query planner.
# :language :: The language the function uses. SQL is the default.
# :link_symbol :: For a dynamically loaded C function, the function's link symbol if different from the definition argument.
# :returns :: The data type returned by the function. If you are using OUT or INOUT argument modes, this is ignored.
# Otherwise, if this is not specified, void is used by default to specify the function is not supposed to return a value.
# :rows :: The estimated number of rows the function will return. Only use if the function returns SETOF something.
# :security_definer :: Makes the privileges of the function the same as the privileges of the user who defined the function instead of
# the privileges of the user who runs the function. There are security implications when doing this, see the PostgreSQL documentation.
# :set :: Configuration variables to set while the function is being run, can be a hash or an array of two pairs. search_path is
# often used here if :security_definer is used.
# :strict :: Makes the function return NULL when any argument is NULL.
def create_function(name, definition, opts=OPTS)
self << create_function_sql(name, definition, opts)
end
# Create the procedural language in the database. Arguments:
# name :: Name of the procedural language (e.g. plpgsql)
# opts :: options hash:
# :handler :: The name of a previously registered function used as a call handler for this language.
# :replace :: Replace the installed language if it already exists (on PostgreSQL 9.0+).
# :trusted :: Marks the language being created as trusted, allowing unprivileged users to create functions using this language.
# :validator :: The name of previously registered function used as a validator of functions defined in this language.
def create_language(name, opts=OPTS)
self << create_language_sql(name, opts)
end
# Create a schema in the database. Arguments:
# name :: Name of the schema (e.g. admin)
# opts :: options hash:
# :if_not_exists :: Don't raise an error if the schema already exists (PostgreSQL 9.3+)
# :owner :: The owner to set for the schema (defaults to current user if not specified)
def create_schema(name, opts=OPTS)
self << create_schema_sql(name, opts)
end
# Support partitions of tables using the :partition_of option.
def create_table(name, options=OPTS, &block)
if options[:partition_of]
create_partition_of_table_from_generator(name, CreatePartitionOfTableGenerator.new(&block), options)
return
end
super
end
# Support partitions of tables using the :partition_of option.
def create_table?(name, options=OPTS, &block)
if options[:partition_of]
create_table(name, options.merge!(:if_not_exists=>true), &block)
return
end
super
end
# Create a trigger in the database. Arguments:
# table :: the table on which this trigger operates
# name :: the name of this trigger
# function :: the function to call for this trigger, which should return type trigger.
# opts :: options hash:
# :after :: Calls the trigger after execution instead of before.
# :args :: An argument or array of arguments to pass to the function.
# :each_row :: Calls the trigger for each row instead of for each statement.
# :events :: Can be :insert, :update, :delete, or an array of any of those. Calls the trigger whenever that type of statement is used. By default,
# the trigger is called for insert, update, or delete.
# :replace :: Replace the trigger with the same name if it already exists (PostgreSQL 14+).
# :when :: A filter to use for the trigger
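#
# Example (illustrative; assumes a trigger function named stamp_changed
# already exists):
#
# DB.create_trigger(:items, :items_stamp, :stamp_changed, each_row: true, events: [:insert, :update])
# # CREATE TRIGGER items_stamp BEFORE INSERT OR UPDATE ON "items" FOR EACH ROW EXECUTE PROCEDURE stamp_changed()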
def create_trigger(table, name, function, opts=OPTS)
self << create_trigger_sql(table, name, function, opts)
end
def database_type
:postgres
end
# Use PostgreSQL's DO syntax to execute an anonymous code block. The code should
# be the literal code string to use in the underlying procedural language. Options:
#
# :language :: The procedural language the code is written in. The PostgreSQL
# default is plpgsql. Can be specified as a string or a symbol.
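#
# Example (illustrative):
#
# DB.do("BEGIN PERFORM 1; END")
# # DO 'BEGIN PERFORM 1; END'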
def do(code, opts=OPTS)
language = opts[:language]
run "DO #{"LANGUAGE #{literal(language.to_s)} " if language}#{literal(code)}"
end
# Drops the function from the database. Arguments:
# name :: name of the function to drop
# opts :: options hash:
# :args :: The arguments for the function. See create_function_sql.
# :cascade :: Drop other objects depending on this function.
# :if_exists :: Don't raise an error if the function doesn't exist.
def drop_function(name, opts=OPTS)
self << drop_function_sql(name, opts)
end
# Drops a procedural language from the database. Arguments:
# name :: name of the procedural language to drop
# opts :: options hash:
# :cascade :: Drop other objects depending on this language.
# :if_exists :: Don't raise an error if the language doesn't exist.
def drop_language(name, opts=OPTS)
self << drop_language_sql(name, opts)
end
# Drops a schema from the database. Arguments:
# name :: name of the schema to drop
# opts :: options hash:
# :cascade :: Drop all objects in this schema.
# :if_exists :: Don't raise an error if the schema doesn't exist.
def drop_schema(name, opts=OPTS)
self << drop_schema_sql(name, opts)
end
# Drops a trigger from the database. Arguments:
# table :: table from which to drop the trigger
# name :: name of the trigger to drop
# opts :: options hash:
# :cascade :: Drop other objects depending on this trigger.
# :if_exists :: Don't raise an error if the trigger doesn't exist.
def drop_trigger(table, name, opts=OPTS)
self << drop_trigger_sql(table, name, opts)
end
# Return full foreign key information using the pg system tables, including
# :name, :on_delete, :on_update, and :deferrable entries in the hashes.
#
# Supports additional options:
# :reverse :: Instead of returning foreign keys in the current table, return
# foreign keys in other tables that reference the current table.
# :schema :: Set to true to have the :table value in the hashes be a qualified
# identifier. Set to false to use a separate :schema value with
# the related schema. Defaults to whether the given table argument
# is a qualified identifier.
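#
# Illustrative return value (names and referential actions depend on the
# actual schema):
#
# DB.foreign_key_list(:items)
# # => [{:name=>:items_list_id_fkey, :columns=>[:list_id], :key=>[:id],
# #      :on_update=>:no_action, :on_delete=>:cascade, :deferrable=>false,
# #      :table=>:lists, :schema=>:public}]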
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
schema, _ = opts.fetch(:schema, schema_and_table(table))
h = {}
fklod_map = FOREIGN_KEY_LIST_ON_DELETE_MAP
reverse = opts[:reverse]
(reverse ? _reverse_foreign_key_list_ds : _foreign_key_list_ds).where_each(Sequel[:cl][:oid]=>regclass_oid(table)) do |row|
if reverse
key = [row[:schema], row[:table], row[:name]]
else
key = row[:name]
end
if r = h[key]
r[:columns] << m.call(row[:column])
r[:key] << m.call(row[:refcolumn])
else
entry = h[key] = {
:name=>m.call(row[:name]),
:columns=>[m.call(row[:column])],
:key=>[m.call(row[:refcolumn])],
:on_update=>fklod_map[row[:on_update]],
:on_delete=>fklod_map[row[:on_delete]],
:deferrable=>row[:deferrable],
:table=>schema ? SQL::QualifiedIdentifier.new(m.call(row[:schema]), m.call(row[:table])) : m.call(row[:table]),
}
unless schema
# If not combining schema information into the :table entry
# include it as a separate entry.
entry[:schema] = m.call(row[:schema])
end
end
end
h.values
end
def freeze
server_version
supports_prepared_transactions?
_schema_ds
_select_serial_sequence_ds
_select_custom_sequence_ds
_select_pk_ds
_indexes_ds
_check_constraints_ds
_foreign_key_list_ds
_reverse_foreign_key_list_ds
@conversion_procs.freeze
super
end
# Use the pg_* system tables to determine indexes on a table
def indexes(table, opts=OPTS)
m = output_identifier_meth
cond = {Sequel[:tab][:oid]=>regclass_oid(table, opts)}
cond[:indpred] = nil unless opts[:include_partial]
indexes = {}
_indexes_ds.where_each(cond) do |r|
i = indexes[m.call(r[:name])] ||= {:columns=>[], :unique=>r[:unique], :deferrable=>r[:deferrable]}
i[:columns] << m.call(r[:column])
end
indexes
end
# Dataset containing all current database locks
def locks
dataset.from(:pg_class).join(:pg_locks, :relation=>:relfilenode).select{[pg_class[:relname], Sequel::SQL::ColumnAll.new(:pg_locks)]}
end
# Notifies the given channel. See the PostgreSQL NOTIFY documentation. Options:
#
# :payload :: The payload string to use for the NOTIFY statement. Only supported
# in PostgreSQL 9.0+.
# :server :: The server to which to send the NOTIFY statement, if the sharding support
# is being used.
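#
# Example (illustrative):
#
# DB.notify(:new_order, payload: '123')
# # NOTIFY "new_order", '123'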
def notify(channel, opts=OPTS)
sql = String.new
sql << "NOTIFY "
dataset.send(:identifier_append, sql, channel)
if payload = opts[:payload]
sql << ", "
dataset.literal_append(sql, payload.to_s)
end
execute_ddl(sql, opts)
end
# Return primary key for the given table.
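#
# Example (illustrative):
#
# DB.primary_key(:items) # => "id"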
def primary_key(table, opts=OPTS)
quoted_table = quote_schema_table(table)
Sequel.synchronize{return @primary_keys[quoted_table] if @primary_keys.has_key?(quoted_table)}
value = _select_pk_ds.where_single_value(Sequel[:pg_class][:oid] => regclass_oid(table, opts))
Sequel.synchronize{@primary_keys[quoted_table] = value}
end
# Return the sequence providing the default for the primary key for the given table.
def primary_key_sequence(table, opts=OPTS)
quoted_table = quote_schema_table(table)
Sequel.synchronize{return @primary_key_sequences[quoted_table] if @primary_key_sequences.has_key?(quoted_table)}
cond = {Sequel[:t][:oid] => regclass_oid(table, opts)}
value = if pks = _select_serial_sequence_ds.first(cond)
literal(SQL::QualifiedIdentifier.new(pks[:schema], pks[:sequence]))
elsif pks = _select_custom_sequence_ds.first(cond)
literal(SQL::QualifiedIdentifier.new(pks[:schema], LiteralString.new(pks[:sequence])))
end
Sequel.synchronize{@primary_key_sequences[quoted_table] = value} if value
end
# Refresh the materialized view with the given name.
#
# DB.refresh_view(:items_view)
# # REFRESH MATERIALIZED VIEW items_view
# DB.refresh_view(:items_view, concurrently: true)
# # REFRESH MATERIALIZED VIEW CONCURRENTLY items_view
def refresh_view(name, opts=OPTS)
run "REFRESH MATERIALIZED VIEW#{' CONCURRENTLY' if opts[:concurrently]} #{quote_schema_table(name)}"
end
# Reset the primary key sequence for the given table, basing it on the
# maximum current value of the table's primary key.
def reset_primary_key_sequence(table)
return unless seq = primary_key_sequence(table)
pk = SQL::Identifier.new(primary_key(table))
db = self
s, t = schema_and_table(table)
table = Sequel.qualify(s, t) if s
if server_version >= 100000
seq_ds = metadata_dataset.from(:pg_sequence).where(:seqrelid=>regclass_oid(LiteralString.new(seq)))
increment_by = :seqincrement
min_value = :seqmin
# :nocov:
else
seq_ds = metadata_dataset.from(LiteralString.new(seq))
increment_by = :increment_by
min_value = :min_value
# :nocov:
end
get{setval(seq, db[table].select(coalesce(max(pk)+seq_ds.select(increment_by), seq_ds.select(min_value))), false)}
end
def rollback_prepared_transaction(transaction_id, opts=OPTS)
run("ROLLBACK PREPARED #{literal(transaction_id)}", opts)
end
# PostgreSQL uses the SERIAL pseudo-type instead of AUTOINCREMENT for
# managing incrementing primary keys.
def serial_primary_key_options
# :nocov:
auto_increment_key = server_version >= 100002 ? :identity : :serial
# :nocov:
{:primary_key => true, auto_increment_key => true, :type=>Integer}
end
# The version of the PostgreSQL server, used for determining capability.
def server_version(server=nil)
return @server_version if @server_version
ds = dataset
ds = ds.server(server) if server
@server_version = swallow_database_error{ds.with_sql("SELECT CAST(current_setting('server_version_num') AS integer) AS v").single_value} || 0
end
# PostgreSQL supports CREATE TABLE IF NOT EXISTS on 9.1+
def supports_create_table_if_not_exists?
server_version >= 90100
end
# PostgreSQL 9.0+ supports some types of deferrable constraints beyond foreign key constraints.
def supports_deferrable_constraints?
server_version >= 90000
end
# PostgreSQL supports deferrable foreign key constraints.
def supports_deferrable_foreign_key_constraints?
true
end
# PostgreSQL supports DROP TABLE IF EXISTS
def supports_drop_table_if_exists?
true
end
# PostgreSQL supports partial indexes.
def supports_partial_indexes?
true
end
# PostgreSQL 9.0+ supports trigger conditions.
def supports_trigger_conditions?
server_version >= 90000
end
# PostgreSQL supports prepared transactions (two-phase commit) if
# max_prepared_transactions is greater than 0.
def supports_prepared_transactions?
return @supports_prepared_transactions if defined?(@supports_prepared_transactions)
@supports_prepared_transactions = self['SHOW max_prepared_transactions'].get.to_i > 0
end
# PostgreSQL supports savepoints
def supports_savepoints?
true
end
# PostgreSQL supports transaction isolation levels
def supports_transaction_isolation_levels?
true
end
# PostgreSQL supports transaction DDL statements.
def supports_transactional_ddl?
true
end
# Array of symbols specifying table names in the current database.
# The dataset used is yielded to the block if one is provided;
# otherwise, an array of symbols of table names is returned.
#
# Options:
# :qualify :: Return the tables as Sequel::SQL::QualifiedIdentifier instances,
# using the schema the table is located in as the qualifier.
# :schema :: The schema to search
# :server :: The server to use
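#
# Examples (illustrative):
#
# DB.tables # => [:items, :lists]
# DB.tables(qualify: true) # => [Sequel[:public][:items], Sequel[:public][:lists]]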
def tables(opts=OPTS, &block)
pg_class_relname(['r', 'p'], opts, &block)
end
# Check whether the given type name string/symbol (e.g. :hstore) is supported by
# the database.
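#
# Example (illustrative):
#
# DB.type_supported?(:hstore) # => true if the hstore extension is installed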
def type_supported?(type)
Sequel.synchronize{return @supported_types[type] if @supported_types.has_key?(type)}
supported = from(:pg_type).where(:typtype=>'b', :typname=>type.to_s).count > 0
Sequel.synchronize{return @supported_types[type] = supported}
end
# Creates a dataset that uses the VALUES clause:
#
# DB.values([[1, 2], [3, 4]])
# # VALUES ((1, 2), (3, 4))
#
# DB.values([[1, 2], [3, 4]]).order(:column2).limit(1, 1)
# # VALUES ((1, 2), (3, 4)) ORDER BY column2 LIMIT 1 OFFSET 1
def values(v)
@default_dataset.clone(:values=>v)
end
# Array of symbols specifying view names in the current database.
#
# Options:
# :materialized :: Return materialized views
# :qualify :: Return the views as Sequel::SQL::QualifiedIdentifier instances,
# using the schema the view is located in as the qualifier.
# :schema :: The schema to search
# :server :: The server to use
def views(opts=OPTS)
relkind = opts[:materialized] ? 'm' : 'v'
pg_class_relname(relkind, opts)
end
private
# Dataset used to retrieve CHECK constraint information
def _check_constraints_ds
@_check_constraints_ds ||= metadata_dataset.
from{pg_constraint.as(:co)}.
left_join(Sequel[:pg_attribute].as(:att), :attrelid=>:conrelid, :attnum=>SQL::Function.new(:ANY, Sequel[:co][:conkey])).
where(:contype=>'c').
select{[co[:conname].as(:constraint), att[:attname].as(:column), pg_get_constraintdef(co[:oid]).as(:definition)]}
end
# Dataset used to retrieve foreign keys referenced by a table
def _foreign_key_list_ds
@_foreign_key_list_ds ||= __foreign_key_list_ds(false)
end
# Dataset used to retrieve foreign keys referencing a table
def _reverse_foreign_key_list_ds
@_reverse_foreign_key_list_ds ||= __foreign_key_list_ds(true)
end
# Build dataset used for foreign key list methods.
def __foreign_key_list_ds(reverse)
if reverse
ctable = Sequel[:att2]
cclass = Sequel[:cl2]
rtable = Sequel[:att]
rclass = Sequel[:cl]
else
ctable = Sequel[:att]
cclass = Sequel[:cl]
rtable = Sequel[:att2]
rclass = Sequel[:cl2]
end
if server_version >= 90500
cpos = Sequel.expr{array_position(co[:conkey], ctable[:attnum])}
rpos = Sequel.expr{array_position(co[:confkey], rtable[:attnum])}
# :nocov:
else
range = 0...32
cpos = Sequel.expr{SQL::CaseExpression.new(range.map{|x| [SQL::Subscript.new(co[:conkey], [x]), x]}, 32, ctable[:attnum])}
rpos = Sequel.expr{SQL::CaseExpression.new(range.map{|x| [SQL::Subscript.new(co[:confkey], [x]), x]}, 32, rtable[:attnum])}
# :nocov:
end
ds = metadata_dataset.
from{pg_constraint.as(:co)}.
join(Sequel[:pg_class].as(cclass), :oid=>:conrelid).
join(Sequel[:pg_attribute].as(ctable), :attrelid=>:oid, :attnum=>SQL::Function.new(:ANY, Sequel[:co][:conkey])).
join(Sequel[:pg_class].as(rclass), :oid=>Sequel[:co][:confrelid]).
join(Sequel[:pg_attribute].as(rtable), :attrelid=>:oid, :attnum=>SQL::Function.new(:ANY, Sequel[:co][:confkey])).
join(Sequel[:pg_namespace].as(:nsp), :oid=>Sequel[:cl2][:relnamespace]).
order{[co[:conname], cpos]}.
where{{
cl[:relkind]=>%w'r p',
co[:contype]=>'f',
cpos=>rpos
}}.
select{[
co[:conname].as(:name),
ctable[:attname].as(:column),
co[:confupdtype].as(:on_update),
co[:confdeltype].as(:on_delete),
cl2[:relname].as(:table),
rtable[:attname].as(:refcolumn),
SQL::BooleanExpression.new(:AND, co[:condeferrable], co[:condeferred]).as(:deferrable),
nsp[:nspname].as(:schema)
]}
if reverse
ds = ds.order_append(Sequel[:nsp][:nspname], Sequel[:cl2][:relname])
end
ds
end
# Dataset used to retrieve index information
def _indexes_ds
@_indexes_ds ||= begin
if server_version >= 90500
order = [Sequel[:indc][:relname], Sequel.function(:array_position, Sequel[:ind][:indkey], Sequel[:att][:attnum])]
# :nocov:
else
range = 0...32
order = [Sequel[:indc][:relname], SQL::CaseExpression.new(range.map{|x| [SQL::Subscript.new(Sequel[:ind][:indkey], [x]), x]}, 32, Sequel[:att][:attnum])]
# :nocov:
end
attnums = SQL::Function.new(:ANY, Sequel[:ind][:indkey])
ds = metadata_dataset.
from{pg_class.as(:tab)}.
join(Sequel[:pg_index].as(:ind), :indrelid=>:oid).
join(Sequel[:pg_class].as(:indc), :oid=>:indexrelid).
join(Sequel[:pg_attribute].as(:att), :attrelid=>Sequel[:tab][:oid], :attnum=>attnums).
left_join(Sequel[:pg_constraint].as(:con), :conname=>Sequel[:indc][:relname]).
where{{
indc[:relkind]=>'i',
ind[:indisprimary]=>false,
:indexprs=>nil,
:indisvalid=>true}}.
order(*order).
select{[indc[:relname].as(:name), ind[:indisunique].as(:unique), att[:attname].as(:column), con[:condeferrable].as(:deferrable)]}
# :nocov:
ds = ds.where(:indisready=>true) if server_version >= 80300
ds = ds.where(:indislive=>true) if server_version >= 90300
# :nocov:
ds
end
end
# Dataset used to determine custom serial sequences for tables
def _select_custom_sequence_ds
@_select_custom_sequence_ds ||= metadata_dataset.
from{pg_class.as(:t)}.
join(:pg_namespace, {:oid => :relnamespace}, :table_alias=>:name).
join(:pg_attribute, {:attrelid => Sequel[:t][:oid]}, :table_alias=>:attr).
join(:pg_attrdef, {:adrelid => :attrelid, :adnum => :attnum}, :table_alias=>:def).
join(:pg_constraint, {:conrelid => :adrelid, Sequel[:cons][:conkey].sql_subscript(1) => :adnum}, :table_alias=>:cons).
where{{cons[:contype] => 'p', pg_get_expr(self.def[:adbin], attr[:attrelid]) => /nextval/i}}.
select{
expr = split_part(pg_get_expr(self.def[:adbin], attr[:attrelid]), "'", 2)
[
name[:nspname].as(:schema),
Sequel.case({{expr => /./} => substr(expr, strpos(expr, '.')+1)}, expr).as(:sequence)
]
}
end
# Dataset used to determine normal serial sequences for tables
def _select_serial_sequence_ds
@_serial_sequence_ds ||= metadata_dataset.
from{[
pg_class.as(:seq),
pg_attribute.as(:attr),
pg_depend.as(:dep),
pg_namespace.as(:name),
pg_constraint.as(:cons),
pg_class.as(:t)
]}.
where{[
[seq[:oid], dep[:objid]],
[seq[:relnamespace], name[:oid]],
[seq[:relkind], 'S'],
[attr[:attrelid], dep[:refobjid]],
[attr[:attnum], dep[:refobjsubid]],
[attr[:attrelid], cons[:conrelid]],
[attr[:attnum], cons[:conkey].sql_subscript(1)],
[attr[:attrelid], t[:oid]],
[cons[:contype], 'p']
]}.
select{[
name[:nspname].as(:schema),
seq[:relname].as(:sequence)
]}
end
# Dataset used to determine primary keys for tables
def _select_pk_ds
@_select_pk_ds ||= metadata_dataset.
from(:pg_class, :pg_attribute, :pg_index, :pg_namespace).
where{[
[pg_class[:oid], pg_attribute[:attrelid]],
[pg_class[:relnamespace], pg_namespace[:oid]],
[pg_class[:oid], pg_index[:indrelid]],
[pg_index[:indkey].sql_subscript(0), pg_attribute[:attnum]],
[pg_index[:indisprimary], 't']
]}.
select{pg_attribute[:attname].as(:pk)}
end
# Dataset used to get schema for tables
def _schema_ds
@_schema_ds ||= begin
ds = metadata_dataset.select{[
pg_attribute[:attname].as(:name),
SQL::Cast.new(pg_attribute[:atttypid], :integer).as(:oid),
SQL::Cast.new(basetype[:oid], :integer).as(:base_oid),
SQL::Function.new(:format_type, basetype[:oid], pg_type[:typtypmod]).as(:db_base_type),
SQL::Function.new(:format_type, pg_type[:oid], pg_attribute[:atttypmod]).as(:db_type),
SQL::Function.new(:pg_get_expr, pg_attrdef[:adbin], pg_class[:oid]).as(:default),
SQL::BooleanExpression.new(:NOT, pg_attribute[:attnotnull]).as(:allow_null),
SQL::Function.new(:COALESCE, SQL::BooleanExpression.from_value_pairs(pg_attribute[:attnum] => SQL::Function.new(:ANY, pg_index[:indkey])), false).as(:primary_key)]}.
from(:pg_class).
join(:pg_attribute, :attrelid=>:oid).
join(:pg_type, :oid=>:atttypid).
left_outer_join(Sequel[:pg_type].as(:basetype), :oid=>:typbasetype).
left_outer_join(:pg_attrdef, :adrelid=>Sequel[:pg_class][:oid], :adnum=>Sequel[:pg_attribute][:attnum]).
left_outer_join(:pg_index, :indrelid=>Sequel[:pg_class][:oid], :indisprimary=>true).
where{{pg_attribute[:attisdropped]=>false}}.
where{pg_attribute[:attnum] > 0}.
order{pg_attribute[:attnum]}
# :nocov:
if server_version > 100000
# :nocov:
ds = ds.select_append{pg_attribute[:attidentity]}
# :nocov:
if server_version > 120000
# :nocov:
ds = ds.select_append{Sequel.~(pg_attribute[:attgenerated]=>'').as(:generated)}
end
end
ds
end
end
def alter_table_add_column_sql(table, op)
"ADD COLUMN#{' IF NOT EXISTS' if op[:if_not_exists]} #{column_definition_sql(op)}"
end
def alter_table_generator_class
Postgres::AlterTableGenerator
end
def alter_table_set_column_type_sql(table, op)
s = super
if using = op[:using]
using = Sequel::LiteralString.new(using) if using.is_a?(String)
s += ' USING '
s << literal(using)
end
s
end
def alter_table_drop_column_sql(table, op)
"DROP COLUMN #{'IF EXISTS ' if op[:if_exists]}#{quote_identifier(op[:name])}#{' CASCADE' if op[:cascade]}"
end
def alter_table_validate_constraint_sql(table, op)
"VALIDATE CONSTRAINT #{quote_identifier(op[:name])}"
end
# If the :synchronous option is given and non-nil, set synchronous_commit
# appropriately. Valid values for the :synchronous option are true,
# :on, false, :off, :local, and :remote_write.
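#
# Example (illustrative):
#
# DB.transaction(synchronous: :off){DB[:items].insert(name: 'a')}
# # runs SET LOCAL synchronous_commit = off inside the transaction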
def begin_new_transaction(conn, opts)
super
if opts.has_key?(:synchronous)
case sync = opts[:synchronous]
when true
sync = :on
when false
sync = :off
when nil
return
end
log_connection_execute(conn, "SET LOCAL synchronous_commit = #{sync}")
end
end
# Set the READ ONLY transaction setting per savepoint, as PostgreSQL supports that.
def begin_savepoint(conn, opts)
super
unless (read_only = opts[:read_only]).nil?
log_connection_execute(conn, "SET TRANSACTION READ #{read_only ? 'ONLY' : 'WRITE'}")
end
end
# Literalize non-String collate options. This is because unquoted collations
# are folded to lowercase, and PostgreSQL uses mixed case or capitalized collations.
def column_definition_collate_sql(sql, column)
if collate = column[:collate]
collate = literal(collate) unless collate.is_a?(String)
sql << " COLLATE #{collate}"
end
end
# Support identity columns, but only use the identity SQL syntax if no
# default value is given.
def column_definition_default_sql(sql, column)
super
if !column[:serial] && !['smallserial', 'serial', 'bigserial'].include?(column[:type].to_s) && !column[:default]
if (identity = column[:identity])
sql << " GENERATED "
sql << (identity == :always ? "ALWAYS" : "BY DEFAULT")
sql << " AS IDENTITY"
elsif (generated = column[:generated_always_as])
sql << " GENERATED ALWAYS AS (#{literal(generated)}) STORED"
end
end
end
# Handle PostgreSQL specific default format.
def column_schema_normalize_default(default, type)
if m = /\A(?:B?('.*')::[^']+|\((-?\d+(?:\.\d+)?)\))\z/.match(default)
default = m[1] || m[2]
end
super(default, type)
end
# If the :prepare option is given and we aren't in a savepoint,
# prepare the transaction for a two-phase commit.
def commit_transaction(conn, opts=OPTS)
if (s = opts[:prepare]) && savepoint_level(conn) <= 1
log_connection_execute(conn, "PREPARE TRANSACTION #{literal(s)}")
else
super
end
end
# PostgreSQL can't combine rename_column operations, but it can combine
# the custom validate_constraint operation.
def combinable_alter_table_op?(op)
(super || op[:op] == :validate_constraint) && op[:op] != :rename_column
end
VALID_CLIENT_MIN_MESSAGES = %w'DEBUG5 DEBUG4 DEBUG3 DEBUG2 DEBUG1 LOG NOTICE WARNING ERROR FATAL PANIC'.freeze.each(&:freeze)
# The SQL queries to execute when starting a new connection.
def connection_configuration_sqls(opts=@opts)
sqls = []
sqls << "SET standard_conforming_strings = ON" if typecast_value_boolean(opts.fetch(:force_standard_strings, true))
cmm = opts.fetch(:client_min_messages, :warning)
if cmm && !cmm.to_s.empty?
cmm = cmm.to_s.upcase.strip
unless VALID_CLIENT_MIN_MESSAGES.include?(cmm)
raise Error, "Unsupported client_min_messages setting: #{cmm}"
end
sqls << "SET client_min_messages = '#{cmm.to_s.upcase}'"
end
if search_path = opts[:search_path]
case search_path
when String
search_path = search_path.split(",").map(&:strip)
when Array
# nil
else
raise Error, "unrecognized value for :search_path option: #{search_path.inspect}"
end
sqls << "SET search_path = #{search_path.map{|s| "\"#{s.gsub('"', '""')}\""}.join(',')}"
end
sqls
end
# Handle exclusion constraints.
def constraint_definition_sql(constraint)
case constraint[:type]
when :exclude
elements = constraint[:elements].map{|c, op| "#{literal(c)} WITH #{op}"}.join(', ')
sql = String.new
sql << "#{"CONSTRAINT #{quote_identifier(constraint[:name])} " if constraint[:name]}EXCLUDE USING #{constraint[:using]||'gist'} (#{elements})#{" WHERE #{filter_expr(constraint[:where])}" if constraint[:where]}"
constraint_deferrable_sql_append(sql, constraint[:deferrable])
sql
when :foreign_key, :check
sql = super
if constraint[:not_valid]
sql << " NOT VALID"
end
sql
else
super
end
end
def database_specific_error_class_from_sqlstate(sqlstate)
if sqlstate == '23P01'
ExclusionConstraintViolation
elsif sqlstate == '40P01'
SerializationFailure
elsif sqlstate == '55P03'
DatabaseLockTimeout
else
super
end
end
DATABASE_ERROR_REGEXPS = [
# Add this check first, since otherwise it's possible for users to control
# which exception class is generated.
[/invalid input syntax/, DatabaseError],
[/duplicate key value violates unique constraint/, UniqueConstraintViolation],
[/violates foreign key constraint/, ForeignKeyConstraintViolation],
[/violates check constraint/, CheckConstraintViolation],
[/violates not-null constraint/, NotNullConstraintViolation],
[/conflicting key value violates exclusion constraint/, ExclusionConstraintViolation],
[/could not serialize access/, SerializationFailure],
[/could not obtain lock on row in relation/, DatabaseLockTimeout],
].freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# SQL for doing fast table insert from stdin.
def copy_into_sql(table, opts)
sql = String.new
sql << "COPY #{literal(table)}"
if cols = opts[:columns]
sql << literal(Array(cols))
end
sql << " FROM STDIN"
if opts[:options] || opts[:format]
sql << " ("
sql << "FORMAT #{opts[:format]}" if opts[:format]
sql << "#{', ' if opts[:format]}#{opts[:options]}" if opts[:options]
sql << ')'
end
sql
end
# SQL for doing fast table output to stdout.
def copy_table_sql(table, opts)
if table.is_a?(String)
table
else
if opts[:options] || opts[:format]
options = String.new
options << " ("
options << "FORMAT #{opts[:format]}" if opts[:format]
options << "#{', ' if opts[:format]}#{opts[:options]}" if opts[:options]
options << ')'
end
table = if table.is_a?(::Sequel::Dataset)
"(#{table.sql})"
else
literal(table)
end
"COPY #{table} TO STDOUT#{options}"
end
end
# SQL statement to create a database function.
def create_function_sql(name, definition, opts=OPTS)
args = opts[:args]
if !opts[:args].is_a?(Array) || !opts[:args].any?{|a| Array(a).length == 3 and %w'OUT INOUT'.include?(a[2].to_s)}
returns = opts[:returns] || 'void'
end
language = opts[:language] || 'SQL'
<<-END
CREATE#{' OR REPLACE' if opts[:replace]} FUNCTION #{name}#{sql_function_args(args)}
#{"RETURNS #{returns}" if returns}
LANGUAGE #{language}
#{opts[:behavior].to_s.upcase if opts[:behavior]}
#{'STRICT' if opts[:strict]}
#{'SECURITY DEFINER' if opts[:security_definer]}
#{"PARALLEL #{opts[:parallel].to_s.upcase}" if opts[:parallel]}
#{"COST #{opts[:cost]}" if opts[:cost]}
#{"ROWS #{opts[:rows]}" if opts[:rows]}
#{opts[:set].map{|k,v| " SET #{k} = #{v}"}.join("\n") if opts[:set]}
AS #{literal(definition.to_s)}#{", #{literal(opts[:link_symbol].to_s)}" if opts[:link_symbol]}
END
end
# SQL for creating a procedural language.
def create_language_sql(name, opts=OPTS)
"CREATE#{' OR REPLACE' if opts[:replace] && server_version >= 90000}#{' TRUSTED' if opts[:trusted]} LANGUAGE #{name}#{" HANDLER #{opts[:handler]}" if opts[:handler]}#{" VALIDATOR #{opts[:validator]}" if opts[:validator]}"
end
# Create a partition of another table, used when create_table is called
# with the :partition_of option.
def create_partition_of_table_from_generator(name, generator, options)
execute_ddl(create_partition_of_table_sql(name, generator, options))
end
# SQL for creating a partition of another table.
def create_partition_of_table_sql(name, generator, options)
sql = create_table_prefix_sql(name, options).dup
sql << " PARTITION OF #{quote_schema_table(options[:partition_of])}"
case generator.partition_type
when :range
from, to = generator.range
sql << " FOR VALUES FROM #{literal(from)} TO #{literal(to)}"
when :list
sql << " FOR VALUES IN #{literal(generator.list)}"
when :hash
mod, remainder = generator.hash_values
sql << " FOR VALUES WITH (MODULUS #{literal(mod)}, REMAINDER #{literal(remainder)})"
else # when :default
sql << " DEFAULT"
end
sql << create_table_suffix_sql(name, options)
sql
end
# SQL for creating a schema.
def create_schema_sql(name, opts=OPTS)
"CREATE SCHEMA #{'IF NOT EXISTS ' if opts[:if_not_exists]}#{quote_identifier(name)}#{" AUTHORIZATION #{literal(opts[:owner])}" if opts[:owner]}"
end
# DDL statement for creating a table with the given name, columns, and options
def create_table_prefix_sql(name, options)
prefix_sql = if options[:temp]
raise(Error, "can't provide both :temp and :unlogged to create_table") if options[:unlogged]
raise(Error, "can't provide both :temp and :foreign to create_table") if options[:foreign]
temporary_table_sql
elsif options[:foreign]
raise(Error, "can't provide both :foreign and :unlogged to create_table") if options[:unlogged]
'FOREIGN '
elsif options[:unlogged]
'UNLOGGED '
end
"CREATE #{prefix_sql}TABLE#{' IF NOT EXISTS' if options[:if_not_exists]} #{options[:temp] ? quote_identifier(name) : quote_schema_table(name)}"
end
# SQL for creating a table with PostgreSQL specific options
def create_table_sql(name, generator, options)
"#{super}#{create_table_suffix_sql(name, options)}"
end
# Handle various PostgreSQL specific table extensions such as inheritance,
# partitioning, tablespaces, and foreign tables.
def create_table_suffix_sql(name, options)
sql = String.new
if inherits = options[:inherits]
sql << " INHERITS (#{Array(inherits).map{|t| quote_schema_table(t)}.join(', ')})"
end
if partition_by = options[:partition_by]
sql << " PARTITION BY #{options[:partition_type]||'RANGE'} #{literal(Array(partition_by))}"
end
if on_commit = options[:on_commit]
raise(Error, "can't provide :on_commit without :temp to create_table") unless options[:temp]
raise(Error, "unsupported on_commit option: #{on_commit.inspect}") unless ON_COMMIT.has_key?(on_commit)
sql << " ON COMMIT #{ON_COMMIT[on_commit]}"
end
if tablespace = options[:tablespace]
sql << " TABLESPACE #{quote_identifier(tablespace)}"
end
if server = options[:foreign]
sql << " SERVER #{quote_identifier(server)}"
if foreign_opts = options[:options]
sql << " OPTIONS (#{foreign_opts.map{|k, v| "#{k} #{literal(v.to_s)}"}.join(', ')})"
end
end
sql
end
def create_table_as_sql(name, sql, options)
result = create_table_prefix_sql name, options
if on_commit = options[:on_commit]
result += " ON COMMIT #{ON_COMMIT[on_commit]}"
end
result += " AS #{sql}"
end
def create_table_generator_class
Postgres::CreateTableGenerator
end
# SQL for creating a database trigger.
def create_trigger_sql(table, name, function, opts=OPTS)
events = opts[:events] ? Array(opts[:events]) : [:insert, :update, :delete]
whence = opts[:after] ? 'AFTER' : 'BEFORE'
if filter = opts[:when]
raise Error, "Trigger conditions are not supported for this database" unless supports_trigger_conditions?
filter = " WHEN #{filter_expr(filter)}"
end
"CREATE #{'OR REPLACE ' if opts[:replace]}TRIGGER #{name} #{whence} #{events.map{|e| e.to_s.upcase}.join(' OR ')} ON #{quote_schema_table(table)}#{' FOR EACH ROW' if opts[:each_row]}#{filter} EXECUTE PROCEDURE #{function}(#{Array(opts[:args]).map{|a| literal(a)}.join(', ')})"
end
# DDL fragment for initial part of CREATE VIEW statement
def create_view_prefix_sql(name, options)
sql = create_view_sql_append_columns("CREATE #{'OR REPLACE 'if options[:replace]}#{'TEMPORARY 'if options[:temp]}#{'RECURSIVE ' if options[:recursive]}#{'MATERIALIZED ' if options[:materialized]}VIEW #{quote_schema_table(name)}", options[:columns] || options[:recursive])
if options[:security_invoker]
sql += " WITH (security_invoker)"
end
if tablespace = options[:tablespace]
sql += " TABLESPACE #{quote_identifier(tablespace)}"
end
sql
end
# SQL for dropping a function from the database.
def drop_function_sql(name, opts=OPTS)
"DROP FUNCTION#{' IF EXISTS' if opts[:if_exists]} #{name}#{sql_function_args(opts[:args])}#{' CASCADE' if opts[:cascade]}"
end
# Support :if_exists, :cascade, and :concurrently options.
def drop_index_sql(table, op)
sch, _ = schema_and_table(table)
"DROP INDEX#{' CONCURRENTLY' if op[:concurrently]}#{' IF EXISTS' if op[:if_exists]} #{"#{quote_identifier(sch)}." if sch}#{quote_identifier(op[:name] || default_index_name(table, op[:columns]))}#{' CASCADE' if op[:cascade]}"
end
# SQL for dropping a procedural language from the database.
def drop_language_sql(name, opts=OPTS)
"DROP LANGUAGE#{' IF EXISTS' if opts[:if_exists]} #{name}#{' CASCADE' if opts[:cascade]}"
end
# SQL for dropping a schema from the database.
def drop_schema_sql(name, opts=OPTS)
"DROP SCHEMA#{' IF EXISTS' if opts[:if_exists]} #{quote_identifier(name)}#{' CASCADE' if opts[:cascade]}"
end
# SQL for dropping a trigger from the database.
def drop_trigger_sql(table, name, opts=OPTS)
"DROP TRIGGER#{' IF EXISTS' if opts[:if_exists]} #{name} ON #{quote_schema_table(table)}#{' CASCADE' if opts[:cascade]}"
end
# Support :foreign tables
def drop_table_sql(name, options)
"DROP#{' FOREIGN' if options[:foreign]} TABLE#{' IF EXISTS' if options[:if_exists]} #{quote_schema_table(name)}#{' CASCADE' if options[:cascade]}"
end
# SQL for dropping a view from the database.
def drop_view_sql(name, opts=OPTS)
"DROP #{'MATERIALIZED ' if opts[:materialized]}VIEW#{' IF EXISTS' if opts[:if_exists]} #{quote_schema_table(name)}#{' CASCADE' if opts[:cascade]}"
end
# If opts includes a :schema option, use it, otherwise restrict the filter to only the
# currently visible schemas.
def filter_schema(ds, opts)
expr = if schema = opts[:schema]
schema.to_s
else
Sequel.function(:any, Sequel.function(:current_schemas, false))
end
ds.where{{pg_namespace[:nspname]=>expr}}
end
def index_definition_sql(table_name, index)
cols = index[:columns]
index_name = index[:name] || default_index_name(table_name, cols)
expr = if o = index[:opclass]
"(#{Array(cols).map{|c| "#{literal(c)} #{o}"}.join(', ')})"
else
literal(Array(cols))
end
if_not_exists = " IF NOT EXISTS" if index[:if_not_exists]
unique = "UNIQUE " if index[:unique]
index_type = index[:type]
filter = index[:where] || index[:filter]
filter = " WHERE #{filter_expr(filter)}" if filter
nulls_distinct = " NULLS#{' NOT' if index[:nulls_distinct] == false} DISTINCT" unless index[:nulls_distinct].nil?
case index_type
when :full_text
expr = "(to_tsvector(#{literal(index[:language] || 'simple')}::regconfig, #{literal(dataset.send(:full_text_string_join, cols))}))"
index_type = index[:index_type] || :gin
when :spatial
index_type = :gist
end
"CREATE #{unique}INDEX#{' CONCURRENTLY' if index[:concurrently]}#{if_not_exists} #{quote_identifier(index_name)} ON #{quote_schema_table(table_name)} #{"USING #{index_type} " if index_type}#{expr}#{" INCLUDE #{literal(Array(index[:include]))}" if index[:include]}#{nulls_distinct}#{" TABLESPACE #{quote_identifier(index[:tablespace])}" if index[:tablespace]}#{filter}"
end
# Set up data structures shared by all postgres adapters.
def initialize_postgres_adapter
@primary_keys = {}
@primary_key_sequences = {}
@supported_types = {}
procs = @conversion_procs = CONVERSION_PROCS.dup
procs[1184] = procs[1114] = method(:to_application_timestamp)
end
# Backbone of the tables and views support.
def pg_class_relname(type, opts)
ds = metadata_dataset.from(:pg_class).where(:relkind=>type).select(:relname).server(opts[:server]).join(:pg_namespace, :oid=>:relnamespace)
ds = filter_schema(ds, opts)
m = output_identifier_meth
if defined?(yield)
yield(ds)
elsif opts[:qualify]
ds.select_append{pg_namespace[:nspname]}.map{|r| Sequel.qualify(m.call(r[:nspname]).to_s, m.call(r[:relname]).to_s)}
else
ds.map{|r| m.call(r[:relname])}
end
end
# Return an expression for the oid of the table expr. Used by the metadata parsing
# code to disambiguate unqualified tables.
def regclass_oid(expr, opts=OPTS)
if expr.is_a?(String) && !expr.is_a?(LiteralString)
expr = Sequel.identifier(expr)
end
sch, table = schema_and_table(expr)
sch ||= opts[:schema]
if sch
expr = Sequel.qualify(sch, table)
end
expr = if ds = opts[:dataset]
ds.literal(expr)
else
literal(expr)
end
Sequel.cast(expr.to_s,:regclass).cast(:oid)
end
# Remove the cached entries for primary keys and sequences when a table is changed.
def remove_cached_schema(table)
tab = quote_schema_table(table)
Sequel.synchronize do
@primary_keys.delete(tab)
@primary_key_sequences.delete(tab)
end
super
end
# SQL DDL statement for renaming a table. PostgreSQL doesn't allow you to change a table's schema in
# a rename table operation, so specifying a new schema in new_name will not have an effect.
def rename_table_sql(name, new_name)
"ALTER TABLE #{quote_schema_table(name)} RENAME TO #{quote_identifier(schema_and_table(new_name).last)}"
end
def schema_column_type(db_type)
case db_type
when /\Ainterval\z/io
:interval
when /\Acitext\z/io
:string
else
super
end
end
# The dataset used for parsing table schemas, using the pg_* system catalogs.
def schema_parse_table(table_name, opts)
m = output_identifier_meth(opts[:dataset])
_schema_ds.where_all(Sequel[:pg_class][:oid]=>regclass_oid(table_name, opts)).map do |row|
row[:default] = nil if blank_object?(row[:default])
if row[:base_oid]
row[:domain_oid] = row[:oid]
row[:oid] = row.delete(:base_oid)
row[:db_domain_type] = row[:db_type]
row[:db_type] = row.delete(:db_base_type)
else
row.delete(:base_oid)
row.delete(:db_base_type)
end
row[:type] = schema_column_type(row[:db_type])
identity = row.delete(:attidentity)
if row[:primary_key]
row[:auto_increment] = !!(row[:default] =~ /\A(?:nextval)/i) || identity == 'a' || identity == 'd'
end
[m.call(row.delete(:name)), row]
end
end
# Set the transaction isolation level on the given connection
def set_transaction_isolation(conn, opts)
level = opts.fetch(:isolation, transaction_isolation_level)
read_only = opts[:read_only]
deferrable = opts[:deferrable]
if level || !read_only.nil? || !deferrable.nil?
sql = String.new
sql << "SET TRANSACTION"
sql << " ISOLATION LEVEL #{Sequel::Database::TRANSACTION_ISOLATION_LEVELS[level]}" if level
sql << " READ #{read_only ? 'ONLY' : 'WRITE'}" unless read_only.nil?
sql << " #{'NOT ' unless deferrable}DEFERRABLE" unless deferrable.nil?
log_connection_execute(conn, sql)
end
end
# Turns an array of argument specifiers into an SQL fragment used for function arguments. See create_function_sql.
def sql_function_args(args)
"(#{Array(args).map{|a| Array(a).reverse.join(' ')}.join(', ')})"
end
# PostgreSQL can combine multiple alter table ops into a single query.
def supports_combining_alter_table_ops?
true
end
# PostgreSQL supports CREATE OR REPLACE VIEW.
def supports_create_or_replace_view?
true
end
# Handle bigserial type if :serial option is present
def type_literal_generic_bignum_symbol(column)
column[:serial] ? :bigserial : super
end
# PostgreSQL uses the bytea data type for blobs
def type_literal_generic_file(column)
:bytea
end
# Handle serial type if :serial option is present
def type_literal_generic_integer(column)
column[:serial] ? :serial : super
end
# PostgreSQL prefers the text datatype. If a fixed size is requested,
# the char type is used. If the text type is specifically
# disallowed or there is a size specified, use the varchar type.
# Otherwise use the text type.
def type_literal_generic_string(column)
if column[:text]
:text
elsif column[:fixed]
"char(#{column[:size]||default_string_column_size})"
elsif column[:text] == false || column[:size]
"varchar(#{column[:size]||default_string_column_size})"
else
:text
end
end
# PostgreSQL 9.4+ supports views with check option.
def view_with_check_option_support
# :nocov:
:local if server_version >= 90400
# :nocov:
end
end
module DatasetMethods
include UnmodifiedIdentifiers::DatasetMethods
NULL = LiteralString.new('NULL').freeze
LOCK_MODES = ['ACCESS SHARE', 'ROW SHARE', 'ROW EXCLUSIVE', 'SHARE UPDATE EXCLUSIVE', 'SHARE', 'SHARE ROW EXCLUSIVE', 'EXCLUSIVE', 'ACCESS EXCLUSIVE'].each(&:freeze).freeze
Dataset.def_sql_method(self, :delete, [['if server_version >= 90100', %w'with delete from using where returning'], ['else', %w'delete from using where returning']])
Dataset.def_sql_method(self, :insert, [['if server_version >= 90500', %w'with insert into columns override values conflict returning'], ['elsif server_version >= 90100', %w'with insert into columns values returning'], ['else', %w'insert into columns values returning']])
Dataset.def_sql_method(self, :select, [['if opts[:values]', %w'values order limit'], ['elsif server_version >= 80400', %w'with select distinct columns from join where group having window compounds order limit lock'], ['else', %w'select distinct columns from join where group having compounds order limit lock']])
Dataset.def_sql_method(self, :update, [['if server_version >= 90100', %w'with update table set from where returning'], ['else', %w'update table set from where returning']])
# Return the results of an EXPLAIN ANALYZE query as a string
def analyze
explain(:analyze=>true)
end
# Handle converting the ruby xor operator (^) into the
# PostgreSQL xor operator (#), and use the ILIKE and NOT ILIKE
# operators.
def complex_expression_sql_append(sql, op, args)
case op
when :^
j = ' # '
c = false
args.each do |a|
sql << j if c
literal_append(sql, a)
c ||= true
end
when :ILIKE, :'NOT ILIKE'
sql << '('
literal_append(sql, args[0])
sql << ' ' << op.to_s << ' '
literal_append(sql, args[1])
sql << " ESCAPE "
literal_append(sql, "\\")
sql << ')'
else
super
end
end
# Disables automatic use of INSERT ... RETURNING. You can still use
# returning manually to force the use of RETURNING when inserting.
#
# This is designed for cases where INSERT RETURNING cannot be used,
# such as when you are using partitioning with trigger functions
# or conditional rules, or when you are using a PostgreSQL version
# less than 8.2, or a PostgreSQL derivative that does not support
# returning.
#
# Note that when this method is used, insert will not return the
# primary key of the inserted row; you will have to get the primary
# key of the inserted row before inserting via nextval, or after
# inserting via currval or lastval (making sure to use the same
# database connection for currval or lastval).
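#
# Example (illustrative):
#
# DB[:items].disable_insert_returning.insert(name: 'a')
# # INSERT INTO "items" ("name") VALUES ('a')
# # => nil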
def disable_insert_returning
clone(:disable_insert_returning=>true)
end
# Return the results of an EXPLAIN query as a string
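#
# Example (illustrative output):
#
# DB[:items].explain
# # => "Seq Scan on items  (cost=0.00..35.50 rows=2550 width=44)"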
def explain(opts=OPTS)
with_sql((opts[:analyze] ? 'EXPLAIN ANALYZE ' : 'EXPLAIN ') + select_sql).map(:'QUERY PLAN').join("\r\n")
end
# Return a cloned dataset which will use FOR SHARE to lock returned rows.
def for_share
lock_style(:share)
end
# Run a full text search on PostgreSQL. By default, this searches for the
# inclusion of any of the terms in any of the cols.
#
# Options:
# :headline :: Append an expression to the selected columns aliased to headline that
# contains an extract of the matched text.
# :language :: The language to use for the search (default: 'simple')
# :plain :: Whether a plain search should be used (default: false). In this case,
# terms should be a single string, and it will do a search where cols
# contains all of the words in terms. This ignores search operators in terms.
# :phrase :: Similar to :plain, but also adding an ILIKE filter to ensure that
# returned rows also include the exact phrase used.
# :rank :: Set to true to order by the rank, so that closer matches are returned first.
# :to_tsquery :: Can be set to :plain or :phrase to specify the function to use to
# convert the terms to a ts_query.
# :tsquery :: Specifies the terms argument is already a valid SQL expression returning a
# tsquery, and can be used directly in the query.
# :tsvector :: Specifies the cols argument is already a valid SQL expression returning a
# tsvector, and can be used directly in the query.
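#
# Example (the generated SQL is shown approximately):
#
# DB[:posts].full_text_search(:title, 'ruby')
# # WHERE (to_tsvector(CAST('simple' AS regconfig), COALESCE("title", '')) @@
# #        to_tsquery(CAST('simple' AS regconfig), 'ruby'))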
def full_text_search(cols, terms, opts = OPTS)
lang = Sequel.cast(opts[:language] || 'simple', :regconfig)
unless opts[:tsvector]
phrase_cols = full_text_string_join(cols)
cols = Sequel.function(:to_tsvector, lang, phrase_cols)
end
unless opts[:tsquery]
phrase_terms = terms.is_a?(Array) ? terms.join(' | ') : terms
query_func = case to_tsquery = opts[:to_tsquery]
when :phrase, :plain
:"#{to_tsquery}to_tsquery"
else
(opts[:phrase] || opts[:plain]) ? :plainto_tsquery : :to_tsquery
end
terms = Sequel.function(query_func, lang, phrase_terms)
end
ds = where(Sequel.lit(["", " @@ ", ""], cols, terms))
if opts[:phrase]
raise Error, "can't use :phrase with either :tsvector or :tsquery arguments to full_text_search together" if opts[:tsvector] || opts[:tsquery]
ds = ds.grep(phrase_cols, "%#{escape_like(phrase_terms)}%", :case_insensitive=>true)
end
if opts[:rank]
ds = ds.reverse{ts_rank_cd(cols, terms)}
end
if opts[:headline]
ds = ds.select_append{ts_headline(lang, phrase_cols, terms).as(:headline)}
end
ds
end
# Insert given values into the database.
def insert(*values)
if @opts[:returning]
# Already know which columns to return, let the standard code handle it
super
elsif @opts[:sql] || @opts[:disable_insert_returning]
# Raw SQL used or RETURNING disabled, just use the default behavior
# and return nil since sequence is not known.
super
nil
else
# Force the use of RETURNING with the primary key value,
# unless it has been disabled.
returning(insert_pk).insert(*values){|r| return r.values.first}
end
end
# Handle uniqueness violations when inserting, by updating the conflicting row, using
# ON CONFLICT. With no options, uses ON CONFLICT DO NOTHING. Options:
# :conflict_where :: The index filter, when using a partial index to determine uniqueness.
# :constraint :: An explicit constraint name, has precedence over :target.
# :target :: The column name or expression to handle uniqueness violations on.
# :update :: A hash of columns and values to set. Uses ON CONFLICT DO UPDATE.
# :update_where :: A WHERE condition to use for the update.
#
# Examples:
#
# DB[:table].insert_conflict.insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT DO NOTHING
#
# DB[:table].insert_conflict(constraint: :table_a_uidx).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT ON CONSTRAINT table_a_uidx DO NOTHING
#
# DB[:table].insert_conflict(target: :a).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT (a) DO NOTHING
#
# DB[:table].insert_conflict(target: :a, conflict_where: {c: true}).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT (a) WHERE (c IS TRUE) DO NOTHING
#
# DB[:table].insert_conflict(target: :a, update: {b: Sequel[:excluded][:b]}).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT (a) DO UPDATE SET b = excluded.b
#
# DB[:table].insert_conflict(constraint: :table_a_uidx,
# update: {b: Sequel[:excluded][:b]}, update_where: {Sequel[:table][:status_id] => 1}).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT ON CONSTRAINT table_a_uidx
# # DO UPDATE SET b = excluded.b WHERE (table.status_id = 1)
def insert_conflict(opts=OPTS)
clone(:insert_conflict => opts)
end
# Ignore uniqueness/exclusion violations when inserting, using ON CONFLICT DO NOTHING.
# Exists mostly for compatibility to MySQL's insert_ignore. Example:
#
# DB[:table].insert_ignore.insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT DO NOTHING
def insert_ignore
insert_conflict
end
# Insert a record, returning the record inserted, using RETURNING. Always returns nil without
# running an INSERT statement if disable_insert_returning is used. If the query runs
# but returns no values, returns false.
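#
# Example (illustrative):
#
# DB[:items].insert_select(name: 'a')
# # INSERT INTO "items" ("name") VALUES ('a') RETURNING *
# # => {:id=>1, :name=>'a'}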
def insert_select(*values)
return unless supports_insert_select?
# Handle case where query does not return a row
server?(:default).with_sql_first(insert_select_sql(*values)) || false
end
# The SQL to use for an insert_select, adds a RETURNING clause to the insert
# unless the RETURNING clause is already present.
def insert_select_sql(*values)
ds = opts[:returning] ? self : returning
ds.insert_sql(*values)
end
# Support SQL::AliasedExpression as expr to set up a USING join with a table alias for the
# USING columns.
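#
# A sketch (table and column names are hypothetical):
#
# DB[:a].join(:b, Sequel.as([:c], :t))
# # ... INNER JOIN "b" USING ("c") AS "t"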
def join_table(type, table, expr=nil, options=OPTS, &block)
if expr.is_a?(SQL::AliasedExpression) && expr.expression.is_a?(Array) && !expr.expression.empty? && expr.expression.all?
options = options.merge(:join_using=>true)
end
super
end
# Locks all tables in the dataset's FROM clause (but not in JOINs) with
# the specified mode (e.g. 'EXCLUSIVE'). If a block is given, starts
# a new transaction, locks the table, and yields. If a block is not given,
# just locks the tables. Note that PostgreSQL will probably raise an error
# if you lock the table outside of an existing transaction. Returns nil.
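#
# Example (illustrative):
#
# DB[:items].lock('EXCLUSIVE') do
# DB[:items].insert(name: 'a')
# end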
def lock(mode, opts=OPTS)
if defined?(yield) # perform locking inside a transaction and yield to block
@db.transaction(opts){lock(mode, opts); yield}
else
sql = 'LOCK TABLE '.dup
source_list_append(sql, @opts[:from])
mode = mode.to_s.upcase.strip
unless LOCK_MODES.include?(mode)
raise Error, "Unsupported lock mode: #{mode}"
end
sql << " IN #{mode} MODE"
@db.execute(sql, opts)
end
nil
end
# Return a dataset with a WHEN MATCHED THEN DO NOTHING clause added to the
# MERGE statement. If a block is passed, treat it as a virtual row and
# use it as additional conditions for the match.
#
# merge_do_nothing_when_matched
# # WHEN MATCHED THEN DO NOTHING
#
# merge_do_nothing_when_matched{a > 30}
# # WHEN MATCHED AND (a > 30) THEN DO NOTHING
def merge_do_nothing_when_matched(&block)
_merge_when(:type=>:matched, &block)
end
# Return a dataset with a WHEN NOT MATCHED THEN DO NOTHING clause added to the
# MERGE statement. If a block is passed, treat it as a virtual row and
# use it as additional conditions for the match.
#
# merge_do_nothing_when_not_matched
# # WHEN NOT MATCHED THEN DO NOTHING
#
# merge_do_nothing_when_not_matched{a > 30}
# # WHEN NOT MATCHED AND (a > 30) THEN DO NOTHING
def merge_do_nothing_when_not_matched(&block)
_merge_when(:type=>:not_matched, &block)
end
# Support OVERRIDING USER|SYSTEM VALUE for MERGE INSERT.
def merge_insert(*values, &block)
h = {:type=>:insert, :values=>values}
if override = @opts[:override]
h[:override] = insert_override_sql(String.new)
end
_merge_when(h, &block)
end
# Use OVERRIDING SYSTEM VALUE for INSERT statements, so that identity columns
# always use the user supplied value, and an error is not raised for identity
# columns that are GENERATED ALWAYS.
def overriding_system_value
clone(:override=>:system)
end
# Use OVERRIDING USER VALUE for INSERT statements, so that identity columns
# always use the sequence value instead of the user supplied value.
def overriding_user_value
clone(:override=>:user)
end
def supports_cte?(type=:select)
if type == :select
server_version >= 80400
else
server_version >= 90100
end
end
# PostgreSQL supports using the WITH clause in subqueries if it
# supports using WITH at all (i.e. on PostgreSQL 8.4+).
def supports_cte_in_subqueries?
supports_cte?
end
# DISTINCT ON is a PostgreSQL extension
def supports_distinct_on?
true
end
# PostgreSQL 9.5+ supports GROUP CUBE
def supports_group_cube?
server_version >= 90500
end
# PostgreSQL 9.5+ supports GROUP ROLLUP
def supports_group_rollup?
server_version >= 90500
end
# PostgreSQL 9.5+ supports GROUPING SETS
def supports_grouping_sets?
server_version >= 90500
end
# True unless insert returning has been disabled for this dataset.
def supports_insert_select?
!@opts[:disable_insert_returning]
end
# PostgreSQL 9.5+ supports the ON CONFLICT clause to INSERT.
def supports_insert_conflict?
server_version >= 90500
end
# PostgreSQL 9.3+ supports lateral subqueries
def supports_lateral_subqueries?
server_version >= 90300
end
# PostgreSQL supports modifying joined datasets
def supports_modifying_joins?
true
end
# PostgreSQL 15+ supports MERGE.
def supports_merge?
server_version >= 150000
end
# PostgreSQL supports NOWAIT.
def supports_nowait?
true
end
# Returning is always supported.
def supports_returning?(type)
true
end
# PostgreSQL supports pattern matching via regular expressions
def supports_regexp?
true
end
# PostgreSQL 9.5+ supports SKIP LOCKED.
def supports_skip_locked?
server_version >= 90500
end
# PostgreSQL supports timezones in literal timestamps
def supports_timestamp_timezones?
true
end
# PostgreSQL 8.4+ supports WINDOW clause.
def supports_window_clause?
server_version >= 80400
end
# PostgreSQL 8.4+ supports window functions
def supports_window_functions?
server_version >= 80400
end
# Base support added in 8.4, offset support added in 9.0,
# GROUPS and EXCLUDE support added in 11.0.
def supports_window_function_frame_option?(option)
case option
when :rows, :range
true
when :offset
server_version >= 90000
when :groups, :exclude
server_version >= 110000
else
false
end
end
# Truncates the dataset. Returns nil.
#
# Options:
# :cascade :: whether to use the CASCADE option, useful when truncating
# tables with foreign keys.
# :only :: truncate using ONLY, so child tables are unaffected
# :restart :: use RESTART IDENTITY to restart any related sequences
#
# :only and :restart only work correctly on PostgreSQL 8.4+.
#
# Usage:
# DB[:table].truncate
# # TRUNCATE TABLE "table"
#
# DB[:table].truncate(cascade: true, only: true, restart: true)
# # TRUNCATE TABLE ONLY "table" RESTART IDENTITY CASCADE
def truncate(opts = OPTS)
if opts.empty?
super()
else
clone(:truncate_opts=>opts).truncate
end
end
# Use WITH TIES when limiting the result set to also include additional
# rows that have the same results for the order column as the final row.
# Requires PostgreSQL 13+.
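#
# Example (illustrative):
#
# DB[:items].order(:score).limit(3).with_ties
# # SELECT * FROM "items" ORDER BY "score" FETCH FIRST 3 ROWS WITH TIES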
def with_ties
clone(:limit_with_ties=>true)
end
protected
# If returned primary keys are requested, use RETURNING unless already set on the
# dataset. If RETURNING is already set, use existing returning values. If RETURNING
# is only set to return a single column, return an array of just that column.
# Otherwise, return an array of hashes.
def _import(columns, values, opts=OPTS)
if @opts[:returning]
# no transaction: our multi_insert_sql_strategy should guarantee
# that there's only ever a single statement.
sql = multi_insert_sql(columns, values)[0]
returning_fetch_rows(sql).map{|v| v.length == 1 ? v.values.first : v}
elsif opts[:return] == :primary_key
returning(insert_pk)._import(columns, values, opts)
else
super
end
end
def to_prepared_statement(type, *a)
if type == :insert && !@opts.has_key?(:returning)
returning(insert_pk).send(:to_prepared_statement, :insert_pk, *a)
else
super
end
end
private
# Append the INSERT sql used in a MERGE
def _merge_insert_sql(sql, data)
sql << " THEN INSERT "
columns, values = _parse_insert_sql_args(data[:values])
_insert_columns_sql(sql, columns)
if override = data[:override]
sql << override
end
_insert_values_sql(sql, values)
end
def _merge_matched_sql(sql, data)
sql << " THEN DO NOTHING"
end
alias _merge_not_matched_sql _merge_matched_sql
# Format TRUNCATE statement with PostgreSQL specific options.
def _truncate_sql(table)
to = @opts[:truncate_opts] || OPTS
"TRUNCATE TABLE#{' ONLY' if to[:only]} #{table}#{' RESTART IDENTITY' if to[:restart]}#{' CASCADE' if to[:cascade]}"
end
# Allow truncation of multiple source tables.
def check_truncation_allowed!
raise(InvalidOperation, "Grouped datasets cannot be truncated") if opts[:group]
raise(InvalidOperation, "Joined datasets cannot be truncated") if opts[:join]
end
# Only include the primary table in the main delete clause
def delete_from_sql(sql)
sql << ' FROM '
source_list_append(sql, @opts[:from][0..0])
end
# Use USING to specify additional tables in a delete query
def delete_using_sql(sql)
join_from_sql(:USING, sql)
end
# Add ON CONFLICT clause if it should be used
def insert_conflict_sql(sql)
if opts = @opts[:insert_conflict]
sql << " ON CONFLICT"
if target = opts[:constraint]
sql << " ON CONSTRAINT "
identifier_append(sql, target)
elsif target = opts[:target]
sql << ' '
identifier_append(sql, Array(target))
if conflict_where = opts[:conflict_where]
sql << " WHERE "
literal_append(sql, conflict_where)
end
end
if values = opts[:update]
sql << " DO UPDATE SET "
update_sql_values_hash(sql, values)
if update_where = opts[:update_where]
sql << " WHERE "
literal_append(sql, update_where)
end
else
sql << " DO NOTHING"
end
end
end
# Include aliases when inserting into a single table on PostgreSQL 9.5+.
def insert_into_sql(sql)
sql << " INTO "
if (f = @opts[:from]) && f.length == 1
identifier_append(sql, server_version >= 90500 ? f.first : unaliased_identifier(f.first))
else
source_list_append(sql, f)
end
end
# Return the primary key to use for RETURNING in an INSERT statement
def insert_pk
(f = opts[:from]) && !f.empty? && (t = f.first)
case t
when Symbol, String, SQL::Identifier, SQL::QualifiedIdentifier
if pk = db.primary_key(t)
Sequel::SQL::Identifier.new(pk)
end
end
end
# Support OVERRIDING SYSTEM|USER VALUE in insert statements
def insert_override_sql(sql)
case opts[:override]
when :system
sql << " OVERRIDING SYSTEM VALUE"
when :user
sql << " OVERRIDING USER VALUE"
end
end
# For multiple table support, PostgreSQL requires at least
# two from tables, with joins allowed.
def join_from_sql(type, sql)
if(from = @opts[:from][1..-1]).empty?
raise(Error, 'Need multiple FROM tables if updating/deleting a dataset with JOINs') if @opts[:join]
else
sql << ' ' << type.to_s << ' '
source_list_append(sql, from)
select_join_sql(sql)
end
end
# Support table aliases for USING columns
def join_using_clause_using_sql_append(sql, using_columns)
if using_columns.is_a?(SQL::AliasedExpression)
super(sql, using_columns.expression)
sql << ' AS '
identifier_append(sql, using_columns.alias)
else
super
end
end
# Use a generic blob quoting method, hopefully overridden in one of the subadapter methods
def literal_blob_append(sql, v)
sql << "'" << v.gsub(/[\000-\037\047\134\177-\377]/n){|b| "\\#{("%o" % b[0..1].unpack("C")[0]).rjust(3, '0')}"} << "'"
end
# PostgreSQL uses FALSE for false values
def literal_false
'false'
end
# PostgreSQL quotes NaN and Infinity.
def literal_float(value)
if value.finite?
super
elsif value.nan?
"'NaN'"
elsif value.infinite? == 1
"'Infinity'"
else
"'-Infinity'"
end
end
# Handle Ruby integers outside PostgreSQL bigint range specially.
def literal_integer(v)
if v > 9223372036854775807 || v < -9223372036854775808
literal_integer_outside_bigint_range(v)
else
v.to_s
end
end
# Raise IntegerOutsideBigintRange when attempting to literalize a Ruby integer
# outside the PostgreSQL bigint range, so PostgreSQL doesn't treat
# the value as numeric.
def literal_integer_outside_bigint_range(v)
raise IntegerOutsideBigintRange, "attempt to literalize Ruby integer outside PostgreSQL bigint range: #{v}"
end
# Assume that SQL standard quoting is on, per Sequel's defaults
def literal_string_append(sql, v)
sql << "'" << v.gsub("'", "''") << "'"
end
# PostgreSQL uses true for true values
def literal_true
'true'
end
# PostgreSQL supports multiple rows in INSERT.
def multi_insert_sql_strategy
:values
end
# Dataset options that do not affect the generated SQL.
def non_sql_option?(key)
super || key == :cursor || key == :insert_conflict
end
# PostgreSQL requires parentheses around compound datasets if they use
# CTEs, and using them in other places doesn't hurt.
def compound_dataset_sql_append(sql, ds)
sql << '('
super
sql << ')'
end
# Backslash is supported by default as the escape character on PostgreSQL,
# and using ESCAPE can break LIKE ANY() usage.
def requires_like_escape?
false
end
# Support FETCH FIRST WITH TIES on PostgreSQL 13+.
def select_limit_sql(sql)
l = @opts[:limit]
o = @opts[:offset]
return unless l || o
if @opts[:limit_with_ties]
if o
sql << " OFFSET "
literal_append(sql, o)
end
if l
sql << " FETCH FIRST "
literal_append(sql, l)
sql << " ROWS WITH TIES"
end
else
if l
sql << " LIMIT "
literal_append(sql, l)
end
if o
sql << " OFFSET "
literal_append(sql, o)
end
end
end
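# A sketch of the two forms above (identifier quoting omitted), where
# with_ties is assumed to set the :limit_with_ties option:
#
#   DB[:t].order(:x).limit(5).sql
#   # SELECT * FROM t ORDER BY x LIMIT 5
#   DB[:t].order(:x).limit(5).with_ties.sql
#   # SELECT * FROM t ORDER BY x FETCH FIRST 5 ROWS WITH TIES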
# Support FOR SHARE locking when using the :share lock style.
# Use SKIP LOCKED if skipping locked rows.
def select_lock_sql(sql)
lock = @opts[:lock]
if lock == :share
sql << ' FOR SHARE'
else
super
end
if lock
if @opts[:skip_locked]
sql << " SKIP LOCKED"
elsif @opts[:nowait]
sql << " NOWAIT"
end
end
end
# Support VALUES clause instead of the SELECT clause to return rows.
def select_values_sql(sql)
sql << "VALUES "
expression_list_append(sql, opts[:values])
end
# Use WITH RECURSIVE instead of WITH if any of the CTEs is recursive
def select_with_sql_base
opts[:with].any?{|w| w[:recursive]} ? "WITH RECURSIVE " : super
end
# Support PostgreSQL 14+ CTE SEARCH/CYCLE clauses
def select_with_sql_cte(sql, cte)
super
select_with_sql_cte_search_cycle(sql, cte)
end
def select_with_sql_cte_search_cycle(sql, cte)
if search_opts = cte[:search]
sql << if search_opts[:type] == :breadth
" SEARCH BREADTH FIRST BY "
else
" SEARCH DEPTH FIRST BY "
end
identifier_list_append(sql, Array(search_opts[:by]))
sql << " SET "
identifier_append(sql, search_opts[:set] || :ordercol)
end
if cycle_opts = cte[:cycle]
sql << " CYCLE "
identifier_list_append(sql, Array(cycle_opts[:columns]))
sql << " SET "
identifier_append(sql, cycle_opts[:cycle_column] || :is_cycle)
if cycle_opts.has_key?(:cycle_value)
sql << " TO "
literal_append(sql, cycle_opts[:cycle_value])
sql << " DEFAULT "
literal_append(sql, cycle_opts.fetch(:noncycle_value, false))
end
sql << " USING "
identifier_append(sql, cycle_opts[:path_column] || :path)
end
end
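# Rough sketches of the fragments emitted above, assuming CTE options
# such as search: {by: :id} and cycle: {columns: :id}, with the
# default SET/USING column names:
#
#   ... SEARCH DEPTH FIRST BY id SET ordercol
#   ... CYCLE id SET is_cycle USING path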
# The version of the database server
def server_version
db.server_version(@opts[:server])
end
# PostgreSQL 9.4+ supports the FILTER clause for aggregate functions.
def supports_filtered_aggregates?
server_version >= 90400
end
# PostgreSQL supports quoted function names.
def supports_quoted_function_names?
true
end
# Concatenate the expressions with a space in between
def full_text_string_join(cols)
cols = Array(cols).map{|x| SQL::Function.new(:COALESCE, x, '')}
cols = cols.zip([' '] * cols.length).flatten
cols.pop
SQL::StringExpression.new(:'||', *cols)
end
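# For example, for cols of [:title, :body], the returned expression
# literalizes (identifier quoting omitted) as:
#
#   COALESCE(title, '') || ' ' || COALESCE(body, '')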
# Use FROM to specify additional tables in an update query
def update_from_sql(sql)
join_from_sql(:FROM, sql)
end
# Only include the primary table in the main update clause
def update_table_sql(sql)
sql << ' '
source_list_append(sql, @opts[:from][0..0])
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/shared/sqlanywhere.rb 0000664 0000000 0000000 00000034235 14342141206 0023113 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/columns_limit_1'
module Sequel
module SqlAnywhere
Sequel::Database.set_shared_adapter_scheme(:sqlanywhere, self)
module DatabaseMethods
attr_reader :conversion_procs
# Whether to convert the smallint type to boolean for this Database instance
attr_accessor :convert_smallint_to_bool
def database_type
:sqlanywhere
end
def freeze
@conversion_procs.freeze
super
end
def to_application_timestamp_sa(v)
to_application_timestamp(v.to_s) if v
end
def schema_parse_table(table, opts)
m = output_identifier_meth(opts[:dataset])
im = input_identifier_meth(opts[:dataset])
metadata_dataset.
from{sa_describe_query("select * from #{im.call(table)}").as(:a)}.
join(Sequel[:syscolumn].as(:b), :table_id=>:base_table_id, :column_id=>:base_column_id).
order{a[:column_number]}.
map do |row|
auto_increment = row.delete(:is_autoincrement)
row[:auto_increment] = auto_increment == 1 || auto_increment == true
row[:primary_key] = row.delete(:pkey) == 'Y'
row[:allow_null] = row[:nulls_allowed].is_a?(Integer) ? row.delete(:nulls_allowed) == 1 : row.delete(:nulls_allowed)
row[:db_type] = row.delete(:domain_name)
row[:type] = if row[:db_type] =~ /numeric/i and (row[:scale].is_a?(Integer) ? row[:scale] == 0 : !row[:scale])
:integer
else
schema_column_type(row[:db_type])
end
row[:max_length] = row[:width] if row[:type] == :string
[m.call(row.delete(:name)), row]
end
end
def indexes(table, opts = OPTS)
m = output_identifier_meth
im = input_identifier_meth
table = table.value if table.is_a?(Sequel::SQL::Identifier)
indexes = {}
metadata_dataset.
from(Sequel[:dbo][:sysobjects].as(:z)).
select{[
z[:name].as(:table_name),
i[:name].as(:index_name),
si[:indextype].as(:type),
si[:colnames].as(:columns)]}.
join(Sequel[:dbo][:sysindexes].as(:i), :id=>:id).
join(Sequel[:sys][:sysindexes].as(:si), :iname=> :name).
where{{z[:type] => 'U', :table_name=>im.call(table)}}.
each do |r|
indexes[m.call(r[:index_name])] =
{:unique=>(r[:type].downcase=='unique'),
:columns=>r[:columns].split(',').map{|v| m.call(v.split(' ').first)}} unless r[:type].downcase == 'primary key'
end
indexes
end
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
im = input_identifier_meth
fk_indexes = {}
metadata_dataset.
from{sys[:sysforeignkey].as(:fk)}.
select{[
fk[:role].as(:name),
fks[:columns].as(:column_map),
si[:indextype].as(:type),
si[:colnames].as(:columns),
fks[:primary_tname].as(:table_name)]}.
join(Sequel[:sys][:sysforeignkeys].as(:fks), :role => :role).
join(Sequel[:sys][:sysindexes].as(:si), {:iname => Sequel[:fk][:role]}, {:implicit_qualifier => :fk}).
where{{fks[:foreign_tname]=>im.call(table)}}.
each do |r|
unless r[:type].downcase == 'primary key'
fk_indexes[r[:name]] =
{:name=>m.call(r[:name]),
:columns=>r[:columns].split(',').map{|v| m.call(v.split(' ').first)},
:table=>m.call(r[:table_name]),
:key=>r[:column_map].split(',').map{|v| m.call(v.split(' IS ').last)}}
end
end
fk_indexes.values
end
def tables(opts=OPTS)
tables_and_views('U', opts)
end
def views(opts=OPTS)
tables_and_views('V', opts)
end
private
DATABASE_ERROR_REGEXPS = {
/would not be unique|Primary key for table.+is not unique/ => Sequel::UniqueConstraintViolation,
/Column .* in table .* cannot be NULL/ => Sequel::NotNullConstraintViolation,
/Constraint .* violated: Invalid value in table .*/ => Sequel::CheckConstraintViolation,
/No primary key value for foreign key .* in table .*/ => Sequel::ForeignKeyConstraintViolation,
/Primary key for row in table .* is referenced by foreign key .* in table .*/ => Sequel::ForeignKeyConstraintViolation
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# Sybase uses the IDENTITY column for autoincrementing columns.
def auto_increment_sql
'IDENTITY'
end
# Sybase does not allow adding primary key constraints to NULLable columns.
def can_add_primary_key_constraint_on_nullable_columns?
false
end
def temporary_table_sql
"GLOBAL TEMPORARY "
end
def begin_transaction_sql
"BEGIN TRANSACTION"
end
def rollback_transaction_sql
"IF @@TRANCOUNT > 0 ROLLBACK TRANSACTION"
end
def commit_transaction_sql
"COMMIT TRANSACTION"
end
# Sybase has both datetime and timestamp classes; most people are going
# to want datetime
def type_literal_generic_datetime(column)
:datetime
end
# Sybase doesn't have a true boolean class, so it uses integer
def type_literal_generic_trueclass(column)
:smallint
end
# SQLAnywhere uses image type for blobs
def type_literal_generic_file(column)
:image
end
def alter_table_sql(table, op)
case op[:op]
when :add_column
"ALTER TABLE #{quote_schema_table(table)} ADD #{column_definition_sql(op)}"
when :drop_column
"ALTER TABLE #{quote_schema_table(table)} DROP #{column_definition_sql(op)}"
when :drop_constraint
case op[:type]
when :primary_key
"ALTER TABLE #{quote_schema_table(table)} DROP PRIMARY KEY"
when :foreign_key
if op[:name] || op[:columns]
name = op[:name] || foreign_key_name(table, op[:columns])
if name
"ALTER TABLE #{quote_schema_table(table)} DROP FOREIGN KEY #{quote_identifier(name)}"
end
end
else
super
end
when :rename_column
"ALTER TABLE #{quote_schema_table(table)} RENAME #{quote_identifier(op[:name])} TO #{quote_identifier(op[:new_name].to_s)}"
when :set_column_type
"ALTER TABLE #{quote_schema_table(table)} ALTER #{quote_identifier(op[:name])} #{type_literal(op)}"
when :set_column_null
"ALTER TABLE #{quote_schema_table(table)} ALTER #{quote_identifier(op[:name])} #{'NOT ' unless op[:null]}NULL"
when :set_column_default
"ALTER TABLE #{quote_schema_table(table)} ALTER #{quote_identifier(op[:name])} DEFAULT #{literal(op[:default])}"
else
super(table, op)
end
end
# SQLAnywhere tinyint types are unsigned.
def column_schema_tinyint_type_is_unsigned?
true
end
# SqlAnywhere doesn't support CREATE TABLE AS; it only supports SELECT INTO.
# Emulating CREATE TABLE AS using SELECT INTO is only possible if a dataset
# is given as the argument; it can't work with a string, so raise an
# Error if a string is given.
def create_table_as(name, ds, options)
raise(Error, "must provide dataset instance as value of create_table :as option on SqlAnywhere") unless ds.is_a?(Sequel::Dataset)
run(ds.into(name).sql)
end
# Use SP_RENAME to rename the table
def rename_table_sql(name, new_name)
"ALTER TABLE #{quote_schema_table(name)} RENAME #{quote_schema_table(new_name)}"
end
# Convert smallint type to boolean if convert_smallint_to_bool is true
def schema_column_type(db_type)
if convert_smallint_to_bool && db_type =~ /smallint/i
:boolean
elsif db_type =~ /unsigned (big)?int/i
:integer
else
super
end
end
def tables_and_views(type, opts=OPTS)
m = output_identifier_meth
metadata_dataset.
from{sysobjects.as(:a)}.
where{{a[:type]=>type}}.
select_map{a[:name]}.
map{|n| m.call(n)}
end
# SQLAnywhere supports views with check option, but not local.
def view_with_check_option_support
true
end
end
module DatasetMethods
Dataset.def_sql_method(self, :insert, %w'insert into columns values')
Dataset.def_sql_method(self, :select, %w'with select distinct limit columns into from join where group having window compounds order lock')
include ::Sequel::Dataset::ColumnsLimit1
# Whether to convert smallint to boolean arguments for this dataset.
# Defaults to the Database setting.
def convert_smallint_to_bool
opts.has_key?(:convert_smallint_to_bool) ? opts[:convert_smallint_to_bool] : db.convert_smallint_to_bool
end
# Return a cloned dataset with the convert_smallint_to_bool option set.
def with_convert_smallint_to_bool(v)
clone(:convert_smallint_to_bool=>v)
end
def supports_cte?(type=:select)
type == :select
end
# SQLAnywhere supports GROUPING SETS
def supports_grouping_sets?
true
end
def supports_multiple_column_in?
false
end
def supports_where_true?
false
end
def supports_is_true?
false
end
def supports_join_using?
false
end
def supports_timestamp_usecs?
false
end
def supports_window_clause?
true
end
def supports_window_functions?
true
end
# Uses CROSS APPLY to join the given table into the current dataset.
def cross_apply(table)
join_table(:cross_apply, table)
end
# SqlAnywhere requires recursive CTEs to have column aliases.
def recursive_cte_requires_column_aliases?
true
end
def complex_expression_sql_append(sql, op, args)
case op
when :'||'
super(sql, :+, args)
when :<<, :>>
complex_expression_emulate_append(sql, op, args)
when :LIKE, :"NOT LIKE"
sql << '('
literal_append(sql, args[0])
sql << (op == :LIKE ? ' REGEXP ' : ' NOT REGEXP ')
pattern = String.new
last_c = ''
args[1].each_char do |c|
if c == '_' and not pattern.end_with?('\\') and last_c != '\\'
pattern << '.'
elsif c == '%' and not pattern.end_with?('\\') and last_c != '\\'
pattern << '.*'
elsif c == '[' and not pattern.end_with?('\\') and last_c != '\\'
pattern << '\['
elsif c == ']' and not pattern.end_with?('\\') and last_c != '\\'
pattern << '\]'
elsif c == '*' and not pattern.end_with?('\\') and last_c != '\\'
pattern << '\*'
elsif c == '?' and not pattern.end_with?('\\') and last_c != '\\'
pattern << '\?'
else
pattern << c
end
if c == '\\' and last_c == '\\'
last_c = ''
else
last_c = c
end
end
literal_append(sql, pattern)
sql << " ESCAPE "
literal_append(sql, "\\")
sql << ')'
when :ILIKE, :"NOT ILIKE"
super(sql, (op == :ILIKE ? :LIKE : :"NOT LIKE"), args)
when :extract
sql << 'datepart('
literal_append(sql, args[0])
sql << ','
literal_append(sql, args[1])
sql << ')'
else
super
end
end
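# A sketch of the LIKE emulation above: the pattern is rewritten into a
# regexp, so (identifier quoting omitted) something like:
#
#   DB[:t].where(Sequel.like(:name, 'a%_c')).sql
#   # ... WHERE (name REGEXP 'a.*.c' ESCAPE '\\') ...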
# SqlAnywhere uses \\ to escape metacharacters, but a ']' should not be escaped
def escape_like(string)
string.gsub(/[\\%_\[]/){|m| "\\#{m}"}
end
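# For example, escape_like("50%_") returns 50\%\_, with each LIKE
# metacharacter prefixed by a backslash.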
# Use today() for CURRENT_DATE and now() for CURRENT_TIMESTAMP and CURRENT_TIME
def constant_sql_append(sql, constant)
case constant
when :CURRENT_DATE
sql << 'today()'
when :CURRENT_TIMESTAMP, :CURRENT_TIME
sql << 'now()'
else
super
end
end
# Specify a table for a SELECT ... INTO query.
def into(table)
clone(:into => table)
end
private
# Use 1 for true on Sybase
def literal_true
'1'
end
# Use 0 for false on Sybase
def literal_false
'0'
end
# SQL fragment for String. Doubles \ and ' by default.
def literal_string_append(sql, v)
sql << "'" << v.gsub("\\", "\\\\\\\\").gsub("'", "''") << "'"
end
# SqlAnywhere uses a preceding 0x for hex escaping blobs
def literal_blob_append(sql, v)
if v.empty?
literal_append(sql, "")
else
sql << "0x" << v.unpack("H*").first
end
end
# Sybase supports multiple rows in INSERT.
def multi_insert_sql_strategy
:values
end
# SQLAnywhere does not natively support NULLS FIRST/LAST.
def requires_emulating_nulls_first?
true
end
def select_into_sql(sql)
if i = @opts[:into]
sql << " INTO "
identifier_append(sql, i)
end
end
# Sybase uses TOP N for limit.
def select_limit_sql(sql)
l = @opts[:limit]
o = @opts[:offset]
if l || o
if l
sql << " TOP "
literal_append(sql, l)
else
sql << " TOP 2147483647"
end
if o
sql << " START AT ("
literal_append(sql, o)
sql << " + 1)"
end
end
end
# Use WITH RECURSIVE instead of WITH if any of the CTEs is recursive
def select_with_sql_base
opts[:with].any?{|w| w[:recursive]} ? "WITH RECURSIVE " : super
end
def join_type_sql(join_type)
case join_type
when :cross_apply
'CROSS APPLY'
when :outer_apply
'OUTER APPLY'
else
super
end
end
# SQLAnywhere supports millisecond timestamp precision.
def timestamp_precision
3
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/shared/sqlite.rb 0000664 0000000 0000000 00000116214 14342141206 0022050 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../utils/replace'
require_relative '../utils/unmodified_identifiers'
module Sequel
module SQLite
Sequel::Database.set_shared_adapter_scheme(:sqlite, self)
def self.mock_adapter_setup(db)
db.instance_exec do
@sqlite_version = 30903
def schema_parse_table(*)
[]
end
singleton_class.send(:private, :schema_parse_table)
end
end
# No matter how you connect to SQLite, the following Database options
# can be used to set PRAGMAs on connections in a thread-safe manner:
# :auto_vacuum, :foreign_keys, :synchronous, and :temp_store.
module DatabaseMethods
include UnmodifiedIdentifiers::DatabaseMethods
AUTO_VACUUM = [:none, :full, :incremental].freeze
SYNCHRONOUS = [:off, :normal, :full].freeze
TEMP_STORE = [:default, :file, :memory].freeze
TRANSACTION_MODE = {
:deferred => "BEGIN DEFERRED TRANSACTION".freeze,
:immediate => "BEGIN IMMEDIATE TRANSACTION".freeze,
:exclusive => "BEGIN EXCLUSIVE TRANSACTION".freeze,
nil => "BEGIN".freeze
}.freeze
# Whether to use integers for booleans in the database. SQLite recommends
# booleans be stored as integers, but historically Sequel has used 't'/'f'.
attr_accessor :integer_booleans
# Whether to keep CURRENT_TIMESTAMP and similar expressions in UTC. By
# default, the expressions are converted to localtime.
attr_accessor :current_timestamp_utc
# A symbol signifying the value of the default transaction mode
attr_reader :transaction_mode
# Set the default transaction mode.
def transaction_mode=(value)
if TRANSACTION_MODE.include?(value)
@transaction_mode = value
else
raise Error, "Invalid value for transaction_mode. Please specify one of :deferred, :immediate, :exclusive, nil"
end
end
# SQLite uses the :sqlite database type.
def database_type
:sqlite
end
# Set the integer_booleans option using the passed in :integer_boolean option.
def set_integer_booleans
@integer_booleans = @opts.has_key?(:integer_booleans) ? typecast_value_boolean(@opts[:integer_booleans]) : true
end
# Return the array of foreign key info hashes using the foreign_key_list PRAGMA,
# including information for the :on_update and :on_delete entries.
def foreign_key_list(table, opts=OPTS)
m = output_identifier_meth
h = {}
_foreign_key_list_ds(table).each do |row|
if r = h[row[:id]]
r[:columns] << m.call(row[:from])
r[:key] << m.call(row[:to]) if r[:key]
else
h[row[:id]] = {:columns=>[m.call(row[:from])], :table=>m.call(row[:table]), :key=>([m.call(row[:to])] if row[:to]), :on_update=>on_delete_sql_to_sym(row[:on_update]), :on_delete=>on_delete_sql_to_sym(row[:on_delete])}
end
end
h.values
end
def freeze
sqlite_version
use_timestamp_timezones?
super
end
# Use the index_list and index_info PRAGMAs to determine the indexes on the table.
def indexes(table, opts=OPTS)
m = output_identifier_meth
im = input_identifier_meth
indexes = {}
table = table.value if table.is_a?(Sequel::SQL::Identifier)
metadata_dataset.with_sql("PRAGMA index_list(?)", im.call(table)).each do |r|
if opts[:only_autocreated]
# If specifically asked for only autocreated indexes, then return those and only those
next unless r[:name] =~ /\Asqlite_autoindex_/
elsif r.has_key?(:origin)
# If origin is set, then only exclude primary key indexes and partial indexes
next if r[:origin] == 'pk'
next if r[:partial].to_i == 1
else
# When :origin key not present, assume any autoindex could be a primary key one and exclude it
next if r[:name] =~ /\Asqlite_autoindex_/
end
indexes[m.call(r[:name])] = {:unique=>r[:unique].to_i==1}
end
indexes.each do |k, v|
v[:columns] = metadata_dataset.with_sql("PRAGMA index_info(?)", im.call(k)).map(:name).map{|x| m.call(x)}
end
indexes
end
# The version of the server as an integer, where 3.6.19 = 30619.
# If the server version can't be determined, 0 is used.
def sqlite_version
return @sqlite_version if defined?(@sqlite_version)
@sqlite_version = begin
v = fetch('SELECT sqlite_version()').single_value
[10000, 100, 1].zip(v.split('.')).inject(0){|a, m| a + m[0] * Integer(m[1])}
rescue
0
end
end
# SQLite supports CREATE TABLE IF NOT EXISTS syntax since 3.3.0.
def supports_create_table_if_not_exists?
sqlite_version >= 30300
end
# SQLite 3.6.19+ supports deferrable foreign key constraints.
def supports_deferrable_foreign_key_constraints?
sqlite_version >= 30619
end
# SQLite 3.8.0+ supports partial indexes.
def supports_partial_indexes?
sqlite_version >= 30800
end
# SQLite 3.6.8+ supports savepoints.
def supports_savepoints?
sqlite_version >= 30608
end
# Override the default setting for whether to use timezones in timestamps.
# It is set to +false+ by default, as SQLite's date/time methods do not
# support timezones in timestamps.
attr_writer :use_timestamp_timezones
# SQLite supports timezones in timestamps, since it just stores them as strings,
# but using them breaks the usage of SQLite's datetime functions.
def use_timestamp_timezones?
defined?(@use_timestamp_timezones) ? @use_timestamp_timezones : (@use_timestamp_timezones = false)
end
# Array of symbols specifying the table names in the current database.
#
# Options:
# :server :: Set the server to use.
def tables(opts=OPTS)
tables_and_views(Sequel.~(:name=>'sqlite_sequence') & {:type => 'table'}, opts)
end
# Creates a dataset that uses the VALUES clause:
#
# DB.values([[1, 2], [3, 4]])
# # VALUES ((1, 2), (3, 4))
def values(v)
@default_dataset.clone(:values=>v)
end
# Array of symbols specifying the view names in the current database.
#
# Options:
# :server :: Set the server to use.
def views(opts=OPTS)
tables_and_views({:type => 'view'}, opts)
end
private
# Dataset used for parsing foreign key lists
def _foreign_key_list_ds(table)
metadata_dataset.with_sql("PRAGMA foreign_key_list(?)", input_identifier_meth.call(table))
end
# Dataset used for parsing schema
def _parse_pragma_ds(table_name, opts)
metadata_dataset.with_sql("PRAGMA table_#{'x' if sqlite_version > 33100}info(?)", input_identifier_meth(opts[:dataset]).call(table_name))
end
# Run all alter_table commands in a transaction. This is technically only
# needed for drop column.
def apply_alter_table(table, ops)
fks = fetch("PRAGMA foreign_keys")
if fks
run "PRAGMA foreign_keys = 0"
run "PRAGMA legacy_alter_table = 1" if sqlite_version >= 32600
end
transaction do
if ops.length > 1 && ops.all?{|op| op[:op] == :add_constraint || op[:op] == :set_column_null}
null_ops, ops = ops.partition{|op| op[:op] == :set_column_null}
# Apply NULL/NOT NULL ops first, since those should be purely independent of the constraints.
null_ops.each{|op| alter_table_sql_list(table, [op]).flatten.each{|sql| execute_ddl(sql)}}
# If you are just doing constraints, apply all of them at the same time,
# as otherwise all but the last one get lost.
alter_table_sql_list(table, [{:op=>:add_constraints, :ops=>ops}]).flatten.each{|sql| execute_ddl(sql)}
else
# Run each operation separately, as later operations may depend on the
# results of earlier operations.
ops.each{|op| alter_table_sql_list(table, [op]).flatten.each{|sql| execute_ddl(sql)}}
end
end
remove_cached_schema(table)
ensure
if fks
run "PRAGMA foreign_keys = 1"
run "PRAGMA legacy_alter_table = 0" if sqlite_version >= 32600
end
end
# SQLite supports limited table modification. You can add a column
# or an index. Dropping columns is supported by copying the table into
# a temporary table, dropping the table, and creating a new table without
# the column inside of a transaction.
def alter_table_sql(table, op)
case op[:op]
when :add_index, :drop_index
super
when :add_column
if op[:unique] || op[:primary_key]
duplicate_table(table){|columns| columns.push(op)}
else
super
end
when :drop_column
if sqlite_version >= 33500
super
else
ocp = lambda{|oc| oc.delete_if{|c| c.to_s == op[:name].to_s}}
duplicate_table(table, :old_columns_proc=>ocp){|columns| columns.delete_if{|s| s[:name].to_s == op[:name].to_s}}
end
when :rename_column
if sqlite_version >= 32500
super
else
ncp = lambda{|nc| nc.map!{|c| c.to_s == op[:name].to_s ? op[:new_name] : c}}
duplicate_table(table, :new_columns_proc=>ncp){|columns| columns.each{|s| s[:name] = op[:new_name] if s[:name].to_s == op[:name].to_s}}
end
when :set_column_default
duplicate_table(table){|columns| columns.each{|s| s[:default] = op[:default] if s[:name].to_s == op[:name].to_s}}
when :set_column_null
duplicate_table(table){|columns| columns.each{|s| s[:null] = op[:null] if s[:name].to_s == op[:name].to_s}}
when :set_column_type
duplicate_table(table){|columns| columns.each{|s| s.merge!(op) if s[:name].to_s == op[:name].to_s}}
when :drop_constraint
case op[:type]
when :primary_key
duplicate_table(table) do |columns|
columns.each do |s|
s[:unique] = false if s[:primary_key]
s[:primary_key] = s[:auto_increment] = nil
end
end
when :foreign_key
if op[:columns]
duplicate_table(table, :skip_foreign_key_columns=>op[:columns])
else
duplicate_table(table, :no_foreign_keys=>true)
end
when :unique
duplicate_table(table, :no_unique=>true)
else
duplicate_table(table)
end
when :add_constraint
duplicate_table(table, :constraints=>[op])
when :add_constraints
duplicate_table(table, :constraints=>op[:ops])
else
raise Error, "Unsupported ALTER TABLE operation: #{op[:op].inspect}"
end
end
def begin_new_transaction(conn, opts)
mode = opts[:mode] || @transaction_mode
sql = TRANSACTION_MODE[mode] or raise Error, "transaction :mode must be one of: :deferred, :immediate, :exclusive, nil"
log_connection_execute(conn, sql)
set_transaction_isolation(conn, opts)
end
# A name to use for the backup table
def backup_table_name(table, opts=OPTS)
table = table.gsub('`', '')
(opts[:times]||1000).times do |i|
table_name = "#{table}_backup#{i}"
return table_name unless table_exists?(table_name)
end
end
# SQLite allows adding primary key constraints on NULLABLE columns, but then
# does not enforce NOT NULL for such columns, so force setting the columns NOT NULL.
def can_add_primary_key_constraint_on_nullable_columns?
false
end
# Surround default with parens to appease SQLite. Add support for GENERATED ALWAYS AS.
def column_definition_default_sql(sql, column)
sql << " DEFAULT (#{literal(column[:default])})" if column.include?(:default)
if (generated = column[:generated_always_as])
if (generated_type = column[:generated_type]) && (generated_type == :stored || generated_type == :virtual)
generated_type = generated_type.to_s.upcase
end
sql << " GENERATED ALWAYS AS (#{literal(generated)}) #{generated_type}"
end
end
# SQLite does not restrict the integer type to a specific range.
def column_schema_integer_min_max_values(db_type)
nil
end
# Array of PRAGMA SQL statements based on the Database options that should be applied to
# new connections.
def connection_pragmas
ps = []
v = typecast_value_boolean(opts.fetch(:foreign_keys, 1))
ps << "PRAGMA foreign_keys = #{v ? 1 : 0}"
v = typecast_value_boolean(opts.fetch(:case_sensitive_like, 1))
ps << "PRAGMA case_sensitive_like = #{v ? 1 : 0}"
[[:auto_vacuum, AUTO_VACUUM], [:synchronous, SYNCHRONOUS], [:temp_store, TEMP_STORE]].each do |prag, con|
if v = opts[prag]
raise(Error, "Value for PRAGMA #{prag} not supported, should be one of #{con.join(', ')}") unless v = con.index(v.to_sym)
ps << "PRAGMA #{prag} = #{v}"
end
end
ps
end
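# For example, a Database created with synchronous: :full would add
# "PRAGMA synchronous = 2" (the index of :full in SYNCHRONOUS), in
# addition to the foreign_keys and case_sensitive_like pragmas, which
# both default to 1.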
# Support creating STRICT tables via :strict option
def create_table_sql(name, generator, options)
"#{super}#{' STRICT' if options[:strict]}"
end
# SQLite supports creating temporary views.
def create_view_prefix_sql(name, options)
create_view_sql_append_columns("CREATE #{'TEMPORARY ' if options[:temp]}VIEW #{quote_schema_table(name)}", options[:columns])
end
DATABASE_ERROR_REGEXPS = {
/(is|are) not unique\z|PRIMARY KEY must be unique\z|UNIQUE constraint failed: .+\z/ => UniqueConstraintViolation,
/foreign key constraint failed\z/i => ForeignKeyConstraintViolation,
/\A(SQLITE ERROR 275 \(CONSTRAINT_CHECK\) : )?CHECK constraint failed/ => CheckConstraintViolation,
/\A(SQLITE ERROR 19 \(CONSTRAINT\) : )?constraint failed\z/ => ConstraintViolation,
/\Acannot store [A-Z]+ value in [A-Z]+ column / => ConstraintViolation,
/may not be NULL\z|NOT NULL constraint failed: .+\z/ => NotNullConstraintViolation,
/\ASQLITE ERROR \d+ \(\) : CHECK constraint failed: / => CheckConstraintViolation
}.freeze
def database_error_regexps
DATABASE_ERROR_REGEXPS
end
# Recognize SQLite error codes if the exception provides access to them.
def database_specific_error_class(exception, opts)
case sqlite_error_code(exception)
when 1299
NotNullConstraintViolation
when 1555, 2067, 2579
UniqueConstraintViolation
when 787
ForeignKeyConstraintViolation
when 275
CheckConstraintViolation
when 19
ConstraintViolation
when 517
SerializationFailure
else
super
end
end
# The array of column schema hashes for the current columns in the table
def defined_columns_for(table)
cols = parse_pragma(table, OPTS)
cols.each do |c|
c[:default] = LiteralString.new(c[:default]) if c[:default]
c[:type] = c[:db_type]
end
cols
end
# Duplicate an existing table by creating a new table, copying all records
# from the existing table into the new table, deleting the existing table
# and renaming the new table to the existing table's name.
def duplicate_table(table, opts=OPTS)
remove_cached_schema(table)
def_columns = defined_columns_for(table)
old_columns = def_columns.map{|c| c[:name]}
opts[:old_columns_proc].call(old_columns) if opts[:old_columns_proc]
yield def_columns if defined?(yield)
constraints = (opts[:constraints] || []).dup
pks = []
def_columns.each{|c| pks << c[:name] if c[:primary_key]}
if pks.length > 1
constraints << {:type=>:primary_key, :columns=>pks}
def_columns.each{|c| c[:primary_key] = false if c[:primary_key]}
end
# If dropping a foreign key constraint, drop all foreign key constraints,
# as there is no way to determine which one to drop.
unless opts[:no_foreign_keys]
fks = foreign_key_list(table)
# If dropping a column, if there is a foreign key with that
# column, don't include it when building a copy of the table.
if ocp = opts[:old_columns_proc]
fks.delete_if{|c| ocp.call(c[:columns].dup) != c[:columns]}
end
# Skip any foreign key columns where a constraint for those
# foreign keys is being dropped.
if sfkc = opts[:skip_foreign_key_columns]
fks.delete_if{|c| c[:columns] == sfkc}
end
constraints.concat(fks.each{|h| h[:type] = :foreign_key})
end
# Determine unique constraints and make sure the new columns have them
unique_columns = []
skip_indexes = []
indexes(table, :only_autocreated=>true).each do |name, h|
skip_indexes << name
if h[:unique] && !opts[:no_unique]
if h[:columns].length == 1
unique_columns.concat(h[:columns])
elsif h[:columns].map(&:to_s) != pks
constraints << {:type=>:unique, :columns=>h[:columns]}
end
end
end
unique_columns -= pks
unless unique_columns.empty?
unique_columns.map!{|c| quote_identifier(c)}
def_columns.each do |c|
c[:unique] = true if unique_columns.include?(quote_identifier(c[:name])) && c[:unique] != false
end
end
def_columns_str = (def_columns.map{|c| column_definition_sql(c)} + constraints.map{|c| constraint_definition_sql(c)}).join(', ')
new_columns = old_columns.dup
opts[:new_columns_proc].call(new_columns) if opts[:new_columns_proc]
qt = quote_schema_table(table)
bt = quote_identifier(backup_table_name(qt))
a = [
"ALTER TABLE #{qt} RENAME TO #{bt}",
"CREATE TABLE #{qt}(#{def_columns_str})",
"INSERT INTO #{qt}(#{dataset.send(:identifier_list, new_columns)}) SELECT #{dataset.send(:identifier_list, old_columns)} FROM #{bt}",
"DROP TABLE #{bt}"
]
indexes(table).each do |name, h|
next if skip_indexes.include?(name)
if (h[:columns].map(&:to_s) - new_columns).empty?
a << alter_table_sql(table, h.merge(:op=>:add_index, :name=>name))
end
end
a
end
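# For example, dropping a column on an SQLite version without native
# support produces a statement sequence roughly like:
#
#   ALTER TABLE `t` RENAME TO `t_backup0`
#   CREATE TABLE `t`(...)
#   INSERT INTO `t`(...) SELECT ... FROM `t_backup0`
#   DROP TABLE `t_backup0`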
# Does the reverse of on_delete_clause, e.g. converts strings like +'SET NULL'+
# to symbols +:set_null+.
def on_delete_sql_to_sym(str)
case str
when 'RESTRICT'
:restrict
when 'CASCADE'
:cascade
when 'SET NULL'
:set_null
when 'SET DEFAULT'
:set_default
when 'NO ACTION'
:no_action
end
end
# Parse the output of the table_info pragma
def parse_pragma(table_name, opts)
pks = 0
sch = _parse_pragma_ds(table_name, opts).map do |row|
if sqlite_version > 33100
# table_xinfo PRAGMA used, remove hidden columns
# that are not generated columns
if row[:generated] = (row.delete(:hidden) != 0)
next unless row[:type].end_with?(' GENERATED ALWAYS')
row[:type] = row[:type].sub(' GENERATED ALWAYS', '')
end
end
row.delete(:cid)
row[:allow_null] = row.delete(:notnull).to_i == 0
row[:default] = row.delete(:dflt_value)
row[:default] = nil if blank_object?(row[:default]) || row[:default] == 'NULL'
row[:db_type] = row.delete(:type)
if row[:primary_key] = row.delete(:pk).to_i > 0
pks += 1
# Guess that an integer primary key uses auto increment,
# since that is Sequel's default and SQLite does not provide
# a way to introspect whether it is actually autoincrementing.
row[:auto_increment] = row[:db_type].downcase == 'integer'
end
row[:type] = schema_column_type(row[:db_type])
row
end
sch.compact!
if pks > 1
# SQLite does not allow use of auto increment for tables
# with composite primary keys, so remove auto_increment
# if composite primary keys are detected.
sch.each{|r| r.delete(:auto_increment)}
end
sch
end
# SQLite supports schema parsing using the table_info PRAGMA, so
# parse the output of that into the format Sequel expects.
def schema_parse_table(table_name, opts)
m = output_identifier_meth(opts[:dataset])
parse_pragma(table_name, opts).map do |row|
[m.call(row.delete(:name)), row]
end
end
# Don't support SQLite error codes for exceptions by default.
def sqlite_error_code(exception)
nil
end
# Backbone of the tables and views support.
def tables_and_views(filter, opts)
m = output_identifier_meth
metadata_dataset.from(:sqlite_master).server(opts[:server]).where(filter).map{|r| m.call(r[:name])}
end
# SQLite only supports AUTOINCREMENT on integer columns, not
# bigint columns, so use integer instead of bigint for those
# columns.
def type_literal_generic_bignum_symbol(column)
column[:auto_increment] ? :integer : super
end
end
module DatasetMethods
include Dataset::Replace
include UnmodifiedIdentifiers::DatasetMethods
# The allowed values for insert_conflict
INSERT_CONFLICT_RESOLUTIONS = %w'ROLLBACK ABORT FAIL IGNORE REPLACE'.each(&:freeze).freeze
CONSTANT_MAP = {:CURRENT_DATE=>"date(CURRENT_TIMESTAMP, 'localtime')".freeze, :CURRENT_TIMESTAMP=>"datetime(CURRENT_TIMESTAMP, 'localtime')".freeze, :CURRENT_TIME=>"time(CURRENT_TIMESTAMP, 'localtime')".freeze}.freeze
EXTRACT_MAP = {:year=>"'%Y'", :month=>"'%m'", :day=>"'%d'", :hour=>"'%H'", :minute=>"'%M'", :second=>"'%f'"}.freeze
EXTRACT_MAP.each_value(&:freeze)
Dataset.def_sql_method(self, :delete, [['if db.sqlite_version >= 33500', %w'with delete from where returning'], ['elsif db.sqlite_version >= 30803', %w'with delete from where'], ["else", %w'delete from where']])
Dataset.def_sql_method(self, :insert, [['if db.sqlite_version >= 33500', %w'with insert conflict into columns values on_conflict returning'], ['elsif db.sqlite_version >= 30803', %w'with insert conflict into columns values on_conflict'], ["else", %w'insert conflict into columns values']])
Dataset.def_sql_method(self, :select, [['if opts[:values]', %w'with values compounds'], ['else', %w'with select distinct columns from join where group having window compounds order limit lock']])
Dataset.def_sql_method(self, :update, [['if db.sqlite_version >= 33500', %w'with update table set from where returning'], ['elsif db.sqlite_version >= 33300', %w'with update table set from where'], ['elsif db.sqlite_version >= 30803', %w'with update table set where'], ["else", %w'update table set where']])
def cast_sql_append(sql, expr, type)
if type == Time or type == DateTime
sql << "datetime("
literal_append(sql, expr)
sql << ')'
elsif type == Date
sql << "date("
literal_append(sql, expr)
sql << ')'
else
super
end
end
# SQLite doesn't support a NOT LIKE b; you need to use NOT (a LIKE b).
# It doesn't support xor, power, or the extract function natively, so those have to be emulated.
def complex_expression_sql_append(sql, op, args)
case op
when :"NOT LIKE", :"NOT ILIKE"
sql << 'NOT '
complex_expression_sql_append(sql, (op == :"NOT ILIKE" ? :ILIKE : :LIKE), args)
when :^
complex_expression_arg_pairs_append(sql, args){|a, b| Sequel.lit(["((~(", " & ", ")) & (", " | ", "))"], a, b, a, b)}
when :**
unless (exp = args[1]).is_a?(Integer)
raise(Sequel::Error, "can only emulate exponentiation on SQLite if exponent is an integer, given #{exp.inspect}")
end
case exp
when 0
sql << '1'
else
sql << '('
arg = args[0]
if exp < 0
invert = true
exp = exp.abs
sql << '(1.0 / ('
end
(exp - 1).times do
literal_append(sql, arg)
sql << " * "
end
literal_append(sql, arg)
sql << ')'
if invert
sql << "))"
end
end
when :extract
part = args[0]
raise(Sequel::Error, "unsupported extract argument: #{part.inspect}") unless format = EXTRACT_MAP[part]
sql << "CAST(strftime(" << format << ', '
literal_append(sql, args[1])
sql << ') AS ' << (part == :second ? 'NUMERIC' : 'INTEGER') << ')'
else
super
end
end
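# Sketches of the emulations above (identifier quoting omitted):
#
#   Sequel[:x] ** 3              # (x * x * x)
#   Sequel[:x] ** -2             # ((1.0 / (x * x)))
#   Sequel.extract(:year, :c)    # CAST(strftime('%Y', c) AS INTEGER)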
# SQLite has CURRENT_TIMESTAMP and related constants in UTC instead
# of in localtime, so convert those constants to local time.
def constant_sql_append(sql, constant)
if (c = CONSTANT_MAP[constant]) && !db.current_timestamp_utc
sql << c
else
super
end
end
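# For example, unless the Database's current_timestamp_utc setting is
# true:
#
#   DB[:t].where{created_at < Sequel::CURRENT_TIMESTAMP}.sql
#   # ... WHERE (created_at < datetime(CURRENT_TIMESTAMP, 'localtime')) ...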
# SQLite performs a TRUNCATE style DELETE if no filter is specified.
# Since we want to always return the count of records, add a condition
# that is always true and then delete.
def delete(&block)
@opts[:where] ? super : where(1=>1).delete(&block)
end
# Return an array of strings specifying a query explanation for a SELECT of the
# current dataset. Currently, the options are ignored, but it accepts options
# to be compatible with other adapters.
def explain(opts=nil)
# Load the PrettyTable class, needed for explain output
Sequel.extension(:_pretty_table) unless defined?(Sequel::PrettyTable)
ds = db.send(:metadata_dataset).clone(:sql=>"EXPLAIN #{select_sql}")
rows = ds.all
Sequel::PrettyTable.string(rows, ds.columns)
end
# HAVING requires GROUP BY on SQLite
def having(*cond)
raise(InvalidOperation, "Can only specify a HAVING clause on a grouped dataset") if !@opts[:group] && db.sqlite_version < 33900
super
end
# Support insert select for associations, so that the model code can use
# returning instead of a separate query.
def insert_select(*values)
return unless supports_insert_select?
# Handle case where query does not return a row
server?(:default).with_sql_first(insert_select_sql(*values)) || false
end
# The SQL to use for an insert_select, adds a RETURNING clause to the insert
# unless the RETURNING clause is already present.
def insert_select_sql(*values)
ds = opts[:returning] ? self : returning
ds.insert_sql(*values)
end
# SQLite uses the nonstandard ` (backtick) for quoting identifiers.
def quoted_identifier_append(sql, c)
sql << '`' << c.to_s.gsub('`', '``') << '`'
end
# When a qualified column is selected on SQLite and the qualifier
# is a subselect, the column name used is the full qualified name
# (including the qualifier) instead of just the column name. To
# get correct column names, you must use an alias.
def select(*cols)
if ((f = @opts[:from]) && f.any?{|t| t.is_a?(Dataset) || (t.is_a?(SQL::AliasedExpression) && t.expression.is_a?(Dataset))}) || ((j = @opts[:join]) && j.any?{|t| t.table.is_a?(Dataset)})
super(*cols.map{|c| alias_qualified_column(c)})
else
super
end
end
# Handle uniqueness violations when inserting, by using a specified
# resolution algorithm. With no options, uses INSERT OR REPLACE. SQLite
# supports the following conflict resolution algoriths: ROLLBACK, ABORT,
# FAIL, IGNORE and REPLACE.
#
# On SQLite 3.24.0+, you can pass a hash to use an ON CONFLICT clause.
# Without an :update option, uses ON CONFLICT DO NOTHING. Options:
#
# :conflict_where :: The index filter, when using a partial index to determine uniqueness.
# :target :: The column name or expression to handle uniqueness violations on.
# :update :: A hash of columns and values to set. Uses ON CONFLICT DO UPDATE.
# :update_where :: A WHERE condition to use for the update.
#
# Examples:
#
# DB[:table].insert_conflict.insert(a: 1, b: 2)
# # INSERT OR IGNORE INTO TABLE (a, b) VALUES (1, 2)
#
# DB[:table].insert_conflict(:replace).insert(a: 1, b: 2)
# # INSERT OR REPLACE INTO TABLE (a, b) VALUES (1, 2)
#
# DB[:table].insert_conflict({}).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT DO NOTHING
#
# DB[:table].insert_conflict(target: :a).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT (a) DO NOTHING
#
# DB[:table].insert_conflict(target: :a, conflict_where: {c: true}).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT (a) WHERE (c IS TRUE) DO NOTHING
#
# DB[:table].insert_conflict(target: :a, update: {b: Sequel[:excluded][:b]}).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT (a) DO UPDATE SET b = excluded.b
#
# DB[:table].insert_conflict(target: :a,
# update: {b: Sequel[:excluded][:b]}, update_where: {Sequel[:table][:status_id] => 1}).insert(a: 1, b: 2)
# # INSERT INTO TABLE (a, b) VALUES (1, 2)
# # ON CONFLICT (a) DO UPDATE SET b = excluded.b WHERE (table.status_id = 1)
def insert_conflict(opts = :ignore)
case opts
when Symbol, String
unless INSERT_CONFLICT_RESOLUTIONS.include?(opts.to_s.upcase)
raise Error, "Invalid symbol or string passed to Dataset#insert_conflict: #{opts.inspect}. The allowed values are: :rollback, :abort, :fail, :ignore, or :replace"
end
clone(:insert_conflict => opts)
when Hash
clone(:insert_on_conflict => opts)
else
raise Error, "Invalid value passed to Dataset#insert_conflict: #{opts.inspect}, should use a symbol or a hash"
end
end
# Ignore uniqueness/exclusion violations when inserting, using INSERT OR IGNORE.
# Exists mostly for compatibility to MySQL's insert_ignore. Example:
#
# DB[:table].insert_ignore.insert(a: 1, b: 2)
# # INSERT OR IGNORE INTO TABLE (a, b) VALUES (1, 2)
def insert_ignore
insert_conflict(:ignore)
end
# Automatically add aliases to RETURNING values to work around an SQLite bug.
def returning(*values)
return super if values.empty?
raise Error, "RETURNING is not supported on #{db.database_type}" unless supports_returning?(:insert)
clone(:returning=>_returning_values(values).freeze)
end
# SQLite 3.8.3+ supports common table expressions.
def supports_cte?(type=:select)
db.sqlite_version >= 30803
end
# SQLite supports CTEs in subqueries if it supports CTEs.
def supports_cte_in_subqueries?
supports_cte?
end
# SQLite does not support table aliases with column aliases
def supports_derived_column_lists?
false
end
# SQLite does not support deleting from a joined dataset
def supports_deleting_joins?
false
end
# SQLite does not support INTERSECT ALL or EXCEPT ALL
def supports_intersect_except_all?
false
end
# SQLite does not support IS TRUE
def supports_is_true?
false
end
# SQLite 3.33.0 supports modifying joined datasets
def supports_modifying_joins?
db.sqlite_version >= 33300
end
# SQLite does not support multiple columns for the IN/NOT IN operators
def supports_multiple_column_in?
false
end
# SQLite 3.35.0 supports RETURNING on INSERT/UPDATE/DELETE.
def supports_returning?(_)
db.sqlite_version >= 33500
end
# SQLite supports timezones in literal timestamps, since it stores them
# as text. But using timezones in timestamps breaks SQLite datetime
# functions, so we allow the user to override the default per database.
def supports_timestamp_timezones?
db.use_timestamp_timezones?
end
# SQLite cannot use WHERE 't'.
def supports_where_true?
false
end
# SQLite 3.28+ supports the WINDOW clause.
def supports_window_clause?
db.sqlite_version >= 32800
end
# SQLite 3.25+ supports window functions. However, support is only enabled
# on SQLite 3.26.0+ because internal Sequel usage of window functions
# to implement eager loading of limited associations triggers
# an SQLite crash bug in versions 3.25.0-3.25.3.
def supports_window_functions?
db.sqlite_version >= 32600
end
# SQLite 3.28.0+ supports all window frame options that Sequel supports
def supports_window_function_frame_option?(option)
db.sqlite_version >= 32800 ? true : super
end
private
# Add aliases to symbols and identifiers to work around an SQLite bug.
def _returning_values(values)
values.map do |v|
case v
when Symbol
_, c, a = split_symbol(v)
a ? v : Sequel.as(v, c)
when SQL::Identifier, SQL::QualifiedIdentifier
Sequel.as(v, unqualified_column_for(v))
else
v
end
end
end
# SQLite uses string literals instead of identifiers in AS clauses.
def as_sql_append(sql, aliaz, column_aliases=nil)
raise Error, "sqlite does not support derived column lists" if column_aliases
aliaz = aliaz.value if aliaz.is_a?(SQL::Identifier)
sql << ' AS '
literal_append(sql, aliaz.to_s)
end
# If col is a qualified column, alias it to the same as the column name
def alias_qualified_column(col)
case col
when Symbol
t, c, a = split_symbol(col)
if t && !a
alias_qualified_column(SQL::QualifiedIdentifier.new(t, c))
else
col
end
when SQL::QualifiedIdentifier
SQL::AliasedExpression.new(col, col.column)
else
col
end
end
# Raise an InvalidOperation exception if insert is not allowed for this dataset.
def check_insert_allowed!
raise(InvalidOperation, "Grouped datasets cannot be modified") if opts[:group]
raise(InvalidOperation, "Joined datasets cannot be modified") if joined_dataset?
end
alias check_delete_allowed! check_insert_allowed!
# SQLite supports a maximum of 500 rows in a VALUES clause.
def default_import_slice
500
end
# SQL fragment specifying a list of identifiers
def identifier_list(columns)
columns.map{|i| quote_identifier(i)}.join(', ')
end
# Add OR clauses to SQLite INSERT statements
def insert_conflict_sql(sql)
if resolution = @opts[:insert_conflict]
sql << " OR " << resolution.to_s.upcase
end
end
# Add ON CONFLICT clause if it should be used
def insert_on_conflict_sql(sql)
if opts = @opts[:insert_on_conflict]
sql << " ON CONFLICT"
if target = opts[:constraint]
sql << " ON CONSTRAINT "
identifier_append(sql, target)
elsif target = opts[:target]
sql << ' '
identifier_append(sql, Array(target))
if conflict_where = opts[:conflict_where]
sql << " WHERE "
literal_append(sql, conflict_where)
end
end
if values = opts[:update]
sql << " DO UPDATE SET "
update_sql_values_hash(sql, values)
if update_where = opts[:update_where]
sql << " WHERE "
literal_append(sql, update_where)
end
else
sql << " DO NOTHING"
end
end
end
# SQLite uses a preceding X for hex escaping strings
def literal_blob_append(sql, v)
sql << "X'" << v.unpack("H*").first << "'"
end
# Respect the database integer_booleans setting, using 0 or 'f'.
def literal_false
@db.integer_booleans ? '0' : "'f'"
end
# Respect the database integer_booleans setting, using 1 or 't'.
def literal_true
@db.integer_booleans ? '1' : "'t'"
end
# SQLite only supports multiple rows in the VALUES clause
# starting in 3.7.11. On older versions, fall back to using a UNION.
def multi_insert_sql_strategy
db.sqlite_version >= 30711 ? :values : :union
end
# Emulate the char_length function with length
def native_function_name(emulated_function)
if emulated_function == :char_length
'length'
else
super
end
end
# SQLite supports NULLS FIRST/LAST natively in 3.30+.
def requires_emulating_nulls_first?
db.sqlite_version < 33000
end
# SQLite does not support FOR UPDATE, but silently ignore it
# instead of raising an error for compatibility with other
# databases.
def select_lock_sql(sql)
super unless @opts[:lock] == :update
end
def select_only_offset_sql(sql)
sql << " LIMIT -1 OFFSET "
literal_append(sql, @opts[:offset])
end
# Support VALUES clause instead of the SELECT clause to return rows.
def select_values_sql(sql)
sql << "VALUES "
expression_list_append(sql, opts[:values])
end
# SQLite does not support CTEs directly inside UNION/INTERSECT/EXCEPT.
def supports_cte_in_compounds?
false
end
# SQLite 3.30 supports the FILTER clause for aggregate functions.
def supports_filtered_aggregates?
db.sqlite_version >= 33000
end
# SQLite supports quoted function names.
def supports_quoted_function_names?
true
end
# SQLite treats a DELETE with no WHERE clause as a TRUNCATE
def _truncate_sql(table)
"DELETE FROM #{table}"
end
# Use FROM to specify additional tables in an update query
def update_from_sql(sql)
if(from = @opts[:from][1..-1]).empty?
raise(Error, 'Need multiple FROM tables if updating/deleting a dataset with JOINs') if @opts[:join]
else
sql << ' FROM '
source_list_append(sql, from)
select_join_sql(sql)
end
end
# Only include the primary table in the main update clause
def update_table_sql(sql)
sql << ' '
source_list_append(sql, @opts[:from][0..0])
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/sqlanywhere.rb 0000664 0000000 0000000 00000012425 14342141206 0021642 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'sqlanywhere'
require_relative 'shared/sqlanywhere'
module Sequel
module SqlAnywhere
class SQLAnywhereException < StandardError
attr_reader :errno
attr_reader :sql
def initialize(message, errno, sql)
super(message)
@errno = errno
@sql = sql
end
end
boolean = Object.new
def boolean.call(s)
s.to_i != 0
end
date = Object.new
def date.call(s)
::Date.strptime(s)
end
decimal = Object.new
class << decimal
alias call BigDecimal
public :call
end
time = Object.new
def time.call(s)
::Sequel.string_to_time(s)
end
SQLANYWHERE_TYPES = {}
{
[0, 484] => decimal,
[384] => date,
[388] => time,
[500] => boolean,
[524, 528] => ::Sequel::SQL::Blob
}.each do |k,v|
k.each{|n| SQLANYWHERE_TYPES[n] = v}
end
SQLANYWHERE_TYPES.freeze
class Database < Sequel::Database
include Sequel::SqlAnywhere::DatabaseMethods
attr_accessor :api
set_adapter_scheme :sqlanywhere
def connect(server)
opts = server_opts(server)
unless conn_string = opts[:conn_string]
conn_string = []
conn_string << "Host=#{opts[:host]}#{":#{opts[:port]}" if opts[:port]}" if opts[:host]
conn_string << "DBN=#{opts[:database]}" if opts[:database]
conn_string << "UID=#{opts[:user]}" if opts[:user]
conn_string << "Password=#{opts[:password]}" if opts[:password]
conn_string << "CommLinks=#{opts[:commlinks]}" if opts[:commlinks]
conn_string << "ConnectionName=#{opts[:connection_name]}" if opts[:connection_name]
conn_string << "CharSet=#{opts[:encoding]}" if opts[:encoding]
conn_string << "Idle=0" # Prevent the server from disconnecting us if we're idle for >240mins (by default)
conn_string << nil
conn_string = conn_string.join(';')
end
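# At this point conn_string would look like the following, for
# hypothetical opts of {host: 'dbhost', database: 'mydb', user: 'u', password: 'p'}:
#
#   "Host=dbhost;DBN=mydb;UID=u;Password=p;Idle=0;"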
conn = @api.sqlany_new_connection
raise LoadError, "Could not connect" unless conn && @api.sqlany_connect(conn, conn_string) == 1
if Sequel.application_timezone == :utc
@api.sqlany_execute_immediate(conn, "SET TEMPORARY OPTION time_zone_adjustment=0")
end
conn
end
def disconnect_connection(c)
@api.sqlany_disconnect(c)
end
def execute_dui(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
_execute(conn, :rows, sql, opts)
end
end
def execute(sql, opts=OPTS, &block)
synchronize(opts[:server]) do |conn|
_execute(conn, :select, sql, opts, &block)
end
end
def execute_insert(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
_execute(conn, :insert, sql, opts)
end
end
def freeze
@conversion_procs.freeze
super
end
private
def _execute(conn, type, sql, opts)
unless rs = log_connection_yield(sql, conn){@api.sqlany_execute_direct(conn, sql)}
result, errstr = @api.sqlany_error(conn)
raise_error(SQLAnywhereException.new(errstr, result, sql))
end
case type
when :select
yield rs if defined?(yield)
when :rows
return @api.sqlany_affected_rows(rs)
when :insert
_execute(conn, :select, 'SELECT @@IDENTITY', opts){|r| return @api.sqlany_get_column(r, 0)[1] if r && @api.sqlany_fetch_next(r) == 1}
end
ensure
@api.sqlany_commit(conn) unless in_transaction?
@api.sqlany_free_stmt(rs) if rs
end
def adapter_initialize
@convert_smallint_to_bool = true
@conversion_procs = SQLANYWHERE_TYPES.dup
@conversion_procs[392] = method(:to_application_timestamp_sa)
@api = SQLAnywhere::SQLAnywhereInterface.new
raise LoadError, "Could not load SQLAnywhere DBCAPI library" if SQLAnywhere::API.sqlany_initialize_interface(@api) == 0
raise LoadError, "Could not initialize SQLAnywhere DBCAPI library" if @api.sqlany_init == 0
end
def dataset_class_default
Dataset
end
def log_connection_execute(conn, sql)
_execute(conn, nil, sql, OPTS)
end
end
class Dataset < Sequel::Dataset
include Sequel::SqlAnywhere::DatasetMethods
def fetch_rows(sql)
db = @db
cps = db.conversion_procs
api = db.api
execute(sql) do |rs|
convert = convert_smallint_to_bool
col_infos = []
api.sqlany_num_cols(rs).times do |i|
_, _, name, _, type = api.sqlany_get_column_info(rs, i)
cp = if type == 500
cps[500] if convert
else
cps[type]
end
col_infos << [output_identifier(name), cp]
end
self.columns = col_infos.map(&:first)
max = col_infos.length
if rs
while api.sqlany_fetch_next(rs) == 1
i = -1
h = {}
while (i+=1) < max
name, cp = col_infos[i]
v = api.sqlany_get_column(rs, i)[1]
h[name] = cp && v ? cp.call(v) : v
end
yield h
end
end
end
self
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/sqlite.rb 0000664 0000000 0000000 00000031511 14342141206 0020576 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'sqlite3'
require_relative 'shared/sqlite'
module Sequel
module SQLite
FALSE_VALUES = (%w'0 false f no n'.each(&:freeze) + [0]).freeze
blob = Object.new
def blob.call(s)
Sequel::SQL::Blob.new(s.to_s)
end
boolean = Object.new
def boolean.call(s)
s = s.downcase if s.is_a?(String)
!FALSE_VALUES.include?(s)
end
date = Object.new
def date.call(s)
case s
when String
Sequel.string_to_date(s)
when Integer
Date.jd(s)
when Float
Date.jd(s.to_i)
else
raise Sequel::Error, "unhandled type when converting to date: #{s.inspect} (#{s.class.inspect})"
end
end
integer = Object.new
def integer.call(s)
s.to_i
end
float = Object.new
def float.call(s)
s.to_f
end
numeric = Object.new
def numeric.call(s)
s = s.to_s unless s.is_a?(String)
BigDecimal(s) rescue s
end
time = Object.new
def time.call(s)
case s
when String
Sequel.string_to_time(s)
when Integer
Sequel::SQLTime.create(s/3600, (s % 3600)/60, s % 60)
when Float
s, f = s.divmod(1)
Sequel::SQLTime.create(s/3600, (s % 3600)/60, s % 60, (f*1000000).round)
else
raise Sequel::Error, "unhandled type when converting to time: #{s.inspect} (#{s.class.inspect})"
end
end
# Hash with string keys and callable values for converting SQLite types.
SQLITE_TYPES = {}
{
%w'date' => date,
%w'time' => time,
%w'bit bool boolean' => boolean,
%w'integer smallint mediumint int bigint' => integer,
%w'numeric decimal money' => numeric,
%w'float double real dec fixed' + ['double precision'] => float,
%w'blob' => blob
}.each do |k,v|
k.each{|n| SQLITE_TYPES[n] = v}
end
SQLITE_TYPES.freeze
sqlite_version = SQLite3::VERSION.split('.').map(&:to_i)[0..1]
sqlite_version = sqlite_version[0] * 100 + sqlite_version[1]
USE_EXTENDED_RESULT_CODES = sqlite_version >= 104
class Database < Sequel::Database
include ::Sequel::SQLite::DatabaseMethods
set_adapter_scheme :sqlite
# Mimic the file:// URI by having 2 preceding slashes specify a relative
# path, and 3 preceding slashes specify an absolute path.
def self.uri_to_options(uri) # :nodoc:
{ :database => (uri.host.nil? && uri.path == '/') ? nil : "#{uri.host}#{uri.path}" }
end
private_class_method :uri_to_options
# The conversion procs to use for this database
attr_reader :conversion_procs
def initialize(opts = OPTS)
super
@allow_regexp = typecast_value_boolean(opts[:setup_regexp_function])
end
# Connect to the database. Since SQLite is a file-based database,
# available options are limited:
#
# :database :: database name (filename or ':memory:' or file: URI)
# :readonly :: open database in read-only mode; useful for reading
# static data that you do not want to modify
# :timeout :: how long to wait for the database to be available if it
# is locked, given in milliseconds (default is 5000)
def connect(server)
opts = server_opts(server)
opts[:database] = ':memory:' if blank_object?(opts[:database])
sqlite3_opts = {}
sqlite3_opts[:readonly] = typecast_value_boolean(opts[:readonly]) if opts.has_key?(:readonly)
db = ::SQLite3::Database.new(opts[:database].to_s, sqlite3_opts)
db.busy_timeout(typecast_value_integer(opts.fetch(:timeout, 5000)))
if USE_EXTENDED_RESULT_CODES
db.extended_result_codes = true
end
connection_pragmas.each{|s| log_connection_yield(s, db){db.execute_batch(s)}}
if typecast_value_boolean(opts[:setup_regexp_function])
db.create_function("regexp", 2) do |func, regexp_str, string|
func.result = Regexp.new(regexp_str).match(string) ? 1 : 0
end
end
class << db
attr_reader :prepared_statements
end
db.instance_variable_set(:@prepared_statements, {})
db
end
# Whether this Database instance is setup to allow regexp matching.
# True if the :setup_regexp_function option was passed when creating the Database.
def allow_regexp?
@allow_regexp
end
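# A usage sketch; the function is opt-in since it is defined in Ruby
# and called for every row matched:
#
#   DB = Sequel.sqlite(setup_regexp_function: true)
#   DB[:t].where(name: /ab+c/).all # uses the regexp function defined above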
# Disconnect given connections from the database.
def disconnect_connection(c)
c.prepared_statements.each_value{|v| v.first.close}
c.close
end
# Run the given SQL with the given arguments and yield each row.
def execute(sql, opts=OPTS, &block)
_execute(:select, sql, opts, &block)
end
# Run the given SQL with the given arguments and return the number of changed rows.
def execute_dui(sql, opts=OPTS)
_execute(:update, sql, opts)
end
# Drop any prepared statements on the connection when executing DDL. This is because
# prepared statements lock the table in such a way that you can't drop or alter the
# table while a prepared statement that references it still exists.
def execute_ddl(sql, opts=OPTS)
synchronize(opts[:server]) do |conn|
conn.prepared_statements.values.each{|cps, s| cps.close}
conn.prepared_statements.clear
super
end
end
def execute_insert(sql, opts=OPTS)
_execute(:insert, sql, opts)
end
def freeze
@conversion_procs.freeze
super
end
# Handle Integer and Float arguments, since SQLite can store timestamps as integers and floats.
def to_application_timestamp(s)
case s
when String
super
when Integer
super(Time.at(s).to_s)
when Float
super(DateTime.jd(s).to_s)
else
raise Sequel::Error, "unhandled type when converting to timestamp: #{s.inspect} (#{s.class.inspect})"
end
end
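# For example, both of the following would be handled, in addition to
# the usual string timestamps:
#
#   DB.to_application_timestamp(1262304000) # Integer: Unix epoch seconds
#   DB.to_application_timestamp(2455197.5)  # Float: Julian day number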
private
def adapter_initialize
@conversion_procs = SQLITE_TYPES.dup
@conversion_procs['datetime'] = @conversion_procs['timestamp'] = method(:to_application_timestamp)
set_integer_booleans
end
# Yield an available connection. Rescue
# any SQLite3::Exceptions and turn them into DatabaseErrors.
def _execute(type, sql, opts, &block)
synchronize(opts[:server]) do |conn|
return execute_prepared_statement(conn, type, sql, opts, &block) if sql.is_a?(Symbol)
log_args = opts[:arguments]
args = {}
opts.fetch(:arguments, OPTS).each{|k, v| args[k] = prepared_statement_argument(v)}
case type
when :select
log_connection_yield(sql, conn, log_args){conn.query(sql, args, &block)}
when :insert
log_connection_yield(sql, conn, log_args){conn.execute(sql, args)}
conn.last_insert_row_id
when :update
log_connection_yield(sql, conn, log_args){conn.execute_batch(sql, args)}
conn.changes
end
end
rescue SQLite3::Exception => e
raise_error(e)
end
# The SQLite adapter does not need the pool to convert exceptions.
# Also, force the max connections to 1 if a memory database is being
# used, as otherwise each connection gets a separate database.
def connection_pool_default_options
o = super.dup
# Default to only a single connection if a memory database is used,
# because otherwise each connection will get a separate database
o[:max_connections] = 1 if @opts[:database] == ':memory:' || blank_object?(@opts[:database])
o
end
def prepared_statement_argument(arg)
case arg
when Date, DateTime, Time
literal(arg)[1...-1]
when SQL::Blob
arg.to_blob
when true, false
if integer_booleans
arg ? 1 : 0
else
literal(arg)[1...-1]
end
else
arg
end
end
# Execute a prepared statement on the database using the given name.
def execute_prepared_statement(conn, type, name, opts, &block)
ps = prepared_statement(name)
sql = ps.prepared_sql
args = opts[:arguments]
ps_args = {}
args.each{|k, v| ps_args[k] = prepared_statement_argument(v)}
if cpsa = conn.prepared_statements[name]
cps, cps_sql = cpsa
if cps_sql != sql
cps.close
cps = nil
end
end
unless cps
cps = log_connection_yield("PREPARE #{name}: #{sql}", conn){conn.prepare(sql)}
conn.prepared_statements[name] = [cps, sql]
end
log_sql = String.new
log_sql << "EXECUTE #{name}"
if ps.log_sql
log_sql << " ("
log_sql << sql
log_sql << ")"
end
if block
log_connection_yield(log_sql, conn, args){cps.execute(ps_args, &block)}
else
log_connection_yield(log_sql, conn, args){cps.execute!(ps_args){|r|}}
case type
when :insert
conn.last_insert_row_id
when :update
conn.changes
end
end
end
# SQLite3 raises ArgumentError in addition to SQLite3::Exception in
# some cases, such as operations on a closed database.
def database_error_classes
[SQLite3::Exception, ArgumentError]
end
def dataset_class_default
Dataset
end
if USE_EXTENDED_RESULT_CODES
# Support SQLite exception codes if ruby-sqlite3 supports them.
def sqlite_error_code(exception)
exception.code if exception.respond_to?(:code)
end
end
end
class Dataset < Sequel::Dataset
include ::Sequel::SQLite::DatasetMethods
module ArgumentMapper
include Sequel::Dataset::ArgumentMapper
protected
# Return a hash with the same values as the given hash,
# but with the keys converted to strings.
def map_to_prepared_args(hash)
args = {}
hash.each{|k,v| args[k.to_s.gsub('.', '__')] = v}
args
end
private
# SQLite uses a : before the name of the argument for named
# arguments.
def prepared_arg(k)
LiteralString.new("#{prepared_arg_placeholder}#{k.to_s.gsub('.', '__')}")
end
end
BindArgumentMethods = prepared_statements_module(:bind, ArgumentMapper)
PreparedStatementMethods = prepared_statements_module(:prepare, BindArgumentMethods)
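# A sketch of prepared statement use with this adapter (statement and
# table names hypothetical):
#
#   ps = DB[:items].where(:id=>:$i).prepare(:select, :select_by_id)
#   ps.call(:i=>1) # PREPARE select_by_id: ...; EXECUTE select_by_id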
# Support regexp functions if using :setup_regexp_function Database option.
def complex_expression_sql_append(sql, op, args)
case op
when :~, :'!~', :'~*', :'!~*'
return super unless supports_regexp?
case_insensitive = [:'~*', :'!~*'].include?(op)
sql << 'NOT ' if [:'!~', :'!~*'].include?(op)
sql << '('
sql << 'LOWER(' if case_insensitive
literal_append(sql, args[0])
sql << ')' if case_insensitive
sql << ' REGEXP '
sql << 'LOWER(' if case_insensitive
literal_append(sql, args[1])
sql << ')' if case_insensitive
sql << ')'
else
super
end
end
def fetch_rows(sql)
execute(sql) do |result|
cps = db.conversion_procs
type_procs = result.types.map{|t| cps[base_type_name(t)]}
j = -1
cols = result.columns.map{|c| [output_identifier(c), type_procs[(j+=1)]]}
self.columns = cols.map(&:first)
max = cols.length
result.each do |values|
row = {}
i = -1
while (i += 1) < max
name, type_proc = cols[i]
v = values[i]
if type_proc && v
v = type_proc.call(v)
end
row[name] = v
end
yield row
end
end
end
# Support regexp if using :setup_regexp_function Database option.
def supports_regexp?
db.allow_regexp?
end
private
# The base type name for a given type, without any parenthetical part.
def base_type_name(t)
(t =~ /^(.*?)\(/ ? $1 : t).downcase if t
end
# Quote the string using the adapter class method.
def literal_string_append(sql, v)
sql << "'" << ::SQLite3::Database.quote(v) << "'"
end
def bound_variable_modules
[BindArgumentMethods]
end
def prepared_statement_modules
[PreparedStatementMethods]
end
# SQLite uses a : before the name of the argument as a placeholder.
def prepared_arg_placeholder
':'
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/tinytds.rb 0000664 0000000 0000000 00000020022 14342141206 0020766 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require 'tiny_tds'
require_relative 'shared/mssql'
module Sequel
module TinyTDS
class Database < Sequel::Database
include Sequel::MSSQL::DatabaseMethods
set_adapter_scheme :tinytds
# Transfer the :user option to the :username option.
def connect(server)
opts = server_opts(server)
opts[:username] = opts[:user]
c = TinyTds::Client.new(opts)
c.query_options.merge!(:cache_rows=>false)
# SEQUEL6: Default to ansi: true
if opts[:ansi]
sql = %w(
ANSI_NULLS
ANSI_PADDING
ANSI_WARNINGS
ANSI_NULL_DFLT_ON
QUOTED_IDENTIFIER
CONCAT_NULL_YIELDS_NULL
).map{|v| "SET #{v} ON"}.join(";")
log_connection_yield(sql, c){c.execute(sql)}
end
if (ts = opts[:textsize])
sql = "SET TEXTSIZE #{typecast_value_integer(ts)}"
log_connection_yield(sql, c){c.execute(sql)}
end
c
end
# Execute the given +sql+ on the server. If the :return option
# is present, its value should be a method symbol that is called
# on the TinyTds::Result object returned from executing the
# +sql+. The value of such a method is returned to the caller.
# Otherwise, if a block is given, it is yielded the result object.
# If no block is given and a :return is not present, +nil+ is returned.
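#
# For example (sketch; TinyTds::Result#do returns the affected row count):
#
#   DB.execute('DELETE FROM posts', :return=>:do) # => number of deleted rows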
def execute(sql, opts=OPTS)
synchronize(opts[:server]) do |c|
begin
m = opts[:return]
r = nil
if (args = opts[:arguments]) && !args.empty?
types = []
values = []
args.each_with_index do |(k, v), i|
v, type = ps_arg_type(v)
types << "@#{k} #{type}"
values << "@#{k} = #{v}"
end
case m
when :do
sql = "#{sql}; SELECT @@ROWCOUNT AS AffectedRows"
single_value = true
when :insert
sql = "#{sql}; SELECT CAST(SCOPE_IDENTITY() AS bigint) AS Ident"
single_value = true
end
sql = "EXEC sp_executesql N'#{c.escape(sql)}', N'#{c.escape(types.join(', '))}', #{values.join(', ')}"
log_connection_yield(sql, c) do
r = c.execute(sql)
r.each{|row| return row.values.first} if single_value
end
else
log_connection_yield(sql, c) do
r = c.execute(sql)
return r.public_send(m) if m
end
end
yield(r) if defined?(yield)
rescue TinyTds::Error => e
raise_error(e, :disconnect=>!c.active?)
ensure
r.cancel if r && c.sqlsent? && c.active?
end
end
end
def execute_dui(sql, opts=OPTS)
opts = Hash[opts]
opts[:return] = :do
execute(sql, opts)
end
def execute_insert(sql, opts=OPTS)
opts = Hash[opts]
opts[:return] = :insert
execute(sql, opts)
end
def execute_ddl(sql, opts=OPTS)
opts = Hash[opts]
opts[:return] = :each
execute(sql, opts)
nil
end
private
# Choose whether to use unicode strings on initialization
def adapter_initialize
set_mssql_unicode_strings
end
# For some reason, unless you specify a column can be
# NULL, it assumes NOT NULL, so turn NULL on by default unless
# the column is a primary key column.
def column_list_sql(g)
pks = []
g.constraints.each{|c| pks = c[:columns] if c[:type] == :primary_key}
g.columns.each{|c| c[:null] = true if !pks.include?(c[:name]) && !c[:primary_key] && !c.has_key?(:null) && !c.has_key?(:allow_null)}
super
end
# tiny_tds uses TinyTds::Error as the base error class.
def database_error_classes
[TinyTds::Error]
end
# Stupid MSSQL maps foreign key and check constraint violations
# to the same error code, and doesn't expose the sqlstate. Use
# database error numbers if present and unambiguous, otherwise
# fallback to the regexp mapping.
def database_specific_error_class(exception, opts)
case exception.db_error_number
when 515
NotNullConstraintViolation
when 2627
UniqueConstraintViolation
else
super
end
end
def dataset_class_default
Dataset
end
# Return true if the :conn argument is present and not active.
def disconnect_error?(e, opts)
super || (opts[:conn] && !opts[:conn].active?) || ((e.is_a?(::TinyTds::Error) && /\A(Attempt to initiate a new Adaptive Server operation with results pending|The request failed to run because the batch is aborted, this can be caused by abort signal sent from client|Adaptive Server connection timed out|DBPROCESS is dead or not enabled)/.match(e.message)))
end
# Dispose of any possible results of execution.
def log_connection_execute(conn, sql)
log_connection_yield(sql, conn){conn.execute(sql).each}
end
# Return a 2 element array with the literal value and type to use
# in the prepared statement call for the given value and connection.
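# Illustrative mappings (sketch):
#
#   ps_arg_type(1)    # => [1, 'bigint']
#   ps_arg_type(true) # => ['1', 'int']
#   ps_arg_type(nil)  # => ['NULL', 'nvarchar(max)']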
def ps_arg_type(v)
case v
when Integer
[v, 'bigint']
when Float
[v, 'double precision']
when Numeric
[v, 'numeric']
when Time
if v.is_a?(SQLTime)
[literal(v), 'time']
else
[literal(v), 'datetime']
end
when DateTime
[literal(v), 'datetime']
when Date
[literal(v), 'date']
when nil
['NULL', 'nvarchar(max)']
when true
['1', 'int']
when false
['0', 'int']
when SQL::Blob
[literal(v), 'varbinary(max)']
else
[literal(v), 'nvarchar(max)']
end
end
end
class Dataset < Sequel::Dataset
include Sequel::MSSQL::DatasetMethods
module ArgumentMapper
include Sequel::Dataset::ArgumentMapper
protected
def map_to_prepared_args(hash)
args = {}
hash.each{|k,v| args[k.to_s.gsub('.', '__')] = v}
args
end
private
def prepared_arg(k)
LiteralString.new("@#{k.to_s.gsub('.', '__')}")
end
end
PreparedStatementMethods = prepared_statements_module("sql = prepared_sql; opts = Hash[opts]; opts[:arguments] = bind_arguments", ArgumentMapper)
def fetch_rows(sql)
execute(sql) do |result|
# Mutating an array in the result is questionable, but supported
# by tiny_tds developers (tiny_tds issue #57)
columns = result.fields.map!{|c| output_identifier(c)}
if columns.empty?
args = []
args << {:timezone=>:utc} if db.timezone == :utc
cols = nil
result.each(*args) do |r|
unless cols
cols = result.fields.map{|c| [c, output_identifier(c)]}
self.columns = columns = cols.map(&:last)
end
h = {}
cols.each do |s, sym|
h[sym] = r[s]
end
yield h
end
else
self.columns = columns
if db.timezone == :utc
result.each(:timezone=>:utc){|r| yield r}
else
result.each{|r| yield r}
end
end
end
self
end
private
# Properly escape the given string
def literal_string_append(sql, v)
sql << (mssql_unicode_strings ? "N'" : "'")
sql << db.synchronize(@opts[:server]){|c| c.escape(v)}.gsub(/\\((?:\r\n)|\n)/, '\\\\\\\\\\1\\1') << "'"
end
def prepared_statement_modules
[PreparedStatementMethods]
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/ 0000775 0000000 0000000 00000000000 14342141206 0020107 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/adapters/utils/columns_limit_1.rb 0000664 0000000 0000000 00000001060 14342141206 0023527 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
module ColumnsLimit1
COLUMNS_CLONE_OPTIONS = {:distinct => nil, :limit => 1, :offset=>nil, :where=>nil, :having=>nil, :order=>nil, :row_proc=>nil, :graph=>nil, :eager_graph=>nil}.freeze
# Use a limit of 1 instead of a limit of 0 when
# getting the columns.
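# Sketch (table name hypothetical):
#
#   DB[:items].columns! # runs: SELECT * FROM items LIMIT 1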
def columns!
ds = clone(COLUMNS_CLONE_OPTIONS)
ds.each{break}
if cols = ds.cache[:_columns]
self.columns = cols
else
[]
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/emulate_offset_with_reverse_and_count.rb 0000664 0000000 0000000 00000005010 14342141206 0030252 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module EmulateOffsetWithReverseAndCount
# Make empty? work with an offset with an order.
# By default it would break since the order would be based on
# a column that empty does not select.
def empty?
if o = @opts[:offset]
unlimited.count <= o
else
super
end
end
# Emulate OFFSET support using reverse order in a subselect, requiring
# a count of the number of rows.
#
# If offset is used, an order must be provided, since it needs to be
# reversed in the subselect. Note that the order needs to be unambiguous
# to work correctly, and you must select all columns that you are ordering on.
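#
# Sketch of the emulation (SQL shape illustrative, not exact):
#
#   DB[:t].order(:id).limit(10, 20).all
#   # 1) SELECT count(*) AS count FROM t -- determine row_count
#   # 2) SELECT * FROM (SELECT * FROM t ORDER BY id DESC
#   #    LIMIT (row_count - 20)) AS t1 ORDER BY id LIMIT 10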
def select_sql
return super if @opts[:sql]
return super unless o = @opts[:offset]
order = @opts[:order] || default_offset_order
if order.nil? || order.empty?
raise(Error, "#{db.database_type} requires an order be provided if using an offset")
end
ds = unlimited
row_count = @opts[:offset_total_count] || ds.clone(:append_sql=>String.new, :placeholder_literal_null=>true).count
dsa1 = dataset_alias(1)
if o.is_a?(Symbol) && @opts[:bind_vars] && /\A\$(.*)\z/ =~ o
# Handle use of bound variable offsets. Unfortunately, prepared statement
# bound variable offsets cannot be handled, since the bound variable value
# isn't available until later.
o = prepared_arg($1.to_sym)
end
reverse_offset = row_count - o
ds = if reverse_offset > 0
ds.limit(reverse_offset).
reverse(*order).
from_self(:alias=>dsa1).
limit(@opts[:limit]).
order(*order)
else
# Sequel doesn't allow a nonpositive limit. If the offset
# is greater than the number of rows, the empty result set
# should be returned, so use a condition that is always false.
ds.where(1=>0)
end
sql = @opts[:append_sql] || String.new
subselect_sql_append(sql, ds)
sql
end
# This does not support offsets in correlated subqueries, as it requires a query to get
# a count that will be invalid if a correlated subquery is used.
def supports_offsets_in_correlated_subqueries?
false
end
private
# The default order to use for datasets with offsets, if no order is defined.
# By default, orders by all of the columns in the dataset.
def default_offset_order
clone(:append_sql=>String.new, :offset=>nil).columns
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/emulate_offset_with_row_number.rb 0000664 0000000 0000000 00000006073 14342141206 0026736 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module EmulateOffsetWithRowNumber
# If the offset must be emulated with ROW_NUMBER, don't remove any ordering,
# because it can cause invalid queries to be issued if an offset is required
# when ordering.
def empty?
return super unless emulate_offset_with_row_number?
select(Dataset::EMPTY_SELECT).limit(1).single_value!.nil?
end
# Emulate OFFSET support with the ROW_NUMBER window function
#
# The implementation is ugly, cloning the current dataset and modifying
# the clone to add a ROW_NUMBER window function (and some other things),
# then using the modified clone in a subselect which is selected from.
#
# If offset is used, an order must be provided, because the use of ROW_NUMBER
# requires an order.
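#
# Sketch of the emulation (SQL shape illustrative; rn stands in for the
# adapter's row number column):
#
#   DB[:t].order(:id).limit(10, 20).all
#   # SELECT * FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY id) AS rn FROM t) AS t1
#   # WHERE (rn > 20) ORDER BY rn LIMIT 10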
def select_sql
return super unless emulate_offset_with_row_number?
offset = @opts[:offset]
order = @opts[:order]
if require_offset_order?
order ||= default_offset_order
if order.nil? || order.empty?
raise(Error, "#{db.database_type} requires an order be provided if using an offset")
end
end
columns = clone(:append_sql=>String.new, :placeholder_literal_null=>true).columns
dsa1 = dataset_alias(1)
rn = row_number_column
sql = @opts[:append_sql] || String.new
subselect_sql_append(sql, unlimited.
unordered.
select_append(Sequel.function(:ROW_NUMBER).over(:order=>order).as(rn)).
from_self(:alias=>dsa1).
select(*columns).
limit(@opts[:limit]).
where(SQL::Identifier.new(rn) > offset).
order(rn))
sql
end
# This does not support offsets in correlated subqueries, as it requires a query to get
# the columns that will be invalid if a correlated subquery is used.
def supports_offsets_in_correlated_subqueries?
false
end
private
# Allow preparing prepared statements, since determining the prepared sql to use for
# a prepared statement requires calling prepare on that statement.
def allow_preparing_prepared_statements?
true
end
# The default order to use for datasets with offsets, if no order is defined.
# By default, orders by all of the columns in the dataset.
def default_offset_order
if (cols = opts[:select])
cols.each do |c|
case c
when Symbol
return [split_alias(c).first]
when SQL::Identifier, SQL::QualifiedIdentifier
return [c]
when SQL::AliasedExpression
case c.expression
when Symbol, SQL::Identifier, SQL::QualifiedIdentifier
return [c.expression]
end
end
end
end
clone(:append_sql=>String.new).columns
end
# Whether an order is required when using offset emulation via ROW_NUMBER, true by default.
def require_offset_order?
true
end
# Whether to use ROW_NUMBER to emulate offsets
def emulate_offset_with_row_number?
@opts[:offset] && !@opts[:sql]
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/mysql_mysql2.rb 0000664 0000000 0000000 00000006004 14342141206 0023110 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative '../shared/mysql'
require_relative 'stored_procedures'
module Sequel
module MySQL
# This module is used by the mysql and mysql2 adapters to support
# prepared statements and stored procedures.
module MysqlMysql2
module DatabaseMethods
disconnect_errors = <<-END.split("\n").map(&:strip)
Commands out of sync; you can't run this command now
Can't connect to local MySQL server through socket
MySQL server has gone away
Lost connection to MySQL server during query
MySQL client is not connected
This connection is still waiting for a result, try again once you have the result
closed MySQL connection
The MySQL server is running with the --read-only option so it cannot execute this statement
Connection was killed
END
# Error messages for mysql and mysql2 that indicate the current connection should be disconnected
MYSQL_DATABASE_DISCONNECT_ERRORS = /\A#{Regexp.union(disconnect_errors)}/
# Support stored procedures on MySQL
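#
# e.g. (procedure name and arguments hypothetical):
#
#   DB.call_sproc(:update_totals, :args=>[2022, 12]) # CALL update_totals(2022, 12)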
def call_sproc(name, opts=OPTS, &block)
args = opts[:args] || []
execute("CALL #{name}#{args.empty? ? '()' : literal(args)}", opts.merge(:sproc=>false), &block)
end
# Executes the given SQL using an available connection, yielding the
# connection if the block is given.
def execute(sql, opts=OPTS, &block)
if opts[:sproc]
call_sproc(sql, opts, &block)
elsif sql.is_a?(Symbol) || sql.is_a?(Sequel::Dataset::ArgumentMapper)
execute_prepared_statement(sql, opts, &block)
else
synchronize(opts[:server]){|conn| _execute(conn, sql, opts, &block)}
end
end
private
def add_prepared_statements_cache(conn)
class << conn
attr_accessor :prepared_statements
end
conn.prepared_statements = {}
end
def database_specific_error_class(exception, opts)
case exception.errno
when 1048
NotNullConstraintViolation
when 1062
UniqueConstraintViolation
when 1451, 1452, 1216, 1217
ForeignKeyConstraintViolation
when 4025
CheckConstraintViolation
when 1205
DatabaseLockTimeout
else
super
end
end
end
module DatasetMethods
include Sequel::Dataset::StoredProcedures
StoredProcedureMethods = Sequel::Dataset.send(:prepared_statements_module,
"sql = @opts[:sproc_name]; opts = Hash[opts]; opts[:args] = @opts[:sproc_args]; opts[:sproc] = true",
Sequel::Dataset::StoredProcedureMethods, %w'execute execute_dui')
private
# Extend the dataset with the MySQL stored procedure methods.
def prepare_extend_sproc(ds)
ds.with_extend(StoredProcedureMethods)
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/mysql_prepared_statements.rb 0000664 0000000 0000000 00000004171 14342141206 0025735 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module MySQL
module PreparedStatements
module DatabaseMethods
private
# Executes a prepared statement on an available connection. If the
# prepared statement already exists for the connection and has the same
# SQL, reuse it, otherwise, prepare the new statement. Issue a SET
# query with literalized values for each argument, then an EXECUTE to
# execute the query with the arguments.
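#
# Sketch of the statements issued for a two-argument call (statement
# name and values hypothetical):
#
#   PREPARE ps_name FROM 'SELECT * FROM t WHERE (a = ?) AND (b = ?)'
#   SET @sequel_arg_1 = 1, @sequel_arg_2 = 'x'
#   EXECUTE ps_name USING @sequel_arg_1, @sequel_arg_2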
def execute_prepared_statement(ps_name, opts, &block)
args = opts[:arguments]
ps = prepared_statement(ps_name)
sql = ps.prepared_sql
synchronize(opts[:server]) do |conn|
unless conn.prepared_statements[ps_name] == sql
_execute(conn, "PREPARE #{ps_name} FROM #{literal(sql)}", opts)
conn.prepared_statements[ps_name] = sql
end
i = 0
_execute(conn, "SET " + args.map {|arg| "@sequel_arg_#{i+=1} = #{literal(arg)}"}.join(", "), opts) unless args.empty?
opts = opts.merge(:log_sql=>" (#{sql})") if ps.log_sql
_execute(conn, "EXECUTE #{ps_name}#{" USING #{(1..i).map{|j| "@sequel_arg_#{j}"}.join(', ')}" unless i == 0}", opts, &block)
end
end
end
module DatasetMethods
module CallableStatementMethods
# Extend given dataset with this module so subselects inside subselects in
# prepared statements work.
def subselect_sql_append(sql, ds)
ds.clone(:append_sql=>sql, :prepared_args=>prepared_args, :bind_vars=>@opts[:bind_vars]).
send(:to_prepared_statement, :select, nil, :extend=>bound_variable_modules).
prepared_sql
end
end
PreparedStatementMethods = Sequel::Dataset.send(:prepared_statements_module, :prepare_bind, Sequel::Dataset::UnnumberedArgumentMapper)
private
def bound_variable_modules
[CallableStatementMethods]
end
def prepared_statement_modules
[PreparedStatementMethods]
end
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/replace.rb 0000664 0000000 0000000 00000001522 14342141206 0022047 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
module Replace
# Execute a REPLACE statement on the database (deletes any duplicate
# rows before inserting).
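#
# Usage sketch (table and values hypothetical):
#
#   DB[:items].replace(:id=>1, :name=>'a') # REPLACE INTO items (id, name) VALUES (1, 'a')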
def replace(*values)
execute_insert(replace_sql(*values))
end
# SQL statement for REPLACE
def replace_sql(*values)
clone(:replace=>true).insert_sql(*values)
end
# Replace multiple rows in a single query.
def multi_replace(*values)
clone(:replace=>true).multi_insert(*values)
end
# Databases using this module support REPLACE.
def supports_replace?
true
end
private
# If this is an replace instead of an insert, use replace instead
def insert_insert_sql(sql)
sql << (@opts[:replace] ? 'REPLACE' : 'INSERT')
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/split_alter_table.rb 0000664 0000000 0000000 00000002735 14342141206 0024134 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel::Database::SplitAlterTable
private
# Preprocess the array of operations. If it looks like some operations depend
# on results of earlier operations and may require reloading the schema to
# work correctly, split those operations into separate lists, and between each
# list, remove the cached schema so that the later operations deal with the
# then current table schema.
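# Sketch: an alter_table block that adds a column and then modifies the
# same column (table and column names hypothetical) is run as two groups:
#
#   DB.alter_table(:items) do
#     add_column :x, Integer
#     set_column_type :x, String
#   end
#   # group 1: the add_column; group 2: the set_column_type, after the
#   # cached schema for :items has been removed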
def apply_alter_table(name, ops)
modified_columns = []
op_groups = [[]]
ops.each do |op|
case op[:op]
when :add_column, :set_column_type, :set_column_null, :set_column_default
if modified_columns.include?(op[:name])
op_groups << []
else
modified_columns << op[:name]
end
when :rename_column
if modified_columns.include?(op[:name]) || modified_columns.include?(op[:new_name])
op_groups << []
end
modified_columns << op[:name] unless modified_columns.include?(op[:name])
modified_columns << op[:new_name] unless modified_columns.include?(op[:new_name])
end
if split_alter_table_op?(op)
op_groups << []
end
op_groups.last << op
end
op_groups.each do |opgs|
next if opgs.empty?
alter_table_sql_list(name, opgs).each{|sql| execute_ddl(sql)}
remove_cached_schema(name)
end
end
# Whether the given alter table op should start a new group.
def split_alter_table_op?(op)
false
end
end
sequel-5.63.0/lib/sequel/adapters/utils/stored_procedures.rb 0000664 0000000 0000000 00000003215 14342141206 0024170 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
module StoredProcedureMethods
# The name of the stored procedure to call
def sproc_name
@opts[:sproc_name]
end
# Call the stored procedure with the given args
def call(*args, &block)
clone(:sproc_args=>args).run(&block)
end
# Programmer friendly string showing this is a stored procedure,
# showing the name of the procedure.
def inspect
"<#{self.class.name}/StoredProcedure name=#{@sproc_name}>"
end
# Run the stored procedure with the current args on the database
def run(&block)
case @opts[:sproc_type]
when :select, :all
all(&block)
when :first
first
when :insert
insert
when :update
update
when :delete
delete
end
end
end
module StoredProcedures
# For the given type (:select, :first, :insert, :update, or :delete),
# run the database stored procedure with the given name with the given
# arguments.
def call_sproc(type, name, *args)
prepare_sproc(type, name).call(*args)
end
# Transform this dataset into a stored procedure that you can call
# multiple times with new arguments.
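#
# Usage sketch (procedure name hypothetical):
#
#   sp = DB[:items].prepare_sproc(:select, :my_proc)
#   sp.call(1) # runs my_proc(1) and returns the result rows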
def prepare_sproc(type, name)
prepare_extend_sproc(self).clone(:sproc_type=>type, :sproc_name=>name, :sql=>'')
end
private
# Extend the dataset with the stored procedure methods.
def prepare_extend_sproc(ds)
ds.with_extend(StoredProcedureMethods)
end
end
end
end
sequel-5.63.0/lib/sequel/adapters/utils/unmodified_identifiers.rb 0000664 0000000 0000000 00000001167 14342141206 0025151 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module UnmodifiedIdentifiers
module DatabaseMethods
private
# Databases that use this module do not fold unquoted identifiers to uppercase.
def folds_unquoted_identifiers_to_uppercase?
false
end
end
module DatasetMethods
private
# Turn the given symbol/string into a symbol, keeping the current case.
def output_identifier(v)
v == '' ? :untitled : v.to_sym
end
# Turn the given symbol/string into a string, keeping the current case.
def input_identifier(v)
v.to_s
end
end
end
end
sequel-5.63.0/lib/sequel/ast_transformer.rb 0000664 0000000 0000000 00000010254 14342141206 0020704 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
# The +ASTTransformer+ class is designed to handle the abstract syntax trees
# that Sequel uses internally and produce modified copies of them. By itself
# it only produces a straight copy. It's designed to be subclassed and have
# subclasses returned modified copies of the specific nodes that need to
# be modified.
class ASTTransformer
# Return +obj+ or a potentially transformed version of it.
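# e.g. (sketch; the base class returns a structurally equal copy):
#
#   Sequel::ASTTransformer.new.transform(Sequel[:a] + 1)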
def transform(obj)
v(obj)
end
private
# Recursive version that handles all of Sequel's internal object types
# and produces copies of them.
def v(o)
case o
when Symbol, Numeric, String, Class, TrueClass, FalseClass, NilClass
o
when Array
o.map{|x| v(x)}
when Hash
h = {}
o.each{|k, val| h[v(k)] = v(val)}
h
when SQL::NumericExpression
if o.op == :extract
o.class.new(o.op, o.args[0], v(o.args[1]))
else
o.class.new(o.op, *v(o.args))
end
when SQL::ComplexExpression
o.class.new(o.op, *v(o.args))
when SQL::Identifier
SQL::Identifier.new(v(o.value))
when SQL::QualifiedIdentifier
SQL::QualifiedIdentifier.new(v(o.table), v(o.column))
when SQL::OrderedExpression
SQL::OrderedExpression.new(v(o.expression), o.descending, :nulls=>o.nulls)
when SQL::AliasedExpression
SQL::AliasedExpression.new(v(o.expression), o.alias, o.columns)
when SQL::CaseExpression
args = [v(o.conditions), v(o.default)]
args << v(o.expression) if o.expression?
SQL::CaseExpression.new(*args)
when SQL::Cast
SQL::Cast.new(v(o.expr), o.type)
when SQL::Function
h = {}
o.opts.each do |k, val|
h[k] = v(val)
end
SQL::Function.new!(o.name, v(o.args), h)
when SQL::Subscript
SQL::Subscript.new(v(o.expression), v(o.sub))
when SQL::Window
opts = o.opts.dup
opts[:partition] = v(opts[:partition]) if opts[:partition]
opts[:order] = v(opts[:order]) if opts[:order]
SQL::Window.new(opts)
when SQL::PlaceholderLiteralString
args = if o.args.is_a?(Hash)
h = {}
o.args.each{|k,val| h[k] = v(val)}
h
else
v(o.args)
end
SQL::PlaceholderLiteralString.new(o.str, args, o.parens)
when SQL::JoinOnClause
SQL::JoinOnClause.new(v(o.on), o.join_type, v(o.table_expr))
when SQL::JoinUsingClause
SQL::JoinUsingClause.new(v(o.using), o.join_type, v(o.table_expr))
when SQL::JoinClause
SQL::JoinClause.new(o.join_type, v(o.table_expr))
when SQL::DelayedEvaluation
SQL::DelayedEvaluation.new(lambda{|ds| v(o.call(ds))})
when SQL::Wrapper
SQL::Wrapper.new(v(o.value))
when SQL::Expression
if o.respond_to?(:sequel_ast_transform)
o.sequel_ast_transform(method(:v))
else
o
end
else
o
end
end
end
# Handles qualifying existing datasets, so that unqualified columns
# in the dataset are qualified with a given table name.
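# Sketch: Dataset#qualify uses this transformer, e.g.
#
#   DB[:items].where{a > 1}.qualify
#   # unqualified a becomes items.a in the generated SQL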
class Qualifier < ASTTransformer
# Set the table used to qualify unqualified columns
def initialize(table)
@table = table
end
private
# Turn SQL::Identifiers and symbols that aren't implicitly
# qualified into SQL::QualifiedIdentifiers. For symbols that
# are not implicitly qualified but are implicitly aliased, return an
# SQL::AliasedExpression with a qualified version of the symbol.
def v(o)
case o
when Symbol
t, column, aliaz = Sequel.split_symbol(o)
if t
o
elsif aliaz
SQL::AliasedExpression.new(SQL::QualifiedIdentifier.new(@table, SQL::Identifier.new(column)), aliaz)
else
SQL::QualifiedIdentifier.new(@table, o)
end
when SQL::Identifier
SQL::QualifiedIdentifier.new(@table, o)
when SQL::QualifiedIdentifier, SQL::JoinClause
# Return these directly, so we don't accidentally qualify symbols in them.
o
else
super
end
end
end
end
sequel-5.63.0/lib/sequel/connection_pool.rb 0000664 0000000 0000000 00000013630 14342141206 0020664 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
# The base connection pool class, which all other connection pools are based
# on. This class is not instantiated directly, but subclasses should at
# the very least implement the following API:
#
# initialize(Database, Hash) :: Initialize using the passed Sequel::Database
# object and options hash.
# hold(Symbol, &block) :: Yield a connection object (obtained from calling
# the block passed to +initialize+) to the current block. For sharded
# connection pools, the Symbol passed is the shard/server to use.
# disconnect(Symbol) :: Disconnect the connection object. For sharded
# connection pools, the Symbol passed is the shard/server to use.
# servers :: An array of shard/server symbols for all shards/servers that this
# connection pool recognizes.
# size :: an integer representing the total number of connections in the pool,
# or for the given shard/server if sharding is supported.
# max_size :: an integer representing the maximum size of the connection pool,
# or the maximum size per shard/server if sharding is supported.
#
# For sharded connection pools, the sharded API adds the following methods:
#
# add_servers(Array of Symbols) :: start recognizing all shards/servers specified
# by the array of symbols.
# remove_servers(Array of Symbols) :: no longer recognize all shards/servers
# specified by the array of symbols.
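#
# A minimal sketch of the common API (SQL hypothetical, connection
# object API adapter-specific):
#
#   DB.pool.hold{|conn| conn.execute('SELECT 1')}
#   DB.pool.size # number of connections currently open
#   DB.pool.disconnect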
class Sequel::ConnectionPool
OPTS = Sequel::OPTS
POOL_CLASS_MAP = {
:threaded => :ThreadedConnectionPool,
:single => :SingleConnectionPool,
:sharded_threaded => :ShardedThreadedConnectionPool,
:sharded_single => :ShardedSingleConnectionPool,
:timed_queue => :TimedQueueConnectionPool,
}
POOL_CLASS_MAP.to_a.each{|k, v| POOL_CLASS_MAP[k.to_s] = v}
POOL_CLASS_MAP.freeze
# Class methods used to return an appropriate pool subclass, separated
# into a module for easier overriding by extensions.
module ClassMethods
# Return a pool subclass instance based on the given options. If a :pool_class
# option is provided, use that pool class, otherwise
# use a new instance of an appropriate pool subclass based on the
# :single_threaded and :servers options.
def get_pool(db, opts = OPTS)
connection_pool_class(opts).new(db, opts)
end
private
# Return a connection pool class based on the given options.
def connection_pool_class(opts)
if pc = opts[:pool_class]
unless pc.is_a?(Class)
unless name = POOL_CLASS_MAP[pc]
raise Sequel::Error, "unsupported connection pool type, please pass appropriate class as the :pool_class option"
end
require_relative "connection_pool/#{pc}"
pc = Sequel.const_get(name)
end
pc
else
pc = if opts[:single_threaded]
opts[:servers] ? :sharded_single : :single
else
opts[:servers] ? :sharded_threaded : :threaded
end
connection_pool_class(:pool_class=>pc)
end
end
end
extend ClassMethods
# The after_connect proc used for this pool. This is called with each new
# connection made, and is usually used to set custom per-connection settings.
# Deprecated.
attr_reader :after_connect # SEQUEL6: Remove
# Override the after_connect proc for the connection pool. Deprecated.
# Disables support for shard-specific :after_connect and :connect_sqls if used.
def after_connect=(v) # SEQUEL6: Remove
@use_old_connect_api = true
@after_connect = v
end
# An array of sql strings to execute on each new connection. Deprecated.
attr_reader :connect_sqls # SEQUEL6: Remove
# Override the connect_sqls for the connection pool. Deprecated.
# Disables support for shard-specific :after_connect and :connect_sqls if used.
def connect_sqls=(v) # SEQUEL6: Remove
@use_old_connect_api = true
@connect_sqls = v
end
# The Sequel::Database object tied to this connection pool.
attr_accessor :db
# Instantiates a connection pool with the given Database and options.
def initialize(db, opts=OPTS) # SEQUEL6: Remove second argument, always use db.opts
@db = db
@use_old_connect_api = false # SEQUEL6: Remove
@after_connect = opts[:after_connect] # SEQUEL6: Remove
@connect_sqls = opts[:connect_sqls] # SEQUEL6: Remove
@error_classes = db.send(:database_error_classes).dup.freeze
end
# An array of symbols for all shards/servers, which is a single :default by default.
def servers
[:default]
end
private
# Remove the connection from the pool. For threaded connections, this should be
# called without the mutex, because the disconnection may block.
def disconnect_connection(conn)
db.disconnect_connection(conn)
end
# Whether the given exception is a disconnect exception.
def disconnect_error?(exception)
exception.is_a?(Sequel::DatabaseDisconnectError) || db.send(:disconnect_error?, exception, OPTS)
end
# Return a new connection by calling the connection proc with the given server name,
# and checking for connection errors.
def make_new(server)
begin
if @use_old_connect_api
# SEQUEL6: Remove block
conn = @db.connect(server)
if ac = @after_connect
if ac.arity == 2
ac.call(conn, server)
else
ac.call(conn)
end
end
if cs = @connect_sqls
cs.each do |sql|
db.send(:log_connection_execute, conn, sql)
end
end
conn
else
@db.new_connection(server)
end
rescue Exception=>exception
raise Sequel.convert_exception_class(exception, Sequel::DatabaseConnectionError)
end || raise(Sequel::DatabaseConnectionError, "Connection parameters not valid")
end
end
sequel-5.63.0/lib/sequel/connection_pool/ 0000775 0000000 0000000 00000000000 14342141206 0020334 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/connection_pool/sharded_single.rb 0000664 0000000 0000000 00000007043 14342141206 0023640 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
# A ShardedSingleConnectionPool is a single threaded connection pool that
# works with multiple shards/servers.
class Sequel::ShardedSingleConnectionPool < Sequel::ConnectionPool
# The single threaded pool takes the following options:
#
# :servers :: A hash of servers to use. Keys should be symbols. If not
# present, will use a single :default server.
# :servers_hash :: The base hash to use for the servers. By default,
# Sequel uses Hash.new(:default). You can use a hash with a default proc
# that raises an error if you want to catch all cases where a nonexistent
# server is used.
def initialize(db, opts=OPTS)
super
@conns = {}
@servers = opts.fetch(:servers_hash, Hash.new(:default))
add_servers([:default])
add_servers(opts[:servers].keys) if opts[:servers]
end
# Adds new servers to the connection pool. Primarily used in conjunction with primary/replica
# or sharded configurations. Allows for dynamic expansion of the potential replicas/shards
# at runtime. +servers+ argument should be an array of symbols.
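#
# e.g. (shard names hypothetical):
#
#   DB.pool.add_servers([:shard_1, :shard_2])
#   DB[:items].server(:shard_1).all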
def add_servers(servers)
servers.each{|s| @servers[s] = s}
end
# Yield all of the currently established connections
def all_connections
@conns.values.each{|c| yield c}
end
# The connection for the given server.
def conn(server=:default)
@conns[@servers[server]]
end
# Disconnects from the database. Once a connection is requested using
# #hold, the connection is reestablished. Options:
# :server :: Should be a symbol specifying the server to disconnect from,
# or an array of symbols to specify multiple servers.
def disconnect(opts=OPTS)
(opts[:server] ? Array(opts[:server]) : servers).each do |s|
raise Sequel::Error, "invalid server: #{s}" unless @servers.has_key?(s)
disconnect_server(s)
end
end
def freeze
@servers.freeze
super
end
# Yields the connection to the supplied block for the given server.
# This method simulates the ConnectionPool#hold API.
def hold(server=:default)
server = pick_server(server)
yield(@conns[server] ||= make_new(server))
rescue Sequel::DatabaseDisconnectError, *@error_classes => e
disconnect_server(server) if disconnect_error?(e)
raise
end
# The ShardedSingleConnectionPool always has a maximum size of 1.
def max_size
1
end
# Remove servers from the connection pool. Similar to disconnecting from all given servers,
# except that after it is used, future requests for the server will use the
# :default server instead.
def remove_servers(servers)
raise(Sequel::Error, "cannot remove default server") if servers.include?(:default)
servers.each do |server|
disconnect_server(server)
@servers.delete(server)
end
end
# Return an array of symbols for servers in the connection pool.
def servers
@servers.keys
end
# The number of different shards/servers this pool is connected to.
def size
@conns.length
end
def pool_type
:sharded_single
end
private
# Disconnect from the given server, if connected.
def disconnect_server(server)
if conn = @conns.delete(server)
disconnect_connection(conn)
end
end
# If the server given is in the hash, return it, otherwise, return the default server.
def pick_server(server)
@servers[server]
end
# Make sure there is a valid connection for each server.
def preconnect(concurrent = nil)
servers.each{|s| hold(s){}}
end
end
sequel-5.63.0/lib/sequel/connection_pool/sharded_threaded.rb 0000664 0000000 0000000 00000031315 14342141206 0024136 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
require_relative 'threaded'
# The slowest and most advanced connection, dealing with both multi-threaded
# access and configurations with multiple shards/servers.
#
# In addition, this pool subclass also handles scheduling in-use connections
# to be removed from the pool when they are returned to it.
class Sequel::ShardedThreadedConnectionPool < Sequel::ThreadedConnectionPool
# The following additional options are respected:
# :servers :: A hash of servers to use. Keys should be symbols. If not
# present, will use a single :default server.
# :servers_hash :: The base hash to use for the servers. By default,
# Sequel uses Hash.new(:default). You can use a hash with a default proc
# that raises an error if you want to catch all cases where a nonexistent
# server is used.
def initialize(db, opts = OPTS)
super
@available_connections = {}
@connections_to_remove = []
@connections_to_disconnect = []
@servers = opts.fetch(:servers_hash, Hash.new(:default))
remove_instance_variable(:@waiter)
remove_instance_variable(:@allocated)
@allocated = {}
@waiters = {}
add_servers([:default])
add_servers(opts[:servers].keys) if opts[:servers]
end
# Adds new servers to the connection pool. Allows for dynamic expansion of the potential replicas/shards
# at runtime. +servers+ argument should be an array of symbols.
def add_servers(servers)
sync do
servers.each do |server|
unless @servers.has_key?(server)
@servers[server] = server
@available_connections[server] = []
allocated = {}
allocated.compare_by_identity
@allocated[server] = allocated
@waiters[server] = ConditionVariable.new
end
end
end
end
# A hash of connections currently being used for the given server, key is the
# Thread, value is the connection. Nonexistent servers will return nil. Treat
# this as read only, do not modify the resulting object.
# The calling code should already have the mutex before calling this.
def allocated(server=:default)
@allocated[server]
end
# Yield all of the available connections, and the ones currently allocated to
# this thread. This will not yield connections currently allocated to other
# threads, as it is not safe to operate on them. This holds the mutex while
# it is yielding all of the connections, which means that until
# the method's block returns, the pool is locked.
def all_connections
t = Sequel.current
sync do
@allocated.values.each do |threads|
threads.each do |thread, conn|
yield conn if t == thread
end
end
@available_connections.values.each{|v| v.each{|c| yield c}}
end
end
# An array of connections opened but not currently used, for the given
# server. Nonexistent servers will return nil. Treat this as read only, do
# not modify the resulting object.
# The calling code should already have the mutex before calling this.
def available_connections(server=:default)
@available_connections[server]
end
# The total number of connections opened for the given server.
# Nonexistent servers will return the created count of the default server.
# The calling code should NOT have the mutex before calling this.
def size(server=:default)
@mutex.synchronize{_size(server)}
end
# Removes all connections currently available on all servers, optionally
# yielding each connection to the given block. This method has the effect of
# disconnecting from the database, assuming that no connections are currently
# being used. If connections are being used, they are scheduled to be
# disconnected as soon as they are returned to the pool.
#
# Once a connection is requested using #hold, the connection pool
# creates new connections to the database. Options:
# :server :: Should be a symbol specifying the server to disconnect from,
# or an array of symbols to specify multiple servers.
def disconnect(opts=OPTS)
(opts[:server] ? Array(opts[:server]) : sync{@servers.keys}).each do |s|
disconnect_connections(sync{disconnect_server_connections(s)})
end
end
def freeze
@servers.freeze
super
end
# Chooses the first available connection to the given server, or if none are
# available, creates a new connection. Passes the connection to the supplied
# block:
#
# pool.hold {|conn| conn.execute('DROP TABLE posts')}
#
# Pool#hold is re-entrant, meaning it can be called recursively in
# the same thread without blocking.
#
# If no connection is immediately available and the pool is already using the maximum
# number of connections, Pool#hold will block until a connection
# is available or the timeout expires. If the timeout expires before a
# connection can be acquired, a Sequel::PoolTimeout is raised.
def hold(server=:default)
server = pick_server(server)
t = Sequel.current
if conn = owned_connection(t, server)
return yield(conn)
end
begin
conn = acquire(t, server)
yield conn
rescue Sequel::DatabaseDisconnectError, *@error_classes => e
sync{@connections_to_remove << conn} if conn && disconnect_error?(e)
raise
ensure
sync{release(t, conn, server)} if conn
while dconn = sync{@connections_to_disconnect.shift}
disconnect_connection(dconn)
end
end
end
# Remove servers from the connection pool. Similar to disconnecting from all given servers,
# except that after it is used, future requests for the server will use the
# :default server instead.
def remove_servers(servers)
conns = nil
sync do
raise(Sequel::Error, "cannot remove default server") if servers.include?(:default)
servers.each do |server|
if @servers.include?(server)
conns = disconnect_server_connections(server)
@waiters.delete(server)
@available_connections.delete(server)
@allocated.delete(server)
@servers.delete(server)
end
end
end
if conns
disconnect_connections(conns)
end
end
# Return an array of symbols for servers in the connection pool.
def servers
sync{@servers.keys}
end
def pool_type
:sharded_threaded
end
private
# The total number of connections opened for the given server.
# The calling code should already have the mutex before calling this.
def _size(server)
server = @servers[server]
@allocated[server].length + @available_connections[server].length
end
# Assigns a connection to the supplied thread, if one
# is available. The calling code should NOT already have the mutex when
# calling this.
#
# This should return a connection if one is available within the timeout,
# or nil if a connection could not be acquired within the timeout.
def acquire(thread, server)
if conn = assign_connection(thread, server)
return conn
end
timeout = @timeout
timer = Sequel.start_timer
sync do
@waiters[server].wait(@mutex, timeout)
if conn = next_available(server)
return(allocated(server)[thread] = conn)
end
end
until conn = assign_connection(thread, server)
elapsed = Sequel.elapsed_seconds_since(timer)
# :nocov:
raise_pool_timeout(elapsed, server) if elapsed > timeout
# It's difficult to get to this point, it can only happen if there is a race condition
# where a connection cannot be acquired even after the thread is signalled by the condition variable
sync do
@waiters[server].wait(@mutex, timeout - elapsed)
if conn = next_available(server)
return(allocated(server)[thread] = conn)
end
end
# :nocov:
end
conn
end
# Assign a connection to the thread, or return nil if one cannot be assigned.
# The caller should NOT have the mutex before calling this.
def assign_connection(thread, server)
alloc = nil
do_make_new = false
sync do
alloc = allocated(server)
if conn = next_available(server)
alloc[thread] = conn
return conn
end
if (n = _size(server)) >= (max = @max_size)
alloc.to_a.each do |t,c|
unless t.alive?
remove(t, c, server)
end
end
n = nil
end
if (n || _size(server)) < max
do_make_new = alloc[thread] = true
end
end
# Connect to the database outside of the connection pool mutex,
# as that can take a long time and the connection pool mutex
# shouldn't be locked while the connection takes place.
if do_make_new
begin
conn = make_new(server)
sync{alloc[thread] = conn}
ensure
unless conn
sync{alloc.delete(thread)}
end
end
end
conn
end
# Return a connection to the pool of available connections for the server,
# returns the connection. The calling code should already have the mutex
# before calling this.
def checkin_connection(server, conn)
available_connections(server) << conn
@waiters[server].signal
conn
end
# Clear the array of available connections for the server, returning an array
# of previous available connections that should be disconnected (or nil if none should be).
# Mark any allocated connections to be removed when they are checked back in. The calling
# code should already have the mutex before calling this.
def disconnect_server_connections(server)
remove_conns = allocated(server)
dis_conns = available_connections(server)
raise Sequel::Error, "invalid server: #{server}" unless remove_conns && dis_conns
@connections_to_remove.concat(remove_conns.values)
conns = dis_conns.dup
dis_conns.clear
@waiters[server].signal
conns
end
# Disconnect all available connections immediately, and schedule currently allocated connections for disconnection
# as soon as they are returned to the pool. The calling code should NOT
# have the mutex before calling this.
def disconnect_connections(conns)
conns.each{|conn| disconnect_connection(conn)}
end
# Return the next available connection in the pool for the given server, or nil
# if there is not currently an available connection for the server.
# The calling code should already have the mutex before calling this.
def next_available(server)
case @connection_handling
when :stack
available_connections(server).pop
else
available_connections(server).shift
end
end
# Returns the connection owned by the supplied thread for the given server,
# if any. The calling code should NOT already have the mutex before calling this.
def owned_connection(thread, server)
sync{@allocated[server][thread]}
end
# If the server given is in the hash, return it, otherwise, return the default server.
def pick_server(server)
sync{@servers[server]}
end
# Create the maximum number of connections immediately. The calling code should
# NOT have the mutex before calling this.
def preconnect(concurrent = false)
conn_servers = @servers.keys.map!{|s| Array.new(max_size - _size(s), s)}.flatten!
if concurrent
conn_servers.map!{|s| Thread.new{[s, make_new(s)]}}.map!(&:value)
else
conn_servers.map!{|s| [s, make_new(s)]}
end
sync{conn_servers.each{|s, conn| checkin_connection(s, conn)}}
end
# Raise a PoolTimeout error showing the current timeout, the elapsed time, the server
# the connection attempt was made to, and the database's name (if any).
def raise_pool_timeout(elapsed, server)
name = db.opts[:name]
raise ::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{elapsed}, server: #{server}#{", database name: #{name}" if name}"
end
# Releases the connection assigned to the supplied thread and server. If the
# server or connection given is scheduled for disconnection, remove the
# connection instead of releasing it back to the pool.
# The calling code should already have the mutex before calling this.
def release(thread, conn, server)
if @connections_to_remove.include?(conn)
remove(thread, conn, server)
else
conn = allocated(server).delete(thread)
if @connection_handling == :disconnect
@connections_to_disconnect << conn
else
checkin_connection(server, conn)
end
end
if waiter = @waiters[server]
waiter.signal
end
end
# Removes the currently allocated connection from the connection pool. The
# calling code should already have the mutex before calling this.
def remove(thread, conn, server)
@connections_to_remove.delete(conn)
allocated(server).delete(thread) if @servers.include?(server)
@connections_to_disconnect << conn
end
end
sequel-5.63.0/lib/sequel/connection_pool/single.rb 0000664 0000000 0000000 00000002342 14342141206 0022143 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
# This is the fastest connection pool, since it isn't a connection pool at all.
# It is just a wrapper around a single connection that uses the connection pool
# API.
class Sequel::SingleConnectionPool < Sequel::ConnectionPool
def initialize(db, opts=OPTS)
super
@conn = []
end
# Yield the connection if one has been made.
def all_connections
yield @conn.first unless @conn.empty?
end
# Disconnect the connection from the database.
def disconnect(opts=nil)
return unless c = @conn.first
disconnect_connection(c)
@conn.clear
nil
end
# Yield the connection to the block.
def hold(server=nil)
unless c = @conn.first
@conn.replace([c = make_new(:default)])
end
yield c
rescue Sequel::DatabaseDisconnectError, *@error_classes => e
disconnect if disconnect_error?(e)
raise
end
# The SingleConnectionPool always has a maximum size of 1.
def max_size
1
end
def pool_type
:single
end
# The SingleConnectionPool always has a size of 1 if connected
# and 0 if not.
def size
@conn.empty? ? 0 : 1
end
private
# Make sure there is a valid connection.
def preconnect(concurrent = nil)
hold{}
end
end
sequel-5.63.0/lib/sequel/connection_pool/threaded.rb 0000664 0000000 0000000 00000021601 14342141206 0022441 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
# A connection pool allowing multi-threaded access to a pool of connections.
# This is the default connection pool used by Sequel.
class Sequel::ThreadedConnectionPool < Sequel::ConnectionPool
USE_WAITER = true # SEQUEL6: Remove
Sequel::Deprecation.deprecate_constant(self, :USE_WAITER)
# The maximum number of connections this pool will create (per shard/server
# if sharding).
attr_reader :max_size
# An array of connections that are available for use by the pool.
# The calling code should already have the mutex before calling this.
attr_reader :available_connections # SEQUEL6: Remove
# A hash with thread/fiber keys and connection values for currently allocated connections.
# The calling code should already have the mutex before calling this.
attr_reader :allocated # SEQUEL6: Remove
# The following additional options are respected:
# :max_connections :: The maximum number of connections the connection pool
# will open (default 4)
# :pool_timeout :: The amount of seconds to wait to acquire a connection
# before raising a PoolTimeout error (default 5)
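#
# These are usually given as Database options, e.g. (sketch):
#
#   DB = Sequel.connect('sqlite://', :max_connections=>10, :pool_timeout=>2)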
def initialize(db, opts = OPTS)
super
@max_size = Integer(opts[:max_connections] || 4)
raise(Sequel::Error, ':max_connections must be positive') if @max_size < 1
@mutex = Mutex.new
@connection_handling = opts[:connection_handling]
@available_connections = []
@allocated = {}
@allocated.compare_by_identity
@timeout = Float(opts[:pool_timeout] || 5)
@waiter = ConditionVariable.new
end
# Yield all of the available connections, and the one currently allocated to
# this thread. This will not yield connections currently allocated to other
# threads, as it is not safe to operate on them. This holds the mutex while
# it is yielding all of the available connections, which means that until
# the method's block returns, the pool is locked.
def all_connections
hold do |c|
sync do
yield c
@available_connections.each{|conn| yield conn}
end
end
end
# Removes all connections currently available. This method has the effect of
# disconnecting from the database, assuming that no connections are currently
# being used. If you want to be able to disconnect connections that are
# currently in use, use the ShardedThreadedConnectionPool, which can do that.
# This connection pool does not, for performance reasons. To use the sharded pool,
# pass the servers: {} option when connecting to the database.
#
# Once a connection is requested using #hold, the connection pool
# creates new connections to the database.
def disconnect(opts=OPTS)
conns = nil
sync do
conns = @available_connections.dup
@available_connections.clear
@waiter.signal
end
conns.each{|conn| disconnect_connection(conn)}
end
# Chooses the first available connection, or if none are
# available, creates a new connection. Passes the connection to the supplied
# block:
#
# pool.hold {|conn| conn.execute('DROP TABLE posts')}
#
# Pool#hold is re-entrant, meaning it can be called recursively in
# the same thread without blocking.
#
# If no connection is immediately available and the pool is already using the maximum
# number of connections, Pool#hold will block until a connection
# is available or the timeout expires. If the timeout expires before a
# connection can be acquired, a Sequel::PoolTimeout is raised.
def hold(server=nil)
t = Sequel.current
if conn = owned_connection(t)
return yield(conn)
end
begin
conn = acquire(t)
yield conn
rescue Sequel::DatabaseDisconnectError, *@error_classes => e
if disconnect_error?(e)
oconn = conn
conn = nil
disconnect_connection(oconn) if oconn
sync do
@allocated.delete(t)
@waiter.signal
end
end
raise
ensure
if conn
sync{release(t)}
if @connection_handling == :disconnect
disconnect_connection(conn)
end
end
end
end
def pool_type
:threaded
end
# The total number of connections opened, either available or allocated.
# The calling code should not have the mutex before calling this.
def size
@mutex.synchronize{_size}
end
private
# The total number of connections opened, either available or allocated.
# The calling code should already have the mutex before calling this.
def _size
@allocated.length + @available_connections.length
end
# Assigns a connection to the supplied thread, if one
# is available. The calling code should NOT already have the mutex when
# calling this.
#
# This should return a connection if one is available within the timeout,
# or raise PoolTimeout if a connection could not be acquired within the timeout.
def acquire(thread)
if conn = assign_connection(thread)
return conn
end
timeout = @timeout
timer = Sequel.start_timer
sync do
@waiter.wait(@mutex, timeout)
if conn = next_available
return(@allocated[thread] = conn)
end
end
until conn = assign_connection(thread)
elapsed = Sequel.elapsed_seconds_since(timer)
# :nocov:
raise_pool_timeout(elapsed) if elapsed > timeout
# It's difficult to get to this point, it can only happen if there is a race condition
# where a connection cannot be acquired even after the thread is signalled by the condition variable
sync do
@waiter.wait(@mutex, timeout - elapsed)
if conn = next_available
return(@allocated[thread] = conn)
end
end
# :nocov:
end
conn
end
# Assign a connection to the thread, or return nil if one cannot be assigned.
# The caller should NOT have the mutex before calling this.
def assign_connection(thread)
# Thread safe as instance variable is only assigned to local variable
# and not operated on outside mutex.
allocated = @allocated
do_make_new = false
to_disconnect = nil
sync do
if conn = next_available
return(allocated[thread] = conn)
end
if (n = _size) >= (max = @max_size)
allocated.keys.each do |t|
unless t.alive?
(to_disconnect ||= []) << allocated.delete(t)
end
end
n = nil
end
if (n || _size) < max
do_make_new = allocated[thread] = true
end
end
if to_disconnect
to_disconnect.each{|dconn| disconnect_connection(dconn)}
end
# Connect to the database outside of the connection pool mutex,
# as that can take a long time and the connection pool mutex
# shouldn't be locked while the connection takes place.
if do_make_new
begin
conn = make_new(:default)
sync{allocated[thread] = conn}
ensure
unless conn
sync{allocated.delete(thread)}
end
end
end
conn
end
# Return the given connection to the pool of available connections, returning the connection.
# The calling code should already have the mutex before calling this.
def checkin_connection(conn)
@available_connections << conn
conn
end
# Return the next available connection in the pool, or nil if there
# is not currently an available connection. The calling code should already
# have the mutex before calling this.
def next_available
case @connection_handling
when :stack
@available_connections.pop
else
@available_connections.shift
end
end
# Returns the connection owned by the supplied thread,
# if any. The calling code should NOT already have the mutex before calling this.
def owned_connection(thread)
sync{@allocated[thread]}
end
# Create the maximum number of connections immediately. The calling code should
# NOT have the mutex before calling this.
def preconnect(concurrent = false)
enum = (max_size - _size).times
conns = if concurrent
enum.map{Thread.new{make_new(:default)}}.map(&:value)
else
enum.map{make_new(:default)}
end
sync{conns.each{|conn| checkin_connection(conn)}}
end
# Raise a PoolTimeout error showing the current timeout, the elapsed time, and the
# database's name (if any).
def raise_pool_timeout(elapsed)
name = db.opts[:name]
raise ::Sequel::PoolTimeout, "timeout: #{@timeout}, elapsed: #{elapsed}#{", database name: #{name}" if name}"
end
# Releases the connection assigned to the supplied thread back to the pool.
# The calling code should already have the mutex before calling this.
def release(thread)
conn = @allocated.delete(thread)
unless @connection_handling == :disconnect
checkin_connection(conn)
end
@waiter.signal
nil
end
# Yield to the block while inside the mutex. The calling code should NOT
# already have the mutex before calling this.
def sync
@mutex.synchronize{yield}
end
end
sequel-5.63.0/lib/sequel/connection_pool/timed_queue.rb 0000664 0000000 0000000 00000020025 14342141206 0023166 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
# :nocov:
raise LoadError, "Sequel::TimedQueueConnectionPool is only available on Ruby 3.2+" unless RUBY_VERSION >= '3.2'
# :nocov:
# A connection pool allowing multi-threaded access to a pool of connections,
# using a timed queue (only available in Ruby 3.2+).
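#
# This pool can be selected via the :pool_class Database option (a sketch;
# requires Ruby 3.2+):
#
# DB = Sequel.connect('postgres://...', pool_class: :timed_queue)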
class Sequel::TimedQueueConnectionPool < Sequel::ConnectionPool
# The maximum number of connections this pool will create.
attr_reader :max_size
# The following additional options are respected:
# :max_connections :: The maximum number of connections the connection pool
# will open (default 4)
# :pool_timeout :: The amount of seconds to wait to acquire a connection
# before raising a PoolTimeout (default 5)
def initialize(db, opts = OPTS)
super
@max_size = Integer(opts[:max_connections] || 4)
raise(Sequel::Error, ':max_connections must be positive') if @max_size < 1
@mutex = Mutex.new
# Size inside array so this still works while the pool is frozen.
@size = [0]
@allocated = {}
@allocated.compare_by_identity
@timeout = Float(opts[:pool_timeout] || 5)
@queue = Queue.new
end
# Yield all of the available connections, and the one currently allocated to
# this thread. This will not yield connections currently allocated to other
# threads, as it is not safe to operate on them.
def all_connections
hold do |conn|
yield conn
# Use a hash to record all connections already seen. As soon as we
# come across a connection we've already seen, we stop the loop.
conns = {}
conns.compare_by_identity
while true
conn = nil
begin
break unless (conn = @queue.pop(timeout: 0)) && !conns[conn]
conns[conn] = true
yield conn
ensure
@queue.push(conn) if conn
end
end
end
end
# Removes all connections currently in the pool's queue. This method has the effect of
# disconnecting from the database, assuming that no connections are currently
# being used.
#
# Once a connection is requested using #hold, the connection pool
# creates new connections to the database.
def disconnect(opts=OPTS)
while conn = @queue.pop(timeout: 0)
disconnect_connection(conn)
end
fill_queue
nil
end
# Chooses the first available connection, or if none are
# available, creates a new connection. Passes the connection to the supplied
# block:
#
# pool.hold {|conn| conn.execute('DROP TABLE posts')}
#
# Pool#hold is re-entrant, meaning it can be called recursively in
# the same thread without blocking.
#
# If no connection is immediately available and the pool is already using the maximum
# number of connections, Pool#hold will block until a connection
# is available or the timeout expires. If the timeout expires before a
# connection can be acquired, a Sequel::PoolTimeout is raised.
def hold(server=nil)
t = Sequel.current
if conn = sync{@allocated[t]}
return yield(conn)
end
begin
conn = acquire(t)
yield conn
rescue Sequel::DatabaseDisconnectError, *@error_classes => e
if disconnect_error?(e)
oconn = conn
conn = nil
disconnect_connection(oconn) if oconn
sync{@allocated.delete(t)}
fill_queue
end
raise
ensure
release(t) if conn
end
end
def pool_type
:timed_queue
end
# The total number of connections in the pool.
def size
sync{@size[0]}
end
private
# Create a new connection, after the pool's current size has already
# been updated to account for the new connection. If there is an exception
# when creating the connection, decrement the current size.
#
# This should only be called after can_make_new?. If there is an exception
# between when can_make_new? is called and when preallocated_make_new
# is called, it has the effect of reducing the maximum size of the
# connection pool by 1, since the current size of the pool will show a
# higher number than the number of connections allocated or
# in the queue.
#
# Calling code should not have the mutex when calling this.
def preallocated_make_new
make_new(:default)
rescue Exception
sync{@size[0] -= 1}
raise
end
# Decrement the current size of the pool when disconnecting connections.
#
# Calling code should not have the mutex when calling this.
def disconnect_connection(conn)
sync{@size[0] -= 1}
super
end
# If there are any threads waiting on the queue, try to create
# new connections in a separate thread if the pool is not yet at the
# maximum size.
#
# The reason for this method is to handle cases where acquire
# could not retrieve a connection immediately, and the pool
# was already at the maximum size. In that case, the acquire will
# wait on the queue until the timeout. This method is called
# after disconnecting to potentially add new connections to the
# pool, so the threads that are currently waiting for connections
# do not timeout after the pool is no longer full.
def fill_queue
if @queue.num_waiting > 0
Thread.new do
while @queue.num_waiting > 0 && (conn = try_make_new)
@queue.push(conn)
end
end
end
end
# Whether the given size is less than the maximum size of the pool.
# In that case, the pool's current size is incremented. If this
# method returns true, space in the pool for the connection is
# preallocated, and preallocated_make_new should be called to
# create the connection.
#
# Calling code should have the mutex when calling this.
def can_make_new?(current_size)
if @max_size > current_size
@size[0] += 1
end
end
# Try to make a new connection if there is space in the pool.
# If the pool is already full, look for dead threads/fibers and
# disconnect the related connections.
#
# Calling code should not have the mutex when calling this.
def try_make_new
return preallocated_make_new if sync{can_make_new?(@size[0])}
to_disconnect = nil
do_make_new = false
sync do
current_size = @size[0]
@allocated.keys.each do |t|
unless t.alive?
(to_disconnect ||= []) << @allocated.delete(t)
current_size -= 1
end
end
do_make_new = true if can_make_new?(current_size)
end
begin
preallocated_make_new if do_make_new
ensure
if to_disconnect
to_disconnect.each{|conn| disconnect_connection(conn)}
fill_queue
end
end
end
# Assigns a connection to the supplied thread, if one
# is available.
#
# This should return a connection if one is available within the timeout,
# or raise PoolTimeout if a connection could not be acquired within the timeout.
#
# Calling code should not have the mutex when calling this.
def acquire(thread)
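# First try a non-blocking pop from the queue, then try to create a new
# connection if the pool is not full, and only then block on the queue
# for up to the pool timeout.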
if conn = @queue.pop(timeout: 0) || try_make_new || @queue.pop(timeout: @timeout)
sync{@allocated[thread] = conn}
else
name = db.opts[:name]
raise ::Sequel::PoolTimeout, "timeout: #{@timeout}#{", database name: #{name}" if name}"
end
end
# Create the maximum number of connections immediately. This should not be called
# with a true argument unless no code is currently operating on the database.
#
# Calling code should not have the mutex when calling this.
def preconnect(concurrent = false)
if concurrent
if times = sync{@max_size > (size = @size[0]) ? @max_size - size : false}
times.times.map{Thread.new{if conn = try_make_new; @queue.push(conn) end}}.map(&:value)
end
else
while conn = try_make_new
@queue.push(conn)
end
end
nil
end
# Releases the connection assigned to the supplied thread back to the pool.
#
# Calling code should not have the mutex when calling this.
def release(thread)
@queue.push(sync{@allocated.delete(thread)})
end
# Yield to the block while inside the mutex.
#
# Calling code should not have the mutex when calling this.
def sync
@mutex.synchronize{yield}
end
end
sequel-5.63.0/lib/sequel/core.rb 0000664 0000000 0000000 00000040425 14342141206 0016426 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
%w'bigdecimal date thread time uri'.each{|f| require f}
# Top level module for Sequel
#
# There are some module methods that are added via metaprogramming, one for
# each supported adapter. For example:
#
# DB = Sequel.sqlite # Memory database
# DB = Sequel.sqlite('blog.db')
# DB = Sequel.postgres('database_name',
# user:'user',
# password: 'password',
# host: 'host'
# port: 5432,
# max_connections: 10)
#
# If a block is given to these methods, it is passed the opened Database
# object, which is closed (disconnected) when the block exits, just
# like a block passed to Sequel.connect. For example:
#
# Sequel.sqlite('blog.db'){|db| puts db[:users].count}
#
# For a more expanded introduction, see the {README}[rdoc-ref:README.rdoc].
# For a quicker introduction, see the {cheat sheet}[rdoc-ref:doc/cheat_sheet.rdoc].
module Sequel
@convert_two_digit_years = true
@datetime_class = Time
@split_symbols = false
@single_threaded = false
# Mutex used to protect mutable data structures
@data_mutex = Mutex.new
# Frozen hash used as the default options hash for most options.
OPTS = {}.freeze
SPLIT_SYMBOL_CACHE = {}
module SequelMethods
# Sequel converts two digit years in Dates and DateTimes by default,
# so 01/02/03 is interpreted as January 2nd, 2003, and 12/13/99 is interpreted
# as December 13, 1999. You can override this to treat those dates as
# January 2nd, 0003 and December 13, 0099, respectively, by:
#
# Sequel.convert_two_digit_years = false
attr_accessor :convert_two_digit_years
# Sequel can use either +Time+ or +DateTime+ for times returned from the
# database. It defaults to +Time+. To change it to +DateTime+:
#
# Sequel.datetime_class = DateTime
#
# Note that +Time+ and +DateTime+ objects have a different API, and in
# cases where they implement the same methods, they often implement them
# differently (e.g. + using seconds on +Time+ and days on +DateTime+).
attr_accessor :datetime_class
# Set whether Sequel is being used in single threaded mode. By default,
# Sequel uses a thread-safe connection pool, which isn't as fast as the
# single threaded connection pool, and also has some additional thread
# safety checks. If your program will only have one thread,
# and speed is a priority, you should set this to true:
#
# Sequel.single_threaded = true
attr_accessor :single_threaded
# Alias of original require method, as Sequel.require does a relative
# require for backwards compatibility.
alias orig_require require
private :orig_require
# Returns true if the passed object could be a specifier of conditions, false otherwise.
# Currently, Sequel considers hashes and arrays of two element arrays as
# condition specifiers.
#
# Sequel.condition_specifier?({}) # => true
# Sequel.condition_specifier?([[1, 2]]) # => true
# Sequel.condition_specifier?([]) # => false
# Sequel.condition_specifier?([1]) # => false
# Sequel.condition_specifier?(1) # => false
def condition_specifier?(obj)
case obj
when Hash
true
when Array
!obj.empty? && !obj.is_a?(SQL::ValueList) && obj.all?{|i| i.is_a?(Array) && (i.length == 2)}
else
false
end
end
# Creates a new database object based on the supplied connection string
# and optional arguments. The specified scheme determines the database
# class used, and the rest of the string specifies the connection options.
# For example:
#
# DB = Sequel.connect('sqlite:/') # Memory database
# DB = Sequel.connect('sqlite://blog.db') # ./blog.db
# DB = Sequel.connect('sqlite:///blog.db') # /blog.db
# DB = Sequel.connect('postgres://user:password@host:port/database_name')
# DB = Sequel.connect('sqlite:///blog.db', max_connections: 10)
#
# You can also pass a single options hash:
#
# DB = Sequel.connect(adapter: 'sqlite', database: './blog.db')
#
# If a block is given, it is passed the opened +Database+ object, which is
# closed when the block exits. For example:
#
# Sequel.connect('sqlite://blog.db'){|db| puts db[:users].count}
#
# If a block is not given, a reference to this database will be held in
# Sequel::DATABASES until it is removed manually. This is by
# design, and used by Sequel::Model to pick the default
# database. It is recommended to pass a block if you do not want the
# resulting Database object to remain in memory until the process
# terminates, or use the keep_reference: false Database option.
#
# For details, see the {"Connecting to a Database" guide}[rdoc-ref:doc/opening_databases.rdoc].
# To set up a primary/replica or sharded database connection, see the {"Primary/Replica Database Configurations and Sharding" guide}[rdoc-ref:doc/sharding.rdoc].
def connect(*args, &block)
Database.connect(*args, &block)
end
# Assume the core extensions are not loaded by default, if the core_extensions
# extension is loaded, this will be overridden.
def core_extensions?
false
end
# Convert the +exception+ to the given class. The given class should be
# Sequel::Error or a subclass. Returns an instance of +klass+ with
# the message and backtrace of +exception+.
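#
# For example:
#
# e = Sequel.convert_exception_class(ArgumentError.new("bad"), Sequel::Error)
# e.message # => "ArgumentError: bad"
# e.wrapped_exception # => the original ArgumentError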
def convert_exception_class(exception, klass)
return exception if exception.is_a?(klass)
e = klass.new("#{exception.class}: #{exception.message}")
e.wrapped_exception = exception
e.set_backtrace(exception.backtrace)
e
end
# The current concurrency primitive, Thread.current by default.
def current
Thread.current
end
# Load all Sequel extensions given. Extensions are just files that exist under
# sequel/extensions in the load path, and are just required.
# In some cases, requiring an extension modifies classes directly, and in others,
# it just loads a module that you can extend other classes with. Consult the documentation
# for each extension you plan on using for usage.
#
# Sequel.extension(:blank)
# Sequel.extension(:core_extensions, :named_timezones)
def extension(*extensions)
extensions.each{|e| orig_require("sequel/extensions/#{e}")}
end
# The exception class raised if there is an error parsing JSON.
# This can be overridden to use an alternative json implementation.
def json_parser_error_class
JSON::ParserError
end
# Convert given object to json and return the result.
# This can be overridden to use an alternative json implementation.
def object_to_json(obj, *args, &block)
obj.to_json(*args, &block)
end
# Parse the string as JSON and return the result.
# This can be overridden to use an alternative json implementation.
def parse_json(json)
JSON.parse(json, :create_additions=>false)
end
# If a mutex is given, synchronize access using it. If nil is given, just
# yield to the block. This is designed for cases where a mutex may or may
# not be provided.
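#
# For example:
#
# Sequel.synchronize_with(Mutex.new){1} # => 1 (inside the mutex)
# Sequel.synchronize_with(nil){1} # => 1 (no synchronization)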
def synchronize_with(mutex)
if mutex
mutex.synchronize{yield}
else
yield
end
end
# Convert each item in the array to the correct type, handling multi-dimensional
# arrays. For each element in the array or subarrays, call the converter,
# unless the value is nil.
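#
# For example, converting strings in a nested array to integers:
#
# Sequel.recursive_map([["1", nil], ["2"]], method(:Integer))
# # => [[1, nil], [2]]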
def recursive_map(array, converter)
array.map do |i|
if i.is_a?(Array)
recursive_map(i, converter)
elsif !i.nil?
converter.call(i)
end
end
end
# For backwards compatibility only. require_relative should be used instead.
def require(files, subdir=nil)
# Use Kernel.require_relative to work around JRuby 9.0 bug
Array(files).each{|f| Kernel.require_relative "#{"#{subdir}/" if subdir}#{f}"}
end
# Splits the symbol into three parts, if symbol splitting is enabled (not the default).
# Each part will either be a string or nil. If symbol splitting
# is disabled, returns an array with the first and third parts
# being nil, and the second part being a string version of the symbol.
#
# For columns, these parts are the table, column, and alias.
# For tables, these parts are the schema, table, and alias.
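#
# For example (assuming symbol splitting has been enabled):
#
# Sequel.split_symbol(:table__column) # => ["table", "column", nil]
# Sequel.split_symbol(:column___alias) # => [nil, "column", "alias"]
# Sequel.split_symbol(:table__column___alias) # => ["table", "column", "alias"]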
def split_symbol(sym)
unless v = Sequel.synchronize{SPLIT_SYMBOL_CACHE[sym]}
if split_symbols?
v = case s = sym.to_s
when /\A((?:(?!__).)+)__((?:(?!___).)+)___(.+)\z/
[$1.freeze, $2.freeze, $3.freeze].freeze
when /\A((?:(?!___).)+)___(.+)\z/
[nil, $1.freeze, $2.freeze].freeze
when /\A((?:(?!__).)+)__(.+)\z/
[$1.freeze, $2.freeze, nil].freeze
else
[nil, s.freeze, nil].freeze
end
else
v = [nil,sym.to_s.freeze,nil].freeze
end
Sequel.synchronize{SPLIT_SYMBOL_CACHE[sym] = v}
end
v
end
# Setting this to true enables Sequel's historical behavior of splitting
# symbols on double or triple underscores:
#
# :table__column # table.column
# :column___alias # column AS alias
# :table__column___alias # table.column AS alias
#
# It is only recommended to turn this on for backwards compatibility until
# such symbols have been converted to use newer Sequel APIs such as:
#
# Sequel[:table][:column] # table.column
# Sequel[:column].as(:alias) # column AS alias
# Sequel[:table][:column].as(:alias) # table.column AS alias
#
# Sequel::Database instances do their own caching of literalized
# symbols, and changing this setting does not affect those caches. It is
# recommended that if you want to change this setting, you do so directly
# after requiring Sequel, before creating any Sequel::Database instances.
#
# Disabling symbol splitting will also disable the handling
# of double underscores in virtual row methods, causing such methods to
# yield regular identifiers instead of qualified identifiers:
#
# # Sequel.split_symbols = true
# Sequel.expr{table__column} # table.column
# Sequel.expr{table[:column]} # table.column
#
# # Sequel.split_symbols = false
# Sequel.expr{table__column} # table__column
# Sequel.expr{table[:column]} # table.column
def split_symbols=(v)
Sequel.synchronize{SPLIT_SYMBOL_CACHE.clear}
@split_symbols = v
end
# Whether Sequel currently splits symbols into qualified/aliased identifiers.
def split_symbols?
@split_symbols
end
# Converts the given +string+ into a +Date+ object.
#
# Sequel.string_to_date('2010-09-10') # Date.civil(2010, 09, 10)
def string_to_date(string)
Date.parse(string, Sequel.convert_two_digit_years)
rescue => e
raise convert_exception_class(e, InvalidValue)
end
# Converts the given +string+ into a +Time+ or +DateTime+ object, depending on the
# value of Sequel.datetime_class.
#
# Sequel.string_to_datetime('2010-09-10 10:20:30') # Time.local(2010, 09, 10, 10, 20, 30)
def string_to_datetime(string)
if datetime_class == DateTime
DateTime.parse(string, convert_two_digit_years)
else
datetime_class.parse(string)
end
rescue => e
raise convert_exception_class(e, InvalidValue)
end
# Converts the given +string+ into a Sequel::SQLTime object.
#
# v = Sequel.string_to_time('10:20:30') # Sequel::SQLTime.parse('10:20:30')
# DB.literal(v) # => '10:20:30'
def string_to_time(string)
SQLTime.parse(string)
rescue => e
raise convert_exception_class(e, InvalidValue)
end
# Unless in single threaded mode, protects access to any mutable
# global data structure in Sequel.
# Uses a non-reentrant mutex, so calling code should be careful.
# In general, this should only be used around the minimal possible code
# such as Hash#[], Hash#[]=, Hash#delete, Array#<<, and Array#delete.
def synchronize(&block)
@single_threaded ? yield : @data_mutex.synchronize(&block)
end
if RUBY_VERSION >= '2.1'
# A timer object that can be passed to Sequel.elapsed_seconds_since
# to return the number of seconds elapsed.
def start_timer
Process.clock_gettime(Process::CLOCK_MONOTONIC)
end
else
# :nocov:
def start_timer # :nodoc:
Time.now
end
# :nocov:
end
# The elapsed seconds since the given timer object was created. The
# timer object should have been created via Sequel.start_timer.
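#
# A minimal sketch (the elapsed value shown is illustrative):
#
# timer = Sequel.start_timer
# # ... do some work ...
# Sequel.elapsed_seconds_since(timer) # => 0.000123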
def elapsed_seconds_since(timer)
start_timer - timer
end
# Uses a transaction on all given databases with the given options. This:
#
# Sequel.transaction([DB1, DB2, DB3]){}
#
# is equivalent to:
#
# DB1.transaction do
# DB2.transaction do
# DB3.transaction do
# end
# end
# end
#
# except that if Sequel::Rollback is raised by the block, the transaction is
# rolled back on all databases instead of just the last one.
#
# Note that this method cannot guarantee that all databases will commit or
# rollback. For example, if DB3 commits but attempting to commit on DB2
# fails (maybe because foreign key checks are deferred), there is no way
# to uncommit the changes on DB3. For that kind of support, you need to
# have two-phase commit/prepared transactions (which Sequel supports on
# some databases).
def transaction(dbs, opts=OPTS, &block)
unless opts[:rollback]
rescue_rollback = true
opts = Hash[opts].merge!(:rollback=>:reraise)
end
pr = dbs.reverse.inject(block){|bl, db| proc{db.transaction(opts, &bl)}}
if rescue_rollback
begin
pr.call
rescue Sequel::Rollback
nil
end
else
pr.call
end
end
# If the supplied block takes a single argument,
# yield an SQL::VirtualRow instance to the block
# argument. Otherwise, evaluate the block in the context of a
# SQL::VirtualRow instance.
#
# Sequel.virtual_row{a} # Sequel::SQL::Identifier.new(:a)
# Sequel.virtual_row{|o| o.a} # Sequel::SQL::Function.new(:a)
def virtual_row(&block)
vr = VIRTUAL_ROW
case block.arity
when -1, 0
vr.instance_exec(&block)
else
block.call(vr)
end
end
private
# Return a hash of date information parsed from the given string.
def _date_parse(string)
Date._parse(string)
end
# Helper method used by the database adapter class methods that are added to
# Sequel via metaprogramming to parse their arguments.
def adapter_method(adapter, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : OPTS
opts = {:adapter => adapter.to_sym}
opts[:database] = args.shift if args.first.is_a?(String)
if args.any?
raise ::Sequel::Error, "Wrong format of arguments, either use (), (String), (Hash), or (String, Hash)"
end
connect(opts.merge(options), &block)
end
# Method that adds a database adapter class method to Sequel that calls
# Sequel.adapter_method.
def def_adapter_method(*adapters) # :nodoc:
adapters.each do |adapter|
define_singleton_method(adapter){|*args, &block| adapter_method(adapter, *args, &block)}
end
end
end
extend SequelMethods
require_relative "deprecated"
require_relative "sql"
require_relative "connection_pool"
require_relative "exceptions"
require_relative "dataset"
require_relative "database"
require_relative "timezones"
require_relative "ast_transformer"
require_relative "version"
class << self
# Allow nicer syntax for creating Sequel expressions:
#
# Sequel[1] # => Sequel::SQL::NumericExpression: 1
# Sequel["a"] # => Sequel::SQL::StringExpression: 'a'
# Sequel[:a] # => Sequel::SQL::Identifier: "a"
# Sequel[a: 1] # => Sequel::SQL::BooleanExpression: ("a" = 1)
alias_method :[], :expr
end
# Add the database adapter class methods to Sequel via metaprogramming
def_adapter_method(*Database::ADAPTERS)
end
sequel-5.63.0/lib/sequel/database.rb 0000664 0000000 0000000 00000002563 14342141206 0017243 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
# Hash of adapters that have been used. The key is the adapter scheme
# symbol, and the value is the Database subclass.
ADAPTER_MAP = {}
# Hash of shared adapters that have been registered. The key is the
# adapter scheme symbol, and the value is the Sequel module containing
# the shared adapter.
SHARED_ADAPTER_MAP = {}
# Array of all databases to which Sequel has connected. If you are
# developing an application that can connect to an arbitrary number of
# databases, delete the database objects from this (or use the :keep_reference
# Database option or a block when connecting) or they will not get
# garbage collected.
DATABASES = []
# A Database object represents a virtual connection to a database.
# The Database class is meant to be subclassed by database adapters in order
# to provide the functionality needed for executing queries.
class Database
OPTS = Sequel::OPTS
end
require_relative "database/connecting"
require_relative "database/dataset"
require_relative "database/dataset_defaults"
require_relative "database/logging"
require_relative "database/features"
require_relative "database/misc"
require_relative "database/query"
require_relative "database/transactions"
require_relative "database/schema_generator"
require_relative "database/schema_methods"
end
sequel-5.63.0/lib/sequel/database/ 0000775 0000000 0000000 00000000000 14342141206 0016710 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/database/connecting.rb 0000664 0000000 0000000 00000027563 14342141206 0021401 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 4 - Methods relating to adapters, connecting, disconnecting, and sharding
# These methods involve the Database's connection pool.
# ---------------------
# Array of supported database adapters
ADAPTERS = %w'ado amalgalite ibmdb jdbc mock mysql mysql2 odbc oracle postgres sqlanywhere sqlite tinytds'.map(&:to_sym)
# The Database subclass for the given adapter scheme.
# Raises Sequel::AdapterNotFound if the adapter
# could not be loaded.
def self.adapter_class(scheme)
scheme.is_a?(Class) ? scheme : load_adapter(scheme.to_sym)
end
# Returns the scheme symbol for the Database class.
def self.adapter_scheme
@scheme
end
# Connects to a database. See Sequel.connect.
def self.connect(conn_string, opts = OPTS)
case conn_string
when String
if conn_string.start_with?('jdbc:')
c = adapter_class(:jdbc)
opts = opts.merge(:orig_opts=>opts.dup)
opts = {:uri=>conn_string}.merge!(opts)
else
uri = URI.parse(conn_string)
scheme = uri.scheme
c = adapter_class(scheme)
uri_options = c.send(:uri_to_options, uri)
uri.query.split('&').map{|s| s.split('=')}.each{|k,v| uri_options[k.to_sym] = v if k && !k.empty?} unless uri.query.to_s.strip.empty?
uri_options.to_a.each{|k,v| uri_options[k] = URI::DEFAULT_PARSER.unescape(v) if v.is_a?(String)}
opts = uri_options.merge(opts).merge!(:orig_opts=>opts.dup, :uri=>conn_string, :adapter=>scheme)
end
when Hash
opts = conn_string.merge(opts)
opts = opts.merge(:orig_opts=>opts.dup)
c = adapter_class(opts[:adapter_class] || opts[:adapter] || opts['adapter'])
else
raise Error, "Sequel::Database.connect takes either a Hash or a String, given: #{conn_string.inspect}"
end
opts = opts.inject({}) do |m, (k,v)|
k = :user if k.to_s == 'username'
m[k.to_sym] = v
m
end
begin
db = c.new(opts)
if defined?(yield)
return yield(db)
end
ensure
if defined?(yield)
db.disconnect if db
Sequel.synchronize{::Sequel::DATABASES.delete(db)}
end
end
db
end
# Load the adapter from the file system. Raises Sequel::AdapterNotFound
# if the adapter cannot be loaded, or if the adapter isn't registered
# correctly after being loaded. Options:
# :map :: The Hash in which to look for an already loaded adapter (defaults to ADAPTER_MAP).
# :subdir :: The subdirectory of sequel/adapters to look in, only to be used for loading
# subadapters.
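#
# For example (assuming the sqlite3 gem is installed):
#
# Sequel::Database.load_adapter(:sqlite)
# # => Sequel::SQLite::Database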
def self.load_adapter(scheme, opts=OPTS)
map = opts[:map] || ADAPTER_MAP
if subdir = opts[:subdir]
file = "#{subdir}/#{scheme}"
else
file = scheme
end
unless obj = Sequel.synchronize{map[scheme]}
# attempt to load the adapter file
begin
require "sequel/adapters/#{file}"
rescue LoadError => e
# If the subadapter file doesn't exist, just return,
# using the main adapter class without database customizations.
return if subdir
raise Sequel.convert_exception_class(e, AdapterNotFound)
end
# make sure we actually loaded the adapter
unless obj = Sequel.synchronize{map[scheme]}
raise AdapterNotFound, "Could not load #{file} adapter: adapter class not registered in ADAPTER_MAP"
end
end
obj
end
# Sets the adapter scheme for the Database class. Call this method in
# descendants of Database to allow connection using a URL. For example the
# following:
#
# class Sequel::MyDB::Database < Sequel::Database
# set_adapter_scheme :mydb
# ...
# end
#
# would allow connection using:
#
# Sequel.connect('mydb://user:password@dbserver/mydb')
def self.set_adapter_scheme(scheme) # :nodoc:
@scheme = scheme
Sequel.synchronize{ADAPTER_MAP[scheme] = self}
end
private_class_method :set_adapter_scheme
# Sets the given module as the shared adapter module for the given scheme.
# Used to register shared adapters for use by the mock adapter. Example:
#
# # in file sequel/adapters/shared/mydb.rb
# module Sequel::MyDB
# Sequel::Database.set_shared_adapter_scheme :mydb, self
#
# def self.mock_adapter_setup(db)
# # ...
# end
#
# module DatabaseMethods
# # ...
# end
#
# module DatasetMethods
# # ...
# end
# end
#
# would allow the mock adapter to return a Database instance that supports
# the MyDB syntax via:
#
# Sequel.connect('mock://mydb')
def self.set_shared_adapter_scheme(scheme, mod)
Sequel.synchronize{SHARED_ADAPTER_MAP[scheme] = mod}
end
# The connection pool for this Database instance. All Database instances have
# their own connection pools.
attr_reader :pool
# Returns the scheme symbol for this instance's class, which reflects which
# adapter is being used. In some cases, this can be the same as the
# +database_type+ (for native adapters); in others (e.g. adapters with
# subadapters), it will be different.
#
# Sequel.connect('jdbc:postgres://...').adapter_scheme
# # => :jdbc
def adapter_scheme
self.class.adapter_scheme
end
# Dynamically add new servers or modify server options at runtime. Also adds new
# servers to the connection pool. Only usable when using a sharded connection pool.
#
# servers argument should be a hash with server name symbol keys and hash or
# proc values. If a servers key is already in use, its value is overridden
# with the value provided.
#
# DB.add_servers(f: {host: "hash_host_f"})
def add_servers(servers)
unless sharded?
raise Error, "cannot call Database#add_servers on a Database instance that does not use a sharded connection pool"
end
h = @opts[:servers]
Sequel.synchronize{h.merge!(servers)}
@pool.add_servers(servers.keys)
end
# The database type for this database object, the same as the adapter scheme
# by default. Should be overridden in adapters (especially shared adapters)
# to be the correct type, so that even if two separate Database objects are
# using different adapters you can tell that they are using the same database
# type. Even better, you can tell that two Database objects that are using
# the same adapter are connecting to different database types.
#
# Sequel.connect('jdbc:postgres://...').database_type
# # => :postgres
def database_type
adapter_scheme
end
# Disconnects all available connections from the connection pool. Any
# connections currently in use will not be disconnected. Options:
# :server :: Should be a symbol specifying the server to disconnect from,
# or an array of symbols to specify multiple servers.
#
# Example:
#
# DB.disconnect # All servers
# DB.disconnect(server: :server1) # Single server
# DB.disconnect(server: [:server1, :server2]) # Multiple servers
def disconnect(opts = OPTS)
pool.disconnect(opts)
end
# Should only be called by the connection pool code to disconnect a connection.
# By default, calls the close method on the connection object, since most
# adapters use that, but should be overwritten on other adapters.
def disconnect_connection(conn)
conn.close
end
# Dynamically remove existing servers from the connection pool. Only usable
# when using a sharded connection pool
#
# servers should be symbols or arrays of symbols. If a nonexistent server
# is specified, it is ignored. If no servers have been specified for
# this database, no changes are made. If you attempt to remove the :default server,
# an error will be raised.
#
# DB.remove_servers(:f1, :f2)
def remove_servers(*servers)
unless sharded?
raise Error, "cannot call Database#remove_servers on a Database instance that does not use a sharded connection pool"
end
h = @opts[:servers]
servers.flatten.each{|s| Sequel.synchronize{h.delete(s)}}
@pool.remove_servers(servers)
end
# An array of servers/shards for this Database object.
#
# DB.servers # Unsharded: => [:default]
# DB.servers # Sharded: => [:default, :server1, :server2]
def servers
pool.servers
end
# Connect to the given server/shard. Handles database-generic post-connection
# setup not handled by #connect, using the :after_connect and :connect_sqls
# options.
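#
# These options could be set up when connecting, for example (a sketch;
# the SQL shown assumes PostgreSQL):
#
# DB = Sequel.connect('postgres://...',
# after_connect: proc{|conn, server| p [:connected, server]},
# connect_sqls: ['SET random_page_cost = 1.1'])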
def new_connection(server)
conn = connect(server)
opts = server_opts(server)
if ac = opts[:after_connect]
if ac.arity == 2
ac.call(conn, server)
else
ac.call(conn)
end
end
if cs = opts[:connect_sqls]
cs.each do |sql|
log_connection_execute(conn, sql)
end
end
conn
end
# Returns true if the database is using a single-threaded connection pool.
def single_threaded?
@single_threaded
end
if RUBY_ENGINE == 'ruby' && RUBY_VERSION < '2.5'
# :nocov:
def synchronize(server=nil)
@pool.hold(server || :default){|conn| yield conn}
end
# :nocov:
else
# Acquires a database connection, yielding it to the passed block. This is
# useful if you want to make sure the same connection is used for all
# database queries in the block. It is also useful if you want to gain
# direct access to the underlying connection object if you need to do
# something Sequel does not natively support.
#
# If a server option is given, acquires a connection for that specific
# server, instead of the :default server.
#
# DB.synchronize do |conn|
# # ...
# end
def synchronize(server=nil, &block)
@pool.hold(server || :default, &block)
end
end
# Attempts to acquire a database connection. Returns true if successful.
# Will probably raise an Error if unsuccessful. If a server argument
# is given, attempts to acquire a database connection to the given
# server/shard.
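#
# For example:
#
# DB.test_connection # => true
# DB.test_connection(:server1) # => true (assuming a :server1 shard)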
def test_connection(server=nil)
synchronize(server){|conn|}
true
end
# Check whether the given connection is currently valid, by
# running a query against it. If the query fails, the
# connection should probably be removed from the connection
# pool.
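#
# For example, checking a specific connection via Database#synchronize:
#
# DB.synchronize{|conn| DB.valid_connection?(conn)} # => true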
def valid_connection?(conn)
sql = valid_connection_sql
begin
log_connection_execute(conn, sql)
rescue Sequel::DatabaseError, *database_error_classes
false
else
true
end
end
private
# The default options for the connection pool.
def connection_pool_default_options
{}
end
# Return the options for the given server by merging the generic
# options for all servers with the specific options for the given
# server specified in the :servers option.
def server_opts(server)
opts = if @opts[:servers] and server_options = @opts[:servers][server]
case server_options
when Hash
@opts.merge(server_options)
when Proc
@opts.merge(server_options.call(self))
else
raise Error, 'Server opts should be a hash or proc'
end
elsif server.is_a?(Hash)
@opts.merge(server)
else
@opts.dup
end
opts.delete(:servers)
opts
end
# The SQL query to issue to check if a connection is valid.
def valid_connection_sql
@valid_connection_sql ||= select(nil).sql
end
end
end
sequel-5.63.0/lib/sequel/database/dataset.rb 0000664 0000000 0000000 00000005063 14342141206 0020666 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 3 - Methods that create datasets
# These methods all return instances of this database's dataset class.
# ---------------------
# Returns a dataset for the database. If the first argument is a string,
# the method acts as an alias for Database#fetch, returning a dataset for
# arbitrary SQL, with or without placeholders:
#
# DB['SELECT * FROM items'].all
# DB['SELECT * FROM items WHERE name = ?', my_name].all
#
# Otherwise, acts as an alias for Database#from, setting the primary
# table for the dataset:
#
# DB[:items].sql #=> "SELECT * FROM items"
def [](*args)
args.first.is_a?(String) ? fetch(*args) : from(*args)
end
# Returns a blank dataset for this database.
#
# DB.dataset # SELECT *
# DB.dataset.from(:items) # SELECT * FROM items
def dataset
@dataset_class.new(self)
end
# Fetches records for an arbitrary SQL statement. If a block is given,
# it is used to iterate over the records:
#
# DB.fetch('SELECT * FROM items'){|r| p r}
#
# The +fetch+ method returns a dataset instance:
#
# DB.fetch('SELECT * FROM items').all
#
# +fetch+ can also perform parameterized queries for protection against SQL
# injection:
#
# DB.fetch('SELECT * FROM items WHERE name = ?', my_name).all
#
# See caveats listed in Dataset#with_sql regarding datasets using custom
# SQL and the methods that can be called on them.
def fetch(sql, *args, &block)
ds = @default_dataset.with_sql(sql, *args)
ds.each(&block) if block
ds
end
# Returns a new dataset with the +from+ method invoked. If a block is given,
# it acts as a virtual row block
#
# DB.from(:items) # SELECT * FROM items
# DB.from{schema[:table]} # SELECT * FROM schema.table
def from(*args, &block)
if block
@default_dataset.from(*args, &block)
elsif args.length == 1 && (table = args[0]).is_a?(Symbol)
@default_dataset.send(:cached_dataset, :"_from_#{table}_ds"){@default_dataset.from(table)}
else
@default_dataset.from(*args)
end
end
# Returns a new dataset with the select method invoked.
#
# DB.select(1) # SELECT 1
# DB.select{server_version.function} # SELECT server_version()
# DB.select(:id).from(:items) # SELECT id FROM items
def select(*args, &block)
@default_dataset.select(*args, &block)
end
end
end
sequel-5.63.0/lib/sequel/database/dataset_defaults.rb 0000664 0000000 0000000 00000006016 14342141206 0022554 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 5 - Methods that set defaults for created datasets
# These methods change the default behavior of this database's datasets.
# ---------------------
# The class to use for creating datasets. Should respond to
# new with the Database argument as the first argument, and
# an optional options hash.
attr_reader :dataset_class
# If the database has any dataset modules associated with it,
# use a subclass of the given class that includes the modules
# as the dataset class.
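#
# For example (MyDataset is a hypothetical Sequel::Dataset subclass):
#
# DB.dataset_class = MyDataset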
def dataset_class=(c)
unless @dataset_modules.empty?
c = Class.new(c)
@dataset_modules.each{|m| c.send(:include, m)}
end
@dataset_class = c
reset_default_dataset
end
# Equivalent to extending all datasets produced by the database with a
# module. What it actually does is use a subclass of the current dataset_class
# as the new dataset_class, and include the module in the subclass.
# Instead of a module, you can provide a block that is used to create an
# anonymous module.
#
# This allows you to override any of the dataset methods even if they are
# defined directly on the dataset class that this Database object uses.
#
# If a block is given, a Dataset::DatasetModule instance is created, allowing
# for the easy creation of named dataset methods that will do caching.
#
# Examples:
#
# # Introspect columns for all of DB's datasets
# DB.extend_datasets(Sequel::ColumnsIntrospection)
#
# # Trace all SELECT queries by printing the SQL and the full backtrace
# DB.extend_datasets do
# def fetch_rows(sql)
# puts sql
# puts caller
# super
# end
# end
#
# # Add some named dataset methods
# DB.extend_datasets do
# order :by_id, :id
# select :with_id_and_name, :id, :name
# where :active, :active
# end
#
# DB[:table].active.with_id_and_name.by_id
# # SELECT id, name FROM table WHERE active ORDER BY id
def extend_datasets(mod=nil, &block)
raise(Error, "must provide either mod or block, not both") if mod && block
mod = Dataset::DatasetModule.new(&block) if block
if @dataset_modules.empty?
@dataset_modules = [mod]
@dataset_class = Class.new(@dataset_class)
else
@dataset_modules << mod
end
@dataset_class.send(:include, mod)
reset_default_dataset
end
private
# The default dataset class to use for the database
def dataset_class_default
Sequel::Dataset
end
# Reset the default dataset used by most Database methods that create datasets.
def reset_default_dataset
Sequel.synchronize{@symbol_literal_cache.clear}
@default_dataset = dataset
end
# Whether to quote identifiers by default for this database, true by default.
def quote_identifiers_default
true
end
end
end
sequel-5.63.0/lib/sequel/database/features.rb 0000664 0000000 0000000 00000011215 14342141206 0021053 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 9 - Methods that describe what the database supports
# These methods all return booleans, with most describing whether or not the
# database supports a given feature.
# ---------------------
# Whether the database uses a global namespace for the index, true by default. If
# false, the indexes are going to be namespaced per table.
def global_index_namespace?
true
end
# Whether the database supports CREATE TABLE IF NOT EXISTS syntax,
# false by default.
def supports_create_table_if_not_exists?
false
end
# Whether the database supports deferrable constraints, false
# by default as few databases do.
def supports_deferrable_constraints?
false
end
# Whether the database supports deferrable foreign key constraints,
# false by default as few databases do.
def supports_deferrable_foreign_key_constraints?
supports_deferrable_constraints?
end
# Whether the database supports DROP TABLE IF EXISTS syntax,
# false by default.
def supports_drop_table_if_exists?
supports_create_table_if_not_exists?
end
# Whether the database supports Database#foreign_key_list for
# parsing foreign keys.
def supports_foreign_key_parsing?
respond_to?(:foreign_key_list)
end
# Whether the database supports Database#indexes for parsing indexes.
def supports_index_parsing?
respond_to?(:indexes)
end
# Whether the database supports partial indexes (indexes on a subset of a table),
# false by default.
def supports_partial_indexes?
false
end
# Whether the database and adapter support prepared transactions
# (two-phase commit), false by default.
def supports_prepared_transactions?
false
end
# Whether the database and adapter support savepoints, false by default.
def supports_savepoints?
false
end
# Whether the database and adapter support savepoints inside prepared transactions
# (two-phase commit), false by default.
def supports_savepoints_in_prepared_transactions?
supports_prepared_transactions? && supports_savepoints?
end
# Whether the database supports schema parsing via Database#schema.
def supports_schema_parsing?
respond_to?(:schema_parse_table, true)
end
# Whether the database supports Database#tables for getting a list of tables.
def supports_table_listing?
respond_to?(:tables)
end
# Whether the database supports Database#views for getting a list of views.
def supports_view_listing?
respond_to?(:views)
end
# Whether the database and adapter support transaction isolation levels, false by default.
def supports_transaction_isolation_levels?
false
end
# Whether DDL statements work correctly in transactions, false by default.
def supports_transactional_ddl?
false
end
# Whether CREATE VIEW ... WITH CHECK OPTION is supported, false by default.
def supports_views_with_check_option?
!!view_with_check_option_support
end
# Whether CREATE VIEW ... WITH LOCAL CHECK OPTION is supported, false by default.
def supports_views_with_local_check_option?
view_with_check_option_support == :local
end
private
# Whether the database supports adding primary key constraints on NULLable columns,
# automatically making them NOT NULL. If false, the columns must be set NOT NULL
# before the primary key constraint is added.
def can_add_primary_key_constraint_on_nullable_columns?
true
end
# Whether this database considers unquoted identifiers as uppercase. True
# by default, as that is the SQL standard.
def folds_unquoted_identifiers_to_uppercase?
true
end
# Whether the database supports combining multiple alter table
# operations into a single query, false by default.
def supports_combining_alter_table_ops?
false
end
# Whether the database supports CREATE OR REPLACE VIEW. If not, support
# will be emulated by dropping the view first. false by default.
def supports_create_or_replace_view?
false
end
# Whether the database supports named column constraints. True
# by default. Those that don't support named column constraints
# have to have column constraints converted to table constraints
# if the column constraints have names.
def supports_named_column_constraints?
true
end
# Don't advertise support for WITH CHECK OPTION by default.
def view_with_check_option_support
nil
end
end
end
sequel-5.63.0/lib/sequel/database/logging.rb 0000664 0000000 0000000 00000006023 14342141206 0020664 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 6 - Methods relating to logging
# These methods relate to the logging of executed SQL.
# ---------------------
# Numeric specifying the duration beyond which queries are logged at warn
# level instead of info level.
attr_accessor :log_warn_duration
# Array of SQL loggers to use for this database.
attr_accessor :loggers
# Whether to include information about the connection in use when logging queries.
attr_accessor :log_connection_info
# Log level at which to log SQL queries. This is actually the method
# sent to the logger, so it should be the method name symbol. The default
# is :info, it can be set to :debug to log at DEBUG level.
attr_accessor :sql_log_level
# Log a message at error level, with information about the exception.
def log_exception(exception, message)
log_each(:error, "#{exception.class}: #{exception.message.strip if exception.message}: #{message}")
end
# Log a message at level info to all loggers.
def log_info(message, args=nil)
log_each(:info, args ? "#{message}; #{args.inspect}" : message)
end
# Yield to the block, logging any errors at error level to all loggers,
# and all other queries with the duration at warn or info level.
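#
# Adapters wrap query execution with this method, roughly (a sketch; the
# actual execution call is adapter-specific):
#
# log_connection_yield(sql, conn){conn.execute(sql)}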
def log_connection_yield(sql, conn, args=nil)
return yield if skip_logging?
sql = "#{connection_info(conn) if conn && log_connection_info}#{sql}#{"; #{args.inspect}" if args}"
timer = Sequel.start_timer
begin
yield
rescue => e
log_exception(e, sql)
raise
ensure
log_duration(Sequel.elapsed_seconds_since(timer), sql) unless e
end
end
# Remove any existing loggers and just use the given logger:
#
# DB.logger = Logger.new($stdout)
def logger=(logger)
@loggers = Array(logger)
end
private
# Determine if logging should be skipped. Defaults to true if no loggers
# have been specified.
def skip_logging?
@loggers.empty?
end
# String including information about the connection, for use when logging
# connection info.
def connection_info(conn)
"(conn: #{conn.__id__}) "
end
# Log the given SQL and then execute it on the connection, used by
# the transaction code.
def log_connection_execute(conn, sql)
log_connection_yield(sql, conn){conn.public_send(connection_execute_method, sql)}
end
# Log message with message prefixed by duration at info level, or
# warn level if duration is greater than log_warn_duration.
def log_duration(duration, message)
log_each((lwd = log_warn_duration and duration >= lwd) ? :warn : sql_log_level, "(#{sprintf('%0.6fs', duration)}) #{message}")
end
# Log message at level (which should be :error, :warn, or :info)
# to all loggers.
def log_each(level, message)
@loggers.each{|logger| logger.public_send(level, message)}
end
end
end
sequel-5.63.0/lib/sequel/database/misc.rb 0000664 0000000 0000000 00000054410 14342141206 0020174 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 7 - Miscellaneous methods
# These methods don't fit neatly into another category.
# ---------------------
# Hash of extension name symbols to callable objects to load the extension
# into the Database object (usually by extending it with a module defined
# in the extension).
EXTENSIONS = {}
# The general default size for string columns for all Sequel::Database
# instances.
DEFAULT_STRING_COLUMN_SIZE = 255
# Empty exception regexp to class map, used by default if Sequel doesn't
# have specific support for the database in use.
DEFAULT_DATABASE_ERROR_REGEXPS = {}.freeze
# Mapping of schema type symbols to class or arrays of classes for that
# symbol.
SCHEMA_TYPE_CLASSES = {:string=>String, :integer=>Integer, :date=>Date, :datetime=>[Time, DateTime].freeze,
:time=>Sequel::SQLTime, :boolean=>[TrueClass, FalseClass].freeze, :float=>Float, :decimal=>BigDecimal,
:blob=>Sequel::SQL::Blob}.freeze
# Nested hook Proc; each new hook Proc just wraps the previous one.
@initialize_hook = proc{|db| }
# Register a hook that will be run when a new Database is instantiated. It is
# called with the new database handle.
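#
# For example, to default all new Database instances to debug-level query logging:
#
# Sequel::Database.after_initialize do |db|
# db.sql_log_level = :debug
# end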
def self.after_initialize(&block)
raise Error, "must provide block to after_initialize" unless block
Sequel.synchronize do
previous = @initialize_hook
@initialize_hook = proc do |db|
previous.call(db)
block.call(db)
end
end
end
# Apply an extension to all Database objects created in the future.
def self.extension(*extensions)
after_initialize{|db| db.extension(*extensions)}
end
# Register an extension callback for Database objects. ext should be the
# extension name symbol, and mod should either be a Module that the
# database is extended with, or a callable object called with the database
# object. If mod is not provided, a block can be provided and is treated
# as the mod object.
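#
# For example (a sketch with a hypothetical extension name and module):
#
# Sequel::Database.register_extension(:my_ext, MyExtDatabaseMethods)
#
# # or, equivalently, using a block:
# Sequel::Database.register_extension(:my_ext){|db| db.extend(MyExtDatabaseMethods)}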
def self.register_extension(ext, mod=nil, &block)
if mod
raise(Error, "cannot provide both mod and block to Database.register_extension") if block
if mod.is_a?(Module)
block = proc{|db| db.extend(mod)}
else
block = mod
end
end
Sequel.synchronize{EXTENSIONS[ext] = block}
end
# Run the after_initialize hook for the given +instance+.
def self.run_after_initialize(instance)
@initialize_hook.call(instance)
end
# Converts a uri to an options hash. These options are then passed
# to a newly created database object.
def self.uri_to_options(uri)
{
:user => uri.user,
:password => uri.password,
:port => uri.port,
:host => uri.hostname,
:database => (m = /\/(.*)/.match(uri.path)) && (m[1])
}
end
private_class_method :uri_to_options
# The options hash for this database
attr_reader :opts
# Set the timezone to use for this database, overriding Sequel.database_timezone.
attr_writer :timezone
# The specific default size of string columns for this Sequel::Database, usually 255 by default.
attr_accessor :default_string_column_size
# Whether to check the bytesize of strings before typecasting (to avoid typecasting strings that
# would be too long for the given type), true by default. Strings that are too long will raise
# a typecasting error.
attr_accessor :check_string_typecast_bytesize
# Constructs a new instance of a database connection with the specified
# options hash.
#
# Accepts the following options:
# :after_connect :: A callable object called after each new connection is made, with the
# connection object (and server argument if the callable accepts 2 arguments),
# useful for customizations that you want to apply to all connections.
# :before_preconnect :: Callable that runs after extensions from :preconnect_extensions are loaded,
# but before any connections are created.
# :cache_schema :: Whether schema should be cached for this Database instance
# :check_string_typecast_bytesize :: Whether to check the bytesize of strings before typecasting.
# :connect_sqls :: An array of sql strings to execute on each new connection, after :after_connect runs.
# :default_string_column_size :: The default size of string columns, 255 by default.
# :extensions :: Extensions to load into this Database instance. Can be a symbol, array of symbols,
# or string with extensions separated by commas. These extensions are loaded after
# connections are made by the :preconnect option.
# :keep_reference :: Whether to keep a reference to this instance in Sequel::DATABASES, true by default.
# :logger :: A specific logger to use.
# :loggers :: An array of loggers to use.
# :log_connection_info :: Whether connection information should be logged when logging queries.
# :log_warn_duration :: The number of elapsed seconds after which queries should be logged at warn level.
# :name :: A name to use for the Database object, displayed in PoolTimeout.
# :preconnect :: Automatically create the maximum number of connections, so that they don't
# need to be created as needed. This is useful when connecting takes a long time
# and you want to avoid possible latency during runtime.
# Set to :concurrently to create the connections in separate threads. Otherwise
# they'll be created sequentially.
# :preconnect_extensions :: Similar to the :extensions option, but loads the extensions before the
# connections are made by the :preconnect option.
# :quote_identifiers :: Whether to quote identifiers.
# :servers :: A hash specifying a server/shard specific options, keyed by shard symbol.
# :single_threaded :: Whether to use a single-threaded connection pool.
# :sql_log_level :: Method to use to log SQL to a logger, :info by default.
#
# For sharded connection pools, :after_connect and :connect_sqls can be specified per-shard.
#
# All options given are also passed to the connection pool. Additional options respected by
# the connection pool are :max_connections, :pool_timeout, :servers, and :servers_hash. See the
# connection pool documentation for details.
def initialize(opts = OPTS)
@opts ||= opts
@opts = connection_pool_default_options.merge(@opts)
@loggers = Array(@opts[:logger]) + Array(@opts[:loggers])
@opts[:servers] = {} if @opts[:servers].is_a?(String)
@sharded = !!@opts[:servers]
@opts[:adapter_class] = self.class
@opts[:single_threaded] = @single_threaded = typecast_value_boolean(@opts.fetch(:single_threaded, Sequel.single_threaded))
@default_string_column_size = @opts[:default_string_column_size] || DEFAULT_STRING_COLUMN_SIZE
@check_string_typecast_bytesize = typecast_value_boolean(@opts.fetch(:check_string_typecast_bytesize, true))
@schemas = {}
@prepared_statements = {}
@transactions = {}
@symbol_literal_cache = {}
@timezone = nil
@dataset_class = dataset_class_default
@cache_schema = typecast_value_boolean(@opts.fetch(:cache_schema, true))
@dataset_modules = []
@loaded_extensions = []
@schema_type_classes = SCHEMA_TYPE_CLASSES.dup
self.sql_log_level = @opts[:sql_log_level] ? @opts[:sql_log_level].to_sym : :info
self.log_warn_duration = @opts[:log_warn_duration]
self.log_connection_info = typecast_value_boolean(@opts[:log_connection_info])
@pool = ConnectionPool.get_pool(self, @opts)
reset_default_dataset
adapter_initialize
keep_reference = typecast_value_boolean(@opts[:keep_reference]) != false
begin
Sequel.synchronize{::Sequel::DATABASES.push(self)} if keep_reference
Sequel::Database.run_after_initialize(self)
initialize_load_extensions(:preconnect_extensions)
if before_preconnect = @opts[:before_preconnect]
before_preconnect.call(self)
end
if typecast_value_boolean(@opts[:preconnect]) && @pool.respond_to?(:preconnect, true)
concurrent = typecast_value_string(@opts[:preconnect]) == "concurrently"
@pool.send(:preconnect, concurrent)
end
initialize_load_extensions(:extensions)
test_connection if typecast_value_boolean(@opts.fetch(:test, true)) && respond_to?(:connect, true)
rescue
Sequel.synchronize{::Sequel::DATABASES.delete(self)} if keep_reference
raise
end
end
# Freeze internal data structures for the Database instance.
def freeze
valid_connection_sql
metadata_dataset
@opts.freeze
@loggers.freeze
@pool.freeze
@dataset_class.freeze
@dataset_modules.freeze
@schema_type_classes.freeze
@loaded_extensions.freeze
super
end
# Disallow dup/clone for Database instances
undef_method :dup, :clone, :initialize_copy
# :nocov:
if RUBY_VERSION >= '1.9.3'
# :nocov:
undef_method :initialize_clone, :initialize_dup
end
# Cast the given type to a literal type
#
# DB.cast_type_literal(Float) # double precision
# DB.cast_type_literal(:foo) # foo
def cast_type_literal(type)
type_literal(:type=>type)
end
# Load an extension into the receiver. In addition to requiring the extension file, this
# also modifies the database to work with the extension (usually extending it with a
# module defined in the extension file). If no related extension file exists or the
# extension does not have specific support for Database objects, an Error will be raised.
# Returns self.
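#
# Example (illustrative; which extensions apply depends on the adapter
# in use):
#
#   DB.extension(:pg_array)           # load a single Database extension
#   DB.extension(:pg_array, :pg_json) # load several at once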
def extension(*exts)
Sequel.extension(*exts)
exts.each do |ext|
if pr = Sequel.synchronize{EXTENSIONS[ext]}
if Sequel.synchronize{@loaded_extensions.include?(ext) ? false : (@loaded_extensions << ext)}
pr.call(self)
end
else
raise(Error, "Extension #{ext} does not have specific support handling individual databases (try: Sequel.extension #{ext.inspect})")
end
end
self
end
# Convert the given timestamp from the application's timezone,
# to the database's timezone or the default database timezone if
# the database does not have a timezone.
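#
# For example, a sketch assuming the timezones have been configured:
#
#   Sequel.application_timezone = :local
#   Sequel.database_timezone = :utc
#   DB.from_application_timestamp(Time.now) # => equivalent Time in UTC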
def from_application_timestamp(v)
Sequel.convert_output_timestamp(v, timezone)
end
# Returns a string representation of the database object including the
# class name and connection URI and options used when connecting (if any).
def inspect
a = []
a << uri.inspect if uri
if (oo = opts[:orig_opts]) && !oo.empty?
a << oo.inspect
end
"#<#{self.class}: #{a.join(' ')}>"
end
# Proxy the literal call to the dataset.
#
# DB.literal(1) # 1
# DB.literal(:a) # a
# DB.literal('a') # 'a'
def literal(v)
schema_utility_dataset.literal(v)
end
# Return the literalized version of the symbol if cached, or
# nil if it is not cached.
def literal_symbol(sym)
Sequel.synchronize{@symbol_literal_cache[sym]}
end
# Set the cached value of the literal symbol.
def literal_symbol_set(sym, lit)
Sequel.synchronize{@symbol_literal_cache[sym] = lit}
end
# Synchronize access to the prepared statements cache.
def prepared_statement(name)
Sequel.synchronize{prepared_statements[name]}
end
# Proxy the quote_identifier method to the dataset,
# useful for quoting unqualified identifiers for use
# outside of datasets.
def quote_identifier(v)
schema_utility_dataset.quote_identifier(v)
end
# Return ruby class or array of classes for the given type symbol.
def schema_type_class(type)
@schema_type_classes[type]
end
# Default serial primary key options, used by the table creation code.
def serial_primary_key_options
{:primary_key => true, :type => Integer, :auto_increment => true}
end
# Cache the prepared statement object at the given name.
def set_prepared_statement(name, ps)
Sequel.synchronize{prepared_statements[name] = ps}
end
# Whether this database instance uses multiple servers, either for sharding
# or for primary/replica configurations.
def sharded?
@sharded
end
# The timezone to use for this database, defaulting to Sequel.database_timezone.
def timezone
@timezone || Sequel.database_timezone
end
# Convert the given timestamp to the application's timezone,
# from the database's timezone or the default database timezone if
# the database does not have a timezone.
def to_application_timestamp(v)
Sequel.convert_timestamp(v, timezone)
end
# Typecast the value to the given column_type. Calls
# typecast_value_#{column_type} if the method exists,
# otherwise returns the value.
# This method should raise Sequel::InvalidValue if assigned value
# is invalid.
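#
# For example:
#
#   DB.typecast_value(:integer, '42')  # => 42
#   DB.typecast_value(:boolean, 'f')   # => false
#   DB.typecast_value(:decimal, '1.5') # => BigDecimal('1.5')
#   DB.typecast_value(:integer, 'a')   # raises Sequel::InvalidValue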
def typecast_value(column_type, value)
return nil if value.nil?
meth = "typecast_value_#{column_type}"
begin
# Allow calling private methods as per-type typecasting methods are private
respond_to?(meth, true) ? send(meth, value) : value
rescue ArgumentError, TypeError => e
raise Sequel.convert_exception_class(e, InvalidValue)
end
end
# Returns the URI used to connect to the database. If a URI
# was not used when connecting, returns nil.
def uri
opts[:uri]
end
# Explicit alias of uri for easier subclassing.
def url
uri
end
private
# Per adapter initialization method, empty by default.
def adapter_initialize
end
# Returns true when the object is considered blank.
# The only objects that are blank are nil, false,
# strings consisting entirely of whitespace, and
# objects that respond to empty? and return true.
def blank_object?(obj)
return obj.blank? if obj.respond_to?(:blank?)
case obj
when NilClass, FalseClass
true
when Numeric, TrueClass
false
when String
obj.strip.empty?
else
obj.respond_to?(:empty?) ? obj.empty? : false
end
end
# An enumerable yielding pairs of regexps and exception classes, used
# to match against underlying driver exception messages in
# order to raise a more specific Sequel::DatabaseError subclass.
def database_error_regexps
DEFAULT_DATABASE_ERROR_REGEXPS
end
# Return the Sequel::DatabaseError subclass to wrap the given
# exception in.
def database_error_class(exception, opts)
database_specific_error_class(exception, opts) || DatabaseError
end
# Return the SQLState for the given exception, if one can be determined
def database_exception_sqlstate(exception, opts)
nil
end
# Return a specific Sequel::DatabaseError exception class if
# one is appropriate for the underlying exception,
# or nil if there is no specific exception class.
def database_specific_error_class(exception, opts)
return DatabaseDisconnectError if disconnect_error?(exception, opts)
if sqlstate = database_exception_sqlstate(exception, opts)
if klass = database_specific_error_class_from_sqlstate(sqlstate)
return klass
end
else
database_error_regexps.each do |regexp, klss|
return klss if exception.message =~ regexp
end
end
nil
end
NOT_NULL_CONSTRAINT_SQLSTATES = %w'23502'.freeze.each(&:freeze)
FOREIGN_KEY_CONSTRAINT_SQLSTATES = %w'23503 23506 23504'.freeze.each(&:freeze)
UNIQUE_CONSTRAINT_SQLSTATES = %w'23505'.freeze.each(&:freeze)
CHECK_CONSTRAINT_SQLSTATES = %w'23513 23514'.freeze.each(&:freeze)
SERIALIZATION_CONSTRAINT_SQLSTATES = %w'40001'.freeze.each(&:freeze)
# Given the SQLState, return the appropriate DatabaseError subclass.
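# For example, given the SQLSTATE lists above:
#
#   database_specific_error_class_from_sqlstate('23505') # => UniqueConstraintViolation
#   database_specific_error_class_from_sqlstate('40001') # => SerializationFailure
#   database_specific_error_class_from_sqlstate('99999') # => nil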
def database_specific_error_class_from_sqlstate(sqlstate)
case sqlstate
when *NOT_NULL_CONSTRAINT_SQLSTATES
NotNullConstraintViolation
when *FOREIGN_KEY_CONSTRAINT_SQLSTATES
ForeignKeyConstraintViolation
when *UNIQUE_CONSTRAINT_SQLSTATES
UniqueConstraintViolation
when *CHECK_CONSTRAINT_SQLSTATES
CheckConstraintViolation
when *SERIALIZATION_CONSTRAINT_SQLSTATES
SerializationFailure
end
end
# Return true if exception represents a disconnect error, false otherwise.
def disconnect_error?(exception, opts)
opts[:disconnect]
end
# Load extensions during initialization from the given key in opts.
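# The value may be a Symbol, an Array of Symbols, or a String of
# comma-separated extension names; any other non-nil value raises an Error.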
def initialize_load_extensions(key)
case exts = @opts[key]
when String
extension(*exts.split(',').map(&:to_sym))
when Array
extension(*exts)
when Symbol
extension(exts)
when nil
# nothing
else
raise Error, "unsupported Database #{key.inspect} option: #{@opts[key].inspect}"
end
end
# Convert the given exception to an appropriate Sequel::DatabaseError
# subclass, keeping message and backtrace.
def raise_error(exception, opts=OPTS)
if !opts[:classes] || Array(opts[:classes]).any?{|c| exception.is_a?(c)}
raise Sequel.convert_exception_class(exception, database_error_class(exception, opts))
else
raise exception
end
end
# Swallow database errors, unless they are connect/disconnect errors.
def swallow_database_error
yield
rescue Sequel::DatabaseDisconnectError, DatabaseConnectionError
# Always raise disconnect errors
raise
rescue Sequel::DatabaseError
# Don't raise other database errors.
nil
# else
# Don't rescue other exceptions, they will be raised normally.
end
# Check the bytesize of a string before conversion. There is no point
# trying to typecast strings that would be way too long.
def typecast_check_string_length(string, max_size)
if @check_string_typecast_bytesize && string.bytesize > max_size
raise InvalidValue, "string too long to typecast (bytesize: #{string.bytesize}, max: #{max_size})"
end
string
end
# Check the bytesize of the string value, if value is a string.
def typecast_check_length(value, max_size)
typecast_check_string_length(value, max_size) if String === value
value
end
# Typecast the value to an SQL::Blob
def typecast_value_blob(value)
value.is_a?(Sequel::SQL::Blob) ? value : Sequel::SQL::Blob.new(value)
end
# Typecast the value to true, false, or nil
def typecast_value_boolean(value)
case value
when false, 0, "0", /\Af(alse)?\z/i, /\Ano?\z/i
false
else
blank_object?(value) ? nil : true
end
end
# Typecast the value to a Date
def typecast_value_date(value)
case value
when DateTime, Time
Date.new(value.year, value.month, value.day)
when Date
value
when String
Sequel.string_to_date(typecast_check_string_length(value, 100))
when Hash
Date.new(*[:year, :month, :day].map{|x| typecast_check_length(value[x] || value[x.to_s], 100).to_i})
else
raise InvalidValue, "invalid value for Date: #{value.inspect}"
end
end
# Typecast the value to a DateTime or Time depending on Sequel.datetime_class
def typecast_value_datetime(value)
case value
when String
Sequel.typecast_to_application_timestamp(typecast_check_string_length(value, 100))
when Hash
[:year, :month, :day, :hour, :minute, :second, :nanos, :offset].each do |x|
typecast_check_length(value[x] || value[x.to_s], 100)
end
Sequel.typecast_to_application_timestamp(value)
else
Sequel.typecast_to_application_timestamp(value)
end
end
if RUBY_VERSION >= '2.4'
# Typecast a string to a BigDecimal
alias _typecast_value_string_to_decimal BigDecimal
else
# :nocov:
def _typecast_value_string_to_decimal(value)
d = BigDecimal(value)
if d.zero?
# BigDecimal parsing is loose by default, returning a 0 value for
# invalid input. If a zero value is received, use Float to check
# for validity.
begin
Float(value)
rescue ArgumentError
raise InvalidValue, "invalid value for BigDecimal: #{value.inspect}"
end
end
d
end
# :nocov:
end
# Typecast the value to a BigDecimal
def typecast_value_decimal(value)
case value
when BigDecimal
value
when Numeric
BigDecimal(value.to_s)
when String
_typecast_value_string_to_decimal(typecast_check_string_length(value, 1000))
else
raise InvalidValue, "invalid value for BigDecimal: #{value.inspect}"
end
end
# Typecast the value to a Float
def typecast_value_float(value)
Float(typecast_check_length(value, 1000))
end
# Typecast the value to an Integer
def typecast_value_integer(value)
case value
when String
typecast_check_string_length(value, 100)
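# Integer() treats strings with leading zeros as octal, so force
# base 10 for those (e.g. '010' typecasts to 10, not 8).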
if value =~ /\A-?0+(\d)/
Integer(value, 10)
else
Integer(value)
end
else
Integer(value)
end
end
# Typecast the value to a String
def typecast_value_string(value)
case value
when Hash, Array
raise Sequel::InvalidValue, "invalid value for String: #{value.inspect}"
else
value.to_s
end
end
# Typecast the value to a Time
def typecast_value_time(value)
case value
when Time
if value.is_a?(SQLTime)
value
else
SQLTime.create(value.hour, value.min, value.sec, value.nsec/1000.0)
end
when String
Sequel.string_to_time(typecast_check_string_length(value, 100))
when Hash
SQLTime.create(*[:hour, :minute, :second].map{|x| typecast_check_length(value[x] || value[x.to_s], 100).to_i})
else
raise Sequel::InvalidValue, "invalid value for Time: #{value.inspect}"
end
end
end
end
sequel-5.63.0/lib/sequel/database/query.rb 0000664 0000000 0000000 00000034032 14342141206 0020404 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 1 - Methods that execute queries and/or return results
# These methods generally execute SQL code on the database server.
# ---------------------
COLUMN_SCHEMA_DATETIME_TYPES = [:date, :datetime].freeze
COLUMN_SCHEMA_STRING_TYPES = [:string, :blob, :date, :datetime, :time, :enum, :set, :interval].freeze
# The prepared statement object hash for this database, keyed by name symbol
attr_reader :prepared_statements
# Whether the schema should be cached for this database. True by default
# for performance, can be set to false to always issue a database query to
# get the schema.
attr_accessor :cache_schema
# Runs the supplied SQL statement string on the database server.
# Returns self so it can be safely chained:
#
# DB << "UPDATE albums SET artist_id = NULL" << "DROP TABLE artists"
def <<(sql)
run(sql)
self
end
# Call the prepared statement with the given name with the given hash
# of arguments.
#
# DB[:items].where(id: 1).prepare(:first, :sa)
# DB.call(:sa) # SELECT * FROM items WHERE id = 1
def call(ps_name, hash=OPTS, &block)
prepared_statement(ps_name).call(hash, &block)
end
# Method that should be used when submitting any DDL (Data Definition
# Language) SQL, such as +create_table+. By default, calls +execute_dui+.
# This method should not be called directly by user code.
def execute_ddl(sql, opts=OPTS, &block)
execute_dui(sql, opts, &block)
end
# Method that should be used when issuing a DELETE or UPDATE
# statement. By default, calls execute.
# This method should not be called directly by user code.
def execute_dui(sql, opts=OPTS, &block)
execute(sql, opts, &block)
end
# Method that should be used when issuing a INSERT
# statement. By default, calls execute_dui.
# This method should not be called directly by user code.
def execute_insert(sql, opts=OPTS, &block)
execute_dui(sql, opts, &block)
end
# Returns a single value from the database, see Dataset#get.
#
# DB.get(1) # SELECT 1
# # => 1
# DB.get{server_version.function} # SELECT server_version()
def get(*args, &block)
@default_dataset.get(*args, &block)
end
# Runs the supplied SQL statement string on the database server. Returns nil.
# Options:
# :server :: The server to run the SQL on.
#
# DB.run("SET some_server_variable = 42")
def run(sql, opts=OPTS)
sql = literal(sql) if sql.is_a?(SQL::PlaceholderLiteralString)
execute_ddl(sql, opts)
nil
end
# Returns the schema for the given table as an array with all members being arrays of length 2,
# the first member being the column name, and the second member being a hash of column information.
# The table argument can also be a dataset, as long as it only has one table.
# Available options are:
#
# :reload :: Ignore any cached results, and get fresh information from the database.
# :schema :: An explicit schema to use. It may also be implicitly provided
# via the table name.
#
# If schema parsing is supported by the database, the column information hash should contain at least the
# following entries:
#
# :allow_null :: Whether NULL is an allowed value for the column.
# :db_type :: The database type for the column, as a database specific string.
# :default :: The database default for the column, as a database specific string, or nil if there is
# no default value.
# :primary_key :: Whether the column is a primary key column. If this entry is not present,
# it means that primary key information is unavailable, not that the column
# is not a primary key.
# :ruby_default :: The database default for the column, as a ruby object. In many cases, complex
# database defaults cannot be parsed into ruby objects, in which case nil will be
# used as the value.
# :type :: A symbol specifying the type, such as :integer or :string.
#
# Example:
#
# DB.schema(:artists)
# # [[:id,
# # {:type=>:integer,
# # :primary_key=>true,
# # :default=>"nextval('artist_id_seq'::regclass)",
# # :ruby_default=>nil,
# # :db_type=>"integer",
# # :allow_null=>false}],
# # [:name,
# # {:type=>:string,
# # :primary_key=>false,
# # :default=>nil,
# # :ruby_default=>nil,
# # :db_type=>"text",
# # :allow_null=>false}]]
def schema(table, opts=OPTS)
raise(Error, 'schema parsing is not implemented on this database') unless supports_schema_parsing?
opts = opts.dup
tab = if table.is_a?(Dataset)
o = table.opts
from = o[:from]
raise(Error, "can only parse the schema for a dataset with a single from table") unless from && from.length == 1 && !o.include?(:join) && !o.include?(:sql)
table.first_source_table
else
table
end
qualifiers = split_qualifiers(tab)
table_name = qualifiers.pop
sch = qualifiers.pop
information_schema_schema = case qualifiers.length
when 1
Sequel.identifier(*qualifiers)
when 2
Sequel.qualify(*qualifiers)
end
if table.is_a?(Dataset)
quoted_name = table.literal(tab)
opts[:dataset] = table
else
quoted_name = schema_utility_dataset.literal(table)
end
opts[:schema] = sch if sch && !opts.include?(:schema)
opts[:information_schema_schema] = information_schema_schema if information_schema_schema && !opts.include?(:information_schema_schema)
Sequel.synchronize{@schemas.delete(quoted_name)} if opts[:reload]
if v = Sequel.synchronize{@schemas[quoted_name]}
return v
end
cols = schema_parse_table(table_name, opts)
raise(Error, "schema parsing returned no columns, table #{table_name.inspect} probably doesn't exist") if cols.nil? || cols.empty?
primary_keys = 0
auto_increment_set = false
cols.each do |_,c|
auto_increment_set = true if c.has_key?(:auto_increment)
primary_keys += 1 if c[:primary_key]
end
cols.each do |_,c|
c[:ruby_default] = column_schema_to_ruby_default(c[:default], c[:type]) unless c.has_key?(:ruby_default)
if c[:primary_key] && !auto_increment_set
# If adapter didn't set it, assume that integer primary keys are auto incrementing
c[:auto_increment] = primary_keys == 1 && !!(c[:db_type] =~ /int/io)
end
if !c[:max_length] && c[:type] == :string && (max_length = column_schema_max_length(c[:db_type]))
c[:max_length] = max_length
end
if !c[:max_value] && !c[:min_value] && c[:type] == :integer && (min_max = column_schema_integer_min_max_values(c[:db_type]))
c[:min_value], c[:max_value] = min_max
end
end
schema_post_process(cols)
Sequel.synchronize{@schemas[quoted_name] = cols} if cache_schema
cols
end
# Returns true if a table with the given name exists. This requires a query
# to the database.
#
# DB.table_exists?(:foo) # => false
# # SELECT NULL FROM foo LIMIT 1
#
# Note that since this does a SELECT from the table, it can give false negatives
# if you don't have permission to SELECT from the table.
def table_exists?(name)
sch, table_name = schema_and_table(name)
name = SQL::QualifiedIdentifier.new(sch, table_name) if sch
ds = from(name)
transaction(:savepoint=>:only){_table_exists?(ds)}
true
rescue DatabaseError
false
end
private
# Should raise an error if the table does not exist,
# and not raise an error if the table does exist.
def _table_exists?(ds)
ds.get(SQL::AliasedExpression.new(Sequel::NULL, :nil))
end
# Whether the type should be treated as a string type when parsing the
# column schema default value.
def column_schema_default_string_type?(type)
COLUMN_SCHEMA_STRING_TYPES.include?(type)
end
# Transform the given normalized default string into a ruby object for the
# given type.
def column_schema_default_to_ruby_value(default, type)
case type
when :boolean
case default
when /[f0]/i
false
when /[t1]/i
true
end
when :string, :enum, :set, :interval
default
when :blob
Sequel::SQL::Blob.new(default)
when :integer
Integer(default)
when :float
Float(default)
when :date
Sequel.string_to_date(default)
when :datetime
Sequel.string_to_datetime(default)
when :time
Sequel.string_to_time(default)
when :decimal
BigDecimal(default)
end
end
# Normalize the default value string for the given type
# and return the normalized value.
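# For string types this strips the surrounding single quotes and
# unescapes doubled quotes, e.g. "'O''Brien'" becomes "O'Brien";
# other types are returned unchanged.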
def column_schema_normalize_default(default, type)
if column_schema_default_string_type?(type)
return unless m = /\A'(.*)'\z/.match(default)
m[1].gsub("''", "'")
else
default
end
end
# Convert the given default, which should be a database specific string, into
# a ruby object.
def column_schema_to_ruby_default(default, type)
return default unless default.is_a?(String)
if COLUMN_SCHEMA_DATETIME_TYPES.include?(type)
if /now|today|CURRENT|getdate|\ADate\(\)\z/i.match(default)
if type == :date
return Sequel::CURRENT_DATE
else
return Sequel::CURRENT_TIMESTAMP
end
end
end
default = column_schema_normalize_default(default, type)
column_schema_default_to_ruby_value(default, type) rescue nil
end
INTEGER1_MIN_MAX = [-128, 127].freeze
INTEGER2_MIN_MAX = [-32768, 32767].freeze
INTEGER3_MIN_MAX = [-8388608, 8388607].freeze
INTEGER4_MIN_MAX = [-2147483648, 2147483647].freeze
INTEGER8_MIN_MAX = [-9223372036854775808, 9223372036854775807].freeze
UNSIGNED_INTEGER1_MIN_MAX = [0, 255].freeze
UNSIGNED_INTEGER2_MIN_MAX = [0, 65535].freeze
UNSIGNED_INTEGER3_MIN_MAX = [0, 16777215].freeze
UNSIGNED_INTEGER4_MIN_MAX = [0, 4294967295].freeze
UNSIGNED_INTEGER8_MIN_MAX = [0, 18446744073709551615].freeze
# Look at the db_type and guess the minimum and maximum integer values for
# the column.
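#
#   column_schema_integer_min_max_values("smallint")     # => [-32768, 32767]
#   column_schema_integer_min_max_values("int unsigned") # => [0, 4294967295]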
def column_schema_integer_min_max_values(db_type)
unsigned = /unsigned/i =~ db_type
case db_type
when /big|int8/i
unsigned ? UNSIGNED_INTEGER8_MIN_MAX : INTEGER8_MIN_MAX
when /medium/i
unsigned ? UNSIGNED_INTEGER3_MIN_MAX : INTEGER3_MIN_MAX
when /small|int2/i
unsigned ? UNSIGNED_INTEGER2_MIN_MAX : INTEGER2_MIN_MAX
when /tiny/i
(unsigned || column_schema_tinyint_type_is_unsigned?) ? UNSIGNED_INTEGER1_MIN_MAX : INTEGER1_MIN_MAX
else
unsigned ? UNSIGNED_INTEGER4_MIN_MAX : INTEGER4_MIN_MAX
end
end
# Whether the tinyint type (if supported by the database) is unsigned by default.
def column_schema_tinyint_type_is_unsigned?
false
end
# Look at the db_type and guess the maximum length of the column.
# This assumes types such as varchar(255).
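#
#   column_schema_max_length("varchar(255)") # => 255
#   column_schema_max_length("text")         # => nil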
def column_schema_max_length(db_type)
if db_type =~ /\((\d+)\)/
$1.to_i
end
end
# Return a Method object for the dataset's input_identifier_method.
# Used in metadata parsing to make sure the returned information is in the
# correct format.
def input_identifier_meth(ds=nil)
(ds || dataset).method(:input_identifier)
end
# Uncached version of metadata_dataset, designed for overriding.
def _metadata_dataset
dataset
end
# Return a dataset that uses the default identifier input and output methods
# for this database. Used when parsing metadata so that column symbols are
# returned as expected.
def metadata_dataset
@metadata_dataset ||= _metadata_dataset
end
# Return a Method object for the dataset's output_identifier_method.
# Used in metadata parsing to make sure the returned information is in the
# correct format.
def output_identifier_meth(ds=nil)
(ds || dataset).method(:output_identifier)
end
# Remove the cached schema for the given table name
def remove_cached_schema(table)
cache = @default_dataset.send(:cache)
Sequel.synchronize{cache.clear}
k = quote_schema_table(table)
Sequel.synchronize{@schemas.delete(k)}
end
# Match the database's column type to a ruby type via a
# regular expression, and return the ruby type as a symbol
# such as :integer or :string.
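#
#   schema_column_type("character varying(255)")   # => :string
#   schema_column_type("timestamp with time zone") # => :datetime
#   schema_column_type("numeric(10,2)")            # => :decimal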
def schema_column_type(db_type)
case db_type
when /\A(character( varying)?|n?(var)?char|n?text|string|clob)/io
:string
when /\A(int(eger)?|(big|small|tiny)int)/io
:integer
when /\Adate\z/io
:date
when /\A((small)?datetime|timestamp(\(\d\))?( with(out)? time zone)?)\z/io
:datetime
when /\Atime( with(out)? time zone)?\z/io
:time
when /\A(bool(ean)?)\z/io
:boolean
when /\A(real|float( unsigned)?|double( precision)?|double\(\d+,\d+\)( unsigned)?)\z/io
:float
when /\A(?:(?:(?:num(?:ber|eric)?|decimal)(?:\(\d+,\s*(\d+|false|true)\))?))\z/io
$1 && ['0', 'false'].include?($1) ? :integer : :decimal
when /bytea|blob|image|(var)?binary/io
:blob
when /\Aenum/io
:enum
end
end
# Post process the schema values.
def schema_post_process(cols)
# :nocov:
if RUBY_VERSION >= '2.5'
# :nocov:
cols.each do |_, h|
db_type = h[:db_type]
if db_type.is_a?(String)
h[:db_type] = -db_type
end
end
end
cols.each do |_,c|
c.each_value do |val|
val.freeze if val.is_a?(String)
end
end
end
end
end
sequel-5.63.0/lib/sequel/database/schema_generator.rb 0000664 0000000 0000000 00000066765 14342141206 0022567 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
# The Schema module holds the schema generators.
module Schema
# Schema::CreateTableGenerator is an internal class that the user is not expected
# to instantiate directly. Instances are created by Database#create_table.
# It is used to specify table creation parameters. It takes a Database
# object and a block of column/index/constraint specifications, and
# gives the Database a table description, which the database uses to
# create a table.
#
# Schema::CreateTableGenerator has some methods but also includes method_missing,
# allowing users to specify column type as a method instead of using
# the column method, which makes for a nicer DSL.
#
# For more information on Sequel's support for schema modification, see
# the {"Schema Modification" guide}[rdoc-ref:doc/schema_modification.rdoc].
class CreateTableGenerator
# Classes specifying generic types that Sequel will convert to database-specific types.
GENERIC_TYPES=%w'String Integer Float Numeric BigDecimal Date DateTime Time File TrueClass FalseClass'.freeze
# Column hashes created by this generator
attr_reader :columns
# Constraint hashes created by this generator
attr_reader :constraints
# Index hashes created by this generator
attr_reader :indexes
# Set the database in which to create the table, and evaluate the block
# in the context of this object.
def initialize(db, &block)
@db = db
@columns = []
@indexes = []
@constraints = []
@primary_key = nil
instance_exec(&block) if block
end
# Use custom Bignum method to use :Bignum instead of Bignum class, to work
# correctly in cases where Bignum is the same as Integer.
def Bignum(name, opts=OPTS)
column(name, :Bignum, opts)
end
# Use custom Fixnum method to use Integer instead of Fixnum class, to avoid
# warnings on ruby 2.4+.
def Fixnum(name, opts=OPTS)
column(name, Integer, opts)
end
# Add a method for each of the given types that creates a column
# with that type as a constant. Types given should either already
# be constants/classes or a capitalized string/symbol with the same name
# as a constant/class.
def self.add_type_method(*types)
types.each do |type|
case type
when Symbol, String
method = type
type = Object.const_get(type)
else
method = type.to_s
end
define_method(method){|name, opts=OPTS| column(name, type, opts)}
end
nil
end
# Add an unnamed constraint, specified by the given block
# or args:
#
# check(num: 1..5) # CHECK num >= 1 AND num <= 5
# check{num > 5} # CHECK num > 5
def check(*args, &block)
constraint(nil, *args, &block)
end
# Add a column with the given name, type, and opts:
#
# column :num, :integer
# # num INTEGER
#
# column :name, String, null: false, default: 'a'
# # name varchar(255) NOT NULL DEFAULT 'a'
#
# inet :ip
# # ip inet
#
# You can also create columns via method missing, so the following are
# equivalent:
#
# column :number, :integer
# integer :number
#
# The following options are supported:
#
# :collate :: The collation to use for the column. For backwards compatibility,
# only symbols and string values are supported, and they are used verbatim.
# However, on PostgreSQL, symbols are literalized as regular identifiers,
# since unquoted collations are unlikely to be valid.
# :default :: The default value for the column.
# :deferrable :: For foreign key columns, this ensures referential integrity will work even if
# the referencing table uses a foreign key value that does not
# yet exist in the referenced table (but will exist before the transaction commits).
# Basically it adds DEFERRABLE INITIALLY DEFERRED on key creation.
# If you use :immediate as the value, uses DEFERRABLE INITIALLY IMMEDIATE.
# :generated_always_as :: Specify a GENERATED ALWAYS AS column expression,
# if generated columns are supported (PostgreSQL 12+, MariaDB 5.2.0+,
# and MySQL 5.7.6+).
# :index :: Create an index on this column. If given a hash, use the hash as the
# options for the index.
# :key :: For foreign key columns, the column in the associated table
# that this column references. Unnecessary if this column
# references the primary key of the associated table, except if you are
# using MySQL.
# :null :: Mark the column as allowing NULL values (if true),
# or not allowing NULL values (if false). The default is to allow NULL values.
# :on_delete :: Specify the behavior of this column when being deleted
# (:restrict, :cascade, :set_null, :set_default, :no_action).
# :on_update :: Specify the behavior of this column when being updated
# (:restrict, :cascade, :set_null, :set_default, :no_action).
# :primary_key :: Make the column a single primary key column. This should not
# be used if you want a single autoincrementing primary key column
# (use the primary_key method in that case).
# :primary_key_constraint_name :: The name to give the primary key constraint
# :primary_key_deferrable :: Similar to :deferrable, but for the primary key constraint
# if :primary_key is used.
# :type :: Overrides the type given as the argument. Generally not used by column
# itself, but can be passed as an option to other methods that call column.
# :unique :: Mark the column as unique, generally has the same effect as
# creating a unique index on the column.
# :unique_constraint_name :: The name to give the unique key constraint
# :unique_deferrable :: Similar to :deferrable, but for the unique constraint if :unique
# is used.
#
# PostgreSQL specific options:
#
# :identity :: Create an identity column.
#
# MySQL specific options:
#
# :generated_type :: Set the type of column when using :generated_always_as,
# should be :virtual or :stored to force a type.
# :on_update_current_timestamp :: Use ON UPDATE CURRENT TIMESTAMP when defining the column,
# which will update the column value to CURRENT_TIMESTAMP
# on every UPDATE.
#
# Microsoft SQL Server specific options:
#
# :clustered :: When using :primary_key or :unique, marks the primary key or unique
# constraint as CLUSTERED (if true), or NONCLUSTERED (if false).
def column(name, type, opts = OPTS)
columns << {:name => name, :type => type}.merge!(opts)
if index_opts = opts[:index]
index(name, index_opts.is_a?(Hash) ? index_opts : OPTS)
end
nil
end
# Adds a named CHECK constraint (or unnamed if name is nil),
# with the given block or args. To provide options for the constraint, pass
# a hash as the first argument.
#
# constraint(:blah, num: 1..5)
# # CONSTRAINT blah CHECK num >= 1 AND num <= 5
# constraint({name: :blah, deferrable: true}, num: 1..5)
# # CONSTRAINT blah CHECK num >= 1 AND num <= 5 DEFERRABLE INITIALLY DEFERRED
#
# If the first argument is a hash, the following options are supported:
#
# Options:
# :name :: The name of the CHECK constraint
# :deferrable :: Whether the CHECK constraint should be marked DEFERRABLE.
#
# PostgreSQL specific options:
# :not_valid :: Whether the CHECK constraint should be marked NOT VALID.
def constraint(name, *args, &block)
opts = name.is_a?(Hash) ? name : {:name=>name}
constraints << opts.merge(:type=>:check, :check=>block || args)
nil
end
# Add a foreign key in the table that references another table. See #column
# for available options.
#
# foreign_key(:artist_id) # artist_id INTEGER
# foreign_key(:artist_id, :artists) # artist_id INTEGER REFERENCES artists
# foreign_key(:artist_id, :artists, key: :id) # artist_id INTEGER REFERENCES artists(id)
# foreign_key(:artist_id, :artists, type: String) # artist_id varchar(255) REFERENCES artists(id)
#
# Additional Options:
#
# :foreign_key_constraint_name :: The name to give the foreign key constraint
#
# If you want a foreign key constraint without adding a column (usually because it is a
# composite foreign key), you can provide an array of columns as the first argument, and
# you can provide the :name option to name the constraint:
#
# foreign_key([:artist_name, :artist_location], :artists, name: :artist_fk)
# # ADD CONSTRAINT artist_fk FOREIGN KEY (artist_name, artist_location) REFERENCES artists
def foreign_key(name, table=nil, opts = OPTS)
opts = case table
when Hash
table.merge(opts)
when NilClass
opts
else
opts.merge(:table=>table)
end
return composite_foreign_key(name, opts) if name.is_a?(Array)
column(name, Integer, opts)
end
# Add a full text index on the given columns.
# See #index for additional options.
#
# PostgreSQL specific options:
# :index_type :: Can be set to :gist to use a GIST index instead of the
# default GIN index.
# :language :: Set a language to use for the index (default: simple).
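#
# full_text_index :description
# full_text_index [:title, :body], language: 'english'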
def full_text_index(columns, opts = OPTS)
index(columns, opts.merge(:type => :full_text))
end
# True if the generator includes the creation of a column with the given name.
def has_column?(name)
columns.any?{|c| c[:name] == name}
end
# Add an index on the given column(s) with the given options. Examples:
#
# index :name
# # CREATE INDEX table_name_index ON table (name)
#
# index [:artist_id, :name]
# # CREATE INDEX table_artist_id_name_index ON table (artist_id, name)
#
# index [:artist_id, :name], name: :foo
# # CREATE INDEX foo ON table (artist_id, name)
#
# General options:
#
# :include :: Include additional column values in the index, without
# actually indexing on those values (only supported by
# some databases).
# :name :: The name to use for the index. If not given, a default name
# based on the table and columns is used.
# :type :: The type of index to use (only supported by some databases,
# :full_text and :spatial values are handled specially).
# :unique :: Make the index unique, so duplicate values are not allowed.
# :where :: A filter expression, used to create a partial index (only
# supported by some databases).
#
# PostgreSQL specific options:
#
# :concurrently :: Create the index concurrently, so it doesn't block
# operations on the table while the index is being
# built.
# :if_not_exists :: Only create the index if an index of the same name doesn't already exist.
# :nulls_distinct :: Set whether separate NULLs should be considered distinct values in unique indexes.
# :opclass :: Set an opclass to use for all columns (per-column opclasses require
# custom SQL).
# :tablespace :: Specify tablespace for index.
#
# Microsoft SQL Server specific options:
#
# :key_index :: Sets the KEY INDEX to the given value.
def index(columns, opts = OPTS)
indexes << {:columns => Array(columns)}.merge!(opts)
nil
end
# Add a column with the given type, name, and opts. See #column for available
# options.
def method_missing(type, name = nil, opts = OPTS)
name ? column(name, type, opts) : super
end
# This object responds to all methods.
def respond_to_missing?(meth, include_private)
true
end
# Adds an autoincrementing primary key column or a primary key constraint.
# To just create a constraint, the first argument should be an array of column symbols
# specifying the primary key columns. To create an autoincrementing primary key
# column, a single symbol can be used. In both cases, an options hash can be used
# as the second argument.
#
# If you want to create a primary key column that is not autoincrementing, you
# should not use this method. Instead, you should use the regular +column+ method
# with a primary_key: true option.
#
# If an array of column symbols is used, you can specify the :name option
# to name the constraint.
#
# Options:
# :keep_order :: For non-composite primary keys, respects the existing order of
# columns, overriding the default behavior of making the primary
# key the first column.
#
# Examples:
# primary_key(:id)
# primary_key(:id, type: :Bignum, keep_order: true)
# primary_key([:street_number, :house_number], name: :some_constraint_name)
def primary_key(name, *args)
return composite_primary_key(name, *args) if name.is_a?(Array)
column = @db.serial_primary_key_options.merge({:name => name})
if opts = args.pop
opts = {:type => opts} unless opts.is_a?(Hash)
if type = args.pop
opts = opts.merge(:type => type)
end
column.merge!(opts)
end
@primary_key = column
if column[:keep_order]
columns << column
else
columns.unshift(column)
end
nil
end
# The name of the primary key for this generator, if it has a primary key.
def primary_key_name
@primary_key[:name] if @primary_key
end
# Add a spatial index on the given columns.
# See #index for additional options.
def spatial_index(columns, opts = OPTS)
index(columns, opts.merge(:type => :spatial))
end
# Add a unique constraint on the given columns.
#
# unique(:name) # UNIQUE (name)
#
# Supports the same :deferrable option as #column. The :name option can be used
# to name the constraint.
def unique(columns, opts = OPTS)
constraints << {:type => :unique, :columns => Array(columns)}.merge!(opts)
nil
end
private
# Add a composite primary key constraint
def composite_primary_key(columns, *args)
opts = args.pop || OPTS
constraints << {:type => :primary_key, :columns => columns}.merge!(opts)
nil
end
# Add a composite foreign key constraint
def composite_foreign_key(columns, opts)
constraints << {:type => :foreign_key, :columns => columns}.merge!(opts)
nil
end
add_type_method(*GENERIC_TYPES)
end
# Schema::AlterTableGenerator is an internal class that the user is not expected
# to instantiate directly. Instances are created by Database#alter_table.
# It is used to specify table alteration parameters. It takes a Database
# object and a block of operations to perform on the table, and
# gives the Database an array of table altering operations, which the database uses to
# alter a table's description.
#
# For more information on Sequel's support for schema modification, see
# the {"Schema Modification" guide}[link:files/doc/schema_modification_rdoc.html].
class AlterTableGenerator
# An array of operations to perform
attr_reader :operations
# Set the Database object to which to apply the changes, and evaluate the
# block in the context of this object.
def initialize(db, &block)
@db = db
@operations = []
instance_exec(&block) if block
end
# Add a column with the given name, type, and opts.
# See CreateTableGenerator#column for the available options.
#
# add_column(:name, String) # ADD COLUMN name varchar(255)
#
# PostgreSQL specific options:
#
# :if_not_exists :: Set to true to not add the column if it already exists (PostgreSQL 9.6+)
#
# MySQL specific options:
#
# :after :: The name of an existing column that the new column should be positioned after
# :first :: Create this new column before all other existing columns
def add_column(name, type, opts = OPTS)
op = {:op => :add_column, :name => name, :type => type}.merge!(opts)
index_opts = op.delete(:index)
@operations << op
add_index(name, index_opts.is_a?(Hash) ? index_opts : OPTS) if index_opts
nil
end
# Add a constraint with the given name and args.
# See CreateTableGenerator#constraint.
#
# add_constraint(:valid_name, Sequel.like(:name, 'A%'))
# # ADD CONSTRAINT valid_name CHECK (name LIKE 'A%' ESCAPE '\')
# add_constraint({name: :valid_name, deferrable: true}, Sequel.like(:name, 'A%'))
# # ADD CONSTRAINT valid_name CHECK (name LIKE 'A%' ESCAPE '\') DEFERRABLE INITIALLY DEFERRED
def add_constraint(name, *args, &block)
opts = name.is_a?(Hash) ? name : {:name=>name}
@operations << opts.merge(:op=>:add_constraint, :type=>:check, :check=>block || args)
nil
end
# Add a unique constraint to the given column(s)
#
# add_unique_constraint(:name) # ADD UNIQUE (name)
# add_unique_constraint(:name, name: :unique_name) # ADD CONSTRAINT unique_name UNIQUE (name)
#
# Supports the same :deferrable option as CreateTableGenerator#column.
def add_unique_constraint(columns, opts = OPTS)
@operations << {:op => :add_constraint, :type => :unique, :columns => Array(columns)}.merge!(opts)
nil
end
# Add a foreign key with the given name and referencing the given table.
# See CreateTableGenerator#column for the available options.
#
# You can also pass an array of column names for creating composite foreign
# keys. In this case, it will assume the columns exist and will only add
# the constraint. You can provide a :name option to name the constraint.
#
# NOTE: If you need to add a foreign key constraint to a single existing column
# use the composite key syntax even if it is only one column.
#
# add_foreign_key(:artist_id, :table) # ADD COLUMN artist_id integer REFERENCES table
# add_foreign_key([:name], :table) # ADD FOREIGN KEY (name) REFERENCES table
#
# PostgreSQL specific options:
#
# :not_valid :: Set to true to add the constraint with the NOT VALID syntax.
# This makes it so that future inserts must respect referential
# integrity, but allows the constraint to be added even if existing
# column values reference rows that do not exist. After all the
# existing data has been cleaned up, validate_constraint can be used
# to mark the constraint as valid. Note that this option only makes
# sense when using an array of columns.
def add_foreign_key(name, table, opts = OPTS)
return add_composite_foreign_key(name, table, opts) if name.is_a?(Array)
add_column(name, Integer, {:table=>table}.merge!(opts))
end
# Add a full text index on the given columns.
# See CreateTableGenerator#full_text_index for available options.
def add_full_text_index(columns, opts = OPTS)
add_index(columns, {:type=>:full_text}.merge!(opts))
end
# Add an index on the given columns. See
# CreateTableGenerator#index for available options.
#
# add_index(:artist_id) # CREATE INDEX table_artist_id_index ON table (artist_id)
def add_index(columns, opts = OPTS)
@operations << {:op => :add_index, :columns => Array(columns)}.merge!(opts)
nil
end
# Add a primary key. See CreateTableGenerator#column
# for the available options. Like +add_foreign_key+, if you specify
# the column name as an array, it just creates a constraint:
#
# add_primary_key(:id) # ADD COLUMN id serial PRIMARY KEY
# add_primary_key([:artist_id, :name]) # ADD PRIMARY KEY (artist_id, name)
def add_primary_key(name, opts = OPTS)
return add_composite_primary_key(name, opts) if name.is_a?(Array)
opts = @db.serial_primary_key_options.merge(opts)
add_column(name, opts.delete(:type), opts)
end
# Add a spatial index on the given columns.
# See CreateTableGenerator#index for available options.
def add_spatial_index(columns, opts = OPTS)
add_index(columns, {:type=>:spatial}.merge!(opts))
end
# Remove a column from the table.
#
# drop_column(:artist_id) # DROP COLUMN artist_id
# drop_column(:artist_id, cascade: true) # DROP COLUMN artist_id CASCADE
#
# Options:
#
# :cascade :: CASCADE the operation, dropping other objects that depend on
# the dropped column.
#
# PostgreSQL specific options:
# :if_exists :: Use IF EXISTS, so no error is raised if the column does not
# exist.
def drop_column(name, opts=OPTS)
@operations << {:op => :drop_column, :name => name}.merge!(opts)
nil
end
# Remove a constraint from the table:
#
# drop_constraint(:unique_name) # DROP CONSTRAINT unique_name
# drop_constraint(:unique_name, cascade: true) # DROP CONSTRAINT unique_name CASCADE
#
# MySQL/SQLite specific options:
#
# :type :: Set the type of constraint to drop, either :primary_key, :foreign_key,
# or :unique.
def drop_constraint(name, opts=OPTS)
@operations << {:op => :drop_constraint, :name => name}.merge!(opts)
nil
end
# Remove a foreign key and the associated column from the table. General options:
#
# :name :: The name of the constraint to drop. If not given, uses the same name
# that would be used by add_foreign_key with the same columns.
#
# NOTE: If you want to drop only the foreign key constraint but keep the column,
# use the composite key syntax even if it is only one column.
#
# drop_foreign_key(:artist_id) # DROP CONSTRAINT table_artist_id_fkey, DROP COLUMN artist_id
# drop_foreign_key([:name]) # DROP CONSTRAINT table_name_fkey
def drop_foreign_key(name, opts=OPTS)
if !name.is_a?(Array) && opts[:foreign_key_constraint_name]
opts = Hash[opts]
opts[:name] = opts[:foreign_key_constraint_name]
end
drop_composite_foreign_key(Array(name), opts)
drop_column(name) unless name.is_a?(Array)
end
# Remove an index from the table. General options:
#
# :name :: The name of the index to drop. If not given, uses the same name
# that would be used by add_index with the same columns.
#
# PostgreSQL specific options:
#
# :cascade :: Cascade the index drop to dependent objects.
# :concurrently :: Drop the index using CONCURRENTLY, which doesn't block
# operations on the table. Supported in PostgreSQL 9.2+.
# :if_exists :: Only drop the index if it already exists.
#
# drop_index(:artist_id) # DROP INDEX table_artist_id_index
# drop_index([:a, :b]) # DROP INDEX table_a_b_index
# drop_index([:a, :b], name: :foo) # DROP INDEX foo
def drop_index(columns, options=OPTS)
@operations << {:op => :drop_index, :columns => Array(columns)}.merge!(options)
nil
end
# Rename one of the table's columns.
#
# rename_column(:name, :artist_name) # RENAME COLUMN name TO artist_name
def rename_column(name, new_name, opts = OPTS)
@operations << {:op => :rename_column, :name => name, :new_name => new_name}.merge!(opts)
nil
end
# Modify the default value for one of the table's columns.
#
# set_column_default(:artist_name, 'a') # ALTER COLUMN artist_name SET DEFAULT 'a'
#
# To remove an existing default value, use +nil+ as the value:
#
# set_column_default(:artist_name, nil) # ALTER COLUMN artist_name SET DEFAULT NULL
#
# On MySQL, make sure to use a symbol for the name of the column, as otherwise you
# can lose the type and NULL/NOT NULL setting for the column.
def set_column_default(name, default)
@operations << {:op => :set_column_default, :name => name, :default => default}
nil
end
# Modify the type of one of the table's columns.
#
# set_column_type(:artist_name, 'char(10)') # ALTER COLUMN artist_name TYPE char(10)
#
# PostgreSQL specific options:
#
# :using :: Add a USING clause that specifies how to convert existing values to new values.
#
# On MySQL, make sure to use a symbol for the name of the column, as otherwise you
# can lose the default and NULL/NOT NULL setting for the column.
def set_column_type(name, type, opts=OPTS)
@operations << {:op => :set_column_type, :name => name, :type => type}.merge!(opts)
nil
end
# Set a given column as allowing NULL values.
#
# set_column_allow_null(:artist_name) # ALTER COLUMN artist_name DROP NOT NULL
#
# On MySQL, make sure to use a symbol for the name of the column, as otherwise you
# can lose the default and type for the column.
def set_column_allow_null(name, allow_null=true)
@operations << {:op => :set_column_null, :name => name, :null => allow_null}
nil
end
# Set a given column as not allowing NULL values.
#
# set_column_not_null(:artist_name) # ALTER COLUMN artist_name SET NOT NULL
#
# On MySQL, make sure to use a symbol for the name of the column, as otherwise you
# can lose the default and type for the column.
def set_column_not_null(name)
set_column_allow_null(name, false)
end
private
# Add a composite primary key constraint
def add_composite_primary_key(columns, opts)
@operations << {:op => :add_constraint, :type => :primary_key, :columns => columns}.merge!(opts)
nil
end
# Add a composite foreign key constraint
def add_composite_foreign_key(columns, table, opts)
@operations << {:op => :add_constraint, :type => :foreign_key, :columns => columns, :table => table}.merge!(opts)
nil
end
# Drop a composite foreign key constraint
def drop_composite_foreign_key(columns, opts)
@operations << opts.merge(:op => :drop_constraint, :type => :foreign_key, :columns => columns)
nil
end
end
end
end
sequel-5.63.0/lib/sequel/database/schema_methods.rb 0000664 0000000 0000000 00000117223 14342141206 0022226 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 2 - Methods that modify the database schema
# These methods execute code on the database that modifies the database's schema.
# ---------------------
# The order of column modifiers to use when defining a column.
COLUMN_DEFINITION_ORDER = [:collate, :default, :null, :unique, :primary_key, :auto_increment, :references].freeze
# The alter table operations that are combinable.
COMBINABLE_ALTER_TABLE_OPS = [:add_column, :drop_column, :rename_column,
:set_column_type, :set_column_default, :set_column_null,
:add_constraint, :drop_constraint].freeze
# Adds a column to the specified table. This method expects a column name,
# a datatype and optionally a hash with additional constraints and options:
#
# DB.add_column :items, :name, String, unique: true, null: false
# DB.add_column :items, :category, String, default: 'ruby'
#
# See alter_table.
def add_column(table, *args)
alter_table(table) {add_column(*args)}
end
# Adds an index to a table for the given columns:
#
# DB.add_index :posts, :title
# DB.add_index :posts, [:author, :title], unique: true
#
# Options:
#
# :ignore_errors :: Ignore any DatabaseErrors that are raised
# :name :: Name to use for index instead of default
#
# See alter_table.
def add_index(table, columns, options=OPTS)
e = options[:ignore_errors]
begin
alter_table(table){add_index(columns, options)}
rescue DatabaseError
raise unless e
end
nil
end
# Alters the given table with the specified block. Example:
#
# DB.alter_table :items do
# add_column :category, String, default: 'ruby'
# drop_column :category
# rename_column :cntr, :counter
# set_column_type :value, Float
# set_column_default :value, 4.2
# add_index [:group, :category]
# drop_index [:group, :category]
# end
#
# Note that +add_column+ accepts all the options available for column
# definitions using create_table, and +add_index+ accepts all the options
# available for index definition.
#
# See Schema::AlterTableGenerator and the {Migrations guide}[rdoc-ref:doc/migration.rdoc].
def alter_table(name, &block)
generator = alter_table_generator(&block)
remove_cached_schema(name)
apply_alter_table_generator(name, generator)
nil
end
# Return a new Schema::AlterTableGenerator instance with the receiver as
# the database and the given block.
def alter_table_generator(&block)
alter_table_generator_class.new(self, &block)
end
# Create a join table using a hash of foreign keys to referenced
# table names. Example:
#
# create_join_table(cat_id: :cats, dog_id: :dogs)
# # CREATE TABLE cats_dogs (
# # cat_id integer NOT NULL REFERENCES cats,
# # dog_id integer NOT NULL REFERENCES dogs,
# # PRIMARY KEY (cat_id, dog_id)
# # )
# # CREATE INDEX cats_dogs_dog_id_cat_id_index ON cats_dogs(dog_id, cat_id)
#
# The primary key and index are used so that almost all operations
# on the table can benefit from one of the two indexes, and the primary
# key ensures that entries in the table are unique, which is the typical
# desire for a join table.
#
# The default table name this will create is the sorted version of the two
# hash values, joined by an underscore. So the following two method calls
# create the same table:
#
# create_join_table(cat_id: :cats, dog_id: :dogs) # cats_dogs
# create_join_table(dog_id: :dogs, cat_id: :cats) # cats_dogs
#
# You can provide column options by making the values in the hash
# be option hashes, so long as the option hashes have a :table
# entry giving the table referenced:
#
# create_join_table(cat_id: {table: :cats, type: :Bignum}, dog_id: :dogs)
#
# You can provide a second argument which is a table options hash:
#
# create_join_table({cat_id: :cats, dog_id: :dogs}, temp: true)
#
# Some table options are handled specially:
#
# :index_options :: The options to pass to the index
# :name :: The name of the table to create
# :no_index :: Set to true to not create the second index.
# :no_primary_key :: Set to true to not create the primary key.
def create_join_table(hash, options=OPTS)
keys = hash.keys.sort
create_table(join_table_name(hash, options), options) do
keys.each do |key|
v = hash[key]
unless v.is_a?(Hash)
v = {:table=>v}
end
v[:null] = false unless v.has_key?(:null)
foreign_key(key, v)
end
primary_key(keys) unless options[:no_primary_key]
index(keys.reverse, options[:index_options] || OPTS) unless options[:no_index]
end
nil
end
# Forcibly create a join table, attempting to drop it if it already exists, then creating it.
def create_join_table!(hash, options=OPTS)
drop_table?(join_table_name(hash, options))
create_join_table(hash, options)
end
# Creates the join table unless it already exists.
def create_join_table?(hash, options=OPTS)
if supports_create_table_if_not_exists? && options[:no_index]
create_join_table(hash, options.merge(:if_not_exists=>true))
elsif !table_exists?(join_table_name(hash, options))
create_join_table(hash, options)
end
end
# Creates a table with the columns given in the provided block:
#
# DB.create_table :posts do
# primary_key :id
# column :title, String
# String :content
# index :title
# end
#
# General options:
# :as :: Create the table using the value, which should be either a
# dataset or a literal SQL string. If this option is used,
# a block should not be given to the method.
# :ignore_index_errors :: Ignore any errors when creating indexes.
# :temp :: Create the table as a temporary table.
#
# MySQL specific options:
# :charset :: The character set to use for the table.
# :collate :: The collation to use for the table.
# :engine :: The table engine to use for the table.
#
# PostgreSQL specific options:
# :on_commit :: Either :preserve_rows (default), :drop or :delete_rows. Should
# only be specified when creating a temporary table.
# :foreign :: Create a foreign table. The value should be the name of the
# foreign server that was specified in CREATE SERVER.
# :inherits :: Inherit from a different table. An array can be
# specified to inherit from multiple tables.
# :unlogged :: Create the table as an unlogged table.
# :options :: The OPTIONS clause to use for foreign tables. Should be a hash
# where keys are option names and values are option values. Note
# that option names are unquoted, so you should not use untrusted
# keys.
# :tablespace :: The tablespace to use for the table.
#
# SQLite specific options:
# :strict :: Create a STRICT table, which checks that the values for the columns
# are the correct type (similar to all other SQL databases). Note that
# when using this option, all column types used should be one of the
# following: +int+, +integer+, +real+, +text+, +blob+, and +any+.
# The +any+ type is treated like a SQLite column in a non-strict table,
# allowing any type of data to be stored. This option is supported on
# SQLite 3.37.0+.
#
# See Schema::CreateTableGenerator and the {"Schema Modification" guide}[rdoc-ref:doc/schema_modification.rdoc].
def create_table(name, options=OPTS, &block)
remove_cached_schema(name)
if sql = options[:as]
raise(Error, "can't provide both :as option and block to create_table") if block
create_table_as(name, sql, options)
else
generator = options[:generator] || create_table_generator(&block)
create_table_from_generator(name, generator, options)
create_table_indexes_from_generator(name, generator, options)
end
nil
end
# Forcibly create a table, attempting to drop it if it already exists, then creating it.
#
# DB.create_table!(:a){Integer :a}
# # SELECT NULL FROM a LIMIT 1 -- check existence
# # DROP TABLE a -- drop table if already exists
# # CREATE TABLE a (a integer)
def create_table!(name, options=OPTS, &block)
drop_table?(name)
create_table(name, options, &block)
end
# Creates the table unless the table already exists.
#
# DB.create_table?(:a){Integer :a}
# # SELECT NULL FROM a LIMIT 1 -- check existence
# # CREATE TABLE a (a integer) -- if it doesn't already exist
def create_table?(name, options=OPTS, &block)
options = options.dup
generator = options[:generator] ||= create_table_generator(&block)
if generator.indexes.empty? && supports_create_table_if_not_exists?
create_table(name, options.merge!(:if_not_exists=>true))
elsif !table_exists?(name)
create_table(name, options)
end
end
# Return a new Schema::CreateTableGenerator instance with the receiver as
# the database and the given block.
def create_table_generator(&block)
create_table_generator_class.new(self, &block)
end
# Creates a view, replacing a view with the same name if one already exists.
#
# DB.create_or_replace_view(:some_items, "SELECT * FROM items WHERE price < 100")
# DB.create_or_replace_view(:some_items, DB[:items].where(category: 'ruby'))
#
# For databases where replacing a view is not natively supported, support
# is emulated by dropping a view with the same name before creating the view.
def create_or_replace_view(name, source, options = OPTS)
if supports_create_or_replace_view?
options = options.merge(:replace=>true)
else
swallow_database_error{drop_view(name)}
end
create_view(name, source, options)
nil
end
# Creates a view based on a dataset or an SQL string:
#
# DB.create_view(:cheap_items, "SELECT * FROM items WHERE price < 100")
# # CREATE VIEW cheap_items AS
# # SELECT * FROM items WHERE price < 100
#
# DB.create_view(:ruby_items, DB[:items].where(category: 'ruby'))
# # CREATE VIEW ruby_items AS
# # SELECT * FROM items WHERE (category = 'ruby')
#
# DB.create_view(:checked_items, DB[:items].where(:foo), check: true)
# # CREATE VIEW checked_items AS
# # SELECT * FROM items WHERE foo
# # WITH CHECK OPTION
#
# DB.create_view(:bar_items, DB[:items].select(:foo), columns: [:bar])
# # CREATE VIEW bar_items (bar) AS
# # SELECT foo FROM items
#
# Options:
# :columns :: The column names to use for the view. If not given,
# automatically determined based on the input dataset.
# :check :: Adds a WITH CHECK OPTION clause, so that attempting to modify
# rows in the underlying table that would not be returned by the
# view is not allowed. This can be set to :local to use WITH
# LOCAL CHECK OPTION.
#
# PostgreSQL/SQLite specific option:
# :temp :: Create a temporary view, automatically dropped on disconnect.
#
# PostgreSQL specific options:
# :materialized :: Creates a materialized view, similar to a regular view,
# but backed by a physical table.
# :recursive :: Creates a recursive view. As columns must be specified for
# recursive views, you can also set them as the value of this
# option. Since a recursive view requires a union that isn't
# in a subquery, if you are providing a Dataset as the source
# argument, it should probably call the union method with the
# all: true and from_self: false options.
# :security_invoker :: Set the security_invoker property on the view, making
# the access to the view use the current user's permissions,
# instead of the view owner's permissions.
# :tablespace :: The tablespace to use for materialized views.
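#
# A rough sketch of the :recursive option (assuming a hypothetical
# parts table with id and parent_id columns):
#
# base = DB[:parts].where(parent_id: nil).select(:id, :parent_id)
# rec = DB[:parts].join(:part_tree, id: :parent_id).
# select(Sequel[:parts][:id], Sequel[:parts][:parent_id])
# DB.create_view(:part_tree,
# base.union(rec, all: true, from_self: false),
# recursive: [:id, :parent_id])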
def create_view(name, source, options = OPTS)
execute_ddl(create_view_sql(name, source, options))
remove_cached_schema(name)
nil
end
# Removes a column from the specified table:
#
# DB.drop_column :items, :category
#
# See alter_table.
def drop_column(table, *args)
alter_table(table) {drop_column(*args)}
end
# Removes an index for the given table and column(s):
#
# DB.drop_index :posts, :title
# DB.drop_index :posts, [:author, :title]
#
# See alter_table.
def drop_index(table, columns, options=OPTS)
alter_table(table){drop_index(columns, options)}
end
# Drop the join table that would have been created with the
# same arguments to create_join_table:
#
# drop_join_table(cat_id: :cats, dog_id: :dogs)
# # DROP TABLE cats_dogs
def drop_join_table(hash, options=OPTS)
drop_table(join_table_name(hash, options), options)
end
# Drops one or more tables corresponding to the given names:
#
# DB.drop_table(:posts) # DROP TABLE posts
# DB.drop_table(:posts, :comments)
# DB.drop_table(:posts, :comments, cascade: true)
def drop_table(*names)
options = names.last.is_a?(Hash) ? names.pop : OPTS
names.each do |n|
execute_ddl(drop_table_sql(n, options))
remove_cached_schema(n)
end
nil
end
# Drops the table if it already exists. If it doesn't exist,
# does nothing.
#
# DB.drop_table?(:a)
# # SELECT NULL FROM a LIMIT 1 -- check existence
# # DROP TABLE a -- if it already exists
def drop_table?(*names)
options = names.last.is_a?(Hash) ? names.pop : OPTS
if supports_drop_table_if_exists?
options = options.merge(:if_exists=>true)
names.each do |name|
drop_table(name, options)
end
else
names.each do |name|
drop_table(name, options) if table_exists?(name)
end
end
nil
end
# Drops one or more views corresponding to the given names:
#
# DB.drop_view(:cheap_items)
# DB.drop_view(:cheap_items, :pricey_items)
# DB.drop_view(:cheap_items, :pricey_items, cascade: true)
# DB.drop_view(:cheap_items, :pricey_items, if_exists: true)
#
# Options:
# :cascade :: Also drop objects depending on this view.
# :if_exists :: Do not raise an error if the view does not exist.
#
# PostgreSQL specific options:
# :materialized :: Drop a materialized view.
def drop_view(*names)
options = names.last.is_a?(Hash) ? names.pop : OPTS
names.each do |n|
execute_ddl(drop_view_sql(n, options))
remove_cached_schema(n)
end
nil
end
# Renames a table:
#
# DB.tables #=> [:items]
# DB.rename_table :items, :old_items
# DB.tables #=> [:old_items]
def rename_table(name, new_name)
execute_ddl(rename_table_sql(name, new_name))
remove_cached_schema(name)
nil
end
# Renames a column in the specified table. This method expects the current
# column name and the new column name:
#
# DB.rename_column :items, :cntr, :counter
#
# See alter_table.
def rename_column(table, *args)
alter_table(table) {rename_column(*args)}
end
# Sets the default value for the given column in the given table:
#
# DB.set_column_default :items, :category, 'perl!'
#
# See alter_table.
def set_column_default(table, *args)
alter_table(table) {set_column_default(*args)}
end
# Set the data type for the given column in the given table:
#
# DB.set_column_type :items, :price, :float
#
# See alter_table.
def set_column_type(table, *args)
alter_table(table) {set_column_type(*args)}
end
private
# Apply the changes in the given alter table ops to the table given by name.
def apply_alter_table(name, ops)
alter_table_sql_list(name, ops).each{|sql| execute_ddl(sql)}
end
# Apply the operations in the given generator to the table given by name.
def apply_alter_table_generator(name, generator)
ops = generator.operations
unless can_add_primary_key_constraint_on_nullable_columns?
if add_pk = ops.find{|op| op[:op] == :add_constraint && op[:type] == :primary_key}
ops = add_pk[:columns].map{|column| {:op => :set_column_null, :name => column, :null => false}} + ops
end
end
apply_alter_table(name, ops)
end
# The class used for alter_table generators.
def alter_table_generator_class
Schema::AlterTableGenerator
end
# SQL fragment for given alter table operation.
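# For example, an op hash like {:op=>:add_column, :name=>:foo,
# :type=>Integer} dispatches to alter_table_add_column_sql.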
def alter_table_op_sql(table, op)
meth = "alter_table_#{op[:op]}_sql"
if respond_to?(meth, true)
# Allow calling private methods as alter table op sql methods are private
send(meth, table, op)
else
raise Error, "Unsupported ALTER TABLE operation: #{op[:op]}"
end
end
def alter_table_add_column_sql(table, op)
"ADD COLUMN #{column_definition_sql(op)}"
end
def alter_table_drop_column_sql(table, op)
"DROP COLUMN #{quote_identifier(op[:name])}#{' CASCADE' if op[:cascade]}"
end
def alter_table_rename_column_sql(table, op)
"RENAME COLUMN #{quote_identifier(op[:name])} TO #{quote_identifier(op[:new_name])}"
end
def alter_table_set_column_type_sql(table, op)
"ALTER COLUMN #{quote_identifier(op[:name])} TYPE #{type_literal(op)}"
end
def alter_table_set_column_default_sql(table, op)
"ALTER COLUMN #{quote_identifier(op[:name])} SET DEFAULT #{literal(op[:default])}"
end
def alter_table_set_column_null_sql(table, op)
"ALTER COLUMN #{quote_identifier(op[:name])} #{op[:null] ? 'DROP' : 'SET'} NOT NULL"
end
def alter_table_add_constraint_sql(table, op)
"ADD #{constraint_definition_sql(op)}"
end
def alter_table_drop_constraint_sql(table, op)
quoted_name = quote_identifier(op[:name]) if op[:name]
if op[:type] == :foreign_key
quoted_name ||= quote_identifier(foreign_key_name(table, op[:columns]))
end
"DROP CONSTRAINT #{quoted_name}#{' CASCADE' if op[:cascade]}"
end
# The SQL to execute to modify the table. op
# should be one of the operations returned by the AlterTableGenerator.
def alter_table_sql(table, op)
case op[:op]
when :add_index
index_definition_sql(table, op)
when :drop_index
drop_index_sql(table, op)
else
if sql = alter_table_op_sql(table, op)
"ALTER TABLE #{quote_schema_table(table)} #{sql}"
end
end
end
# Array of SQL statements used to modify the table,
# corresponding to changes specified by the operations.
def alter_table_sql_list(table, operations)
if supports_combining_alter_table_ops?
grouped_ops = []
last_combinable = false
operations.each do |op|
if combinable_alter_table_op?(op)
if sql = alter_table_op_sql(table, op)
grouped_ops << [] unless last_combinable
grouped_ops.last << sql
last_combinable = true
end
elsif sql = alter_table_sql(table, op)
Array(sql).each{|s| grouped_ops << s}
last_combinable = false
end
end
grouped_ops.map do |gop|
if gop.is_a?(Array)
"ALTER TABLE #{quote_schema_table(table)} #{gop.join(', ')}"
else
gop
end
end
else
operations.map{|op| alter_table_sql(table, op)}.flatten.compact
end
end
# The SQL string specifying the autoincrement property, generally used by
# primary keys.
def auto_increment_sql
'AUTOINCREMENT'
end
# The order of the column definition, as an array of symbols.
def column_definition_order
COLUMN_DEFINITION_ORDER
end
# SQL fragment containing the column creation SQL for the given column.
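# For example, a column hash like {:name=>:id, :type=>Integer,
# :null=>false} generally produces a fragment such as
# "id integer NOT NULL" (modulo identifier quoting).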
def column_definition_sql(column)
sql = String.new
sql << "#{quote_identifier(column[:name])} #{type_literal(column)}"
column_definition_order.each{|m| send(:"column_definition_#{m}_sql", sql, column)}
sql
end
# Add auto increment SQL fragment to column creation SQL.
def column_definition_auto_increment_sql(sql, column)
sql << " #{auto_increment_sql}" if column[:auto_increment]
end
# Add collate SQL fragment to column creation SQL.
def column_definition_collate_sql(sql, column)
if collate = column[:collate]
sql << " COLLATE #{collate}"
end
end
# Add default SQL fragment to column creation SQL.
def column_definition_default_sql(sql, column)
sql << " DEFAULT #{literal(column[:default])}" if column.include?(:default)
end
# Add null/not null SQL fragment to column creation SQL.
def column_definition_null_sql(sql, column)
null = column.fetch(:null, column[:allow_null])
if null.nil? && !can_add_primary_key_constraint_on_nullable_columns? && column[:primary_key]
null = false
end
case null
when false
sql << ' NOT NULL'
when true
sql << ' NULL'
end
end
# Add primary key SQL fragment to column creation SQL.
def column_definition_primary_key_sql(sql, column)
if column[:primary_key]
if name = column[:primary_key_constraint_name]
sql << " CONSTRAINT #{quote_identifier(name)}"
end
sql << " " << primary_key_constraint_sql_fragment(column)
constraint_deferrable_sql_append(sql, column[:primary_key_deferrable])
end
end
# Add foreign key reference SQL fragment to column creation SQL.
def column_definition_references_sql(sql, column)
if column[:table]
if name = column[:foreign_key_constraint_name]
sql << " CONSTRAINT #{quote_identifier(name)}"
end
sql << column_references_column_constraint_sql(column)
end
end
# Add unique constraint SQL fragment to column creation SQL.
def column_definition_unique_sql(sql, column)
if column[:unique]
if name = column[:unique_constraint_name]
sql << " CONSTRAINT #{quote_identifier(name)}"
end
sql << ' ' << unique_constraint_sql_fragment(column)
constraint_deferrable_sql_append(sql, column[:unique_deferrable])
end
end
# SQL for all given columns, used inside a CREATE TABLE block.
def column_list_sql(generator)
(generator.columns.map{|c| column_definition_sql(c)} + generator.constraints.map{|c| constraint_definition_sql(c)}).join(', ')
end
# SQL fragment for column foreign key references (column constraints)
def column_references_column_constraint_sql(column)
column_references_sql(column)
end
# SQL fragment for column foreign key references
def column_references_sql(column)
sql = String.new
sql << " REFERENCES #{quote_schema_table(column[:table])}"
sql << "(#{Array(column[:key]).map{|x| quote_identifier(x)}.join(', ')})" if column[:key]
sql << " ON DELETE #{on_delete_clause(column[:on_delete])}" if column[:on_delete]
sql << " ON UPDATE #{on_update_clause(column[:on_update])}" if column[:on_update]
constraint_deferrable_sql_append(sql, column[:deferrable])
sql
end
# SQL fragment for table foreign key references (table constraints)
def column_references_table_constraint_sql(constraint)
"FOREIGN KEY #{literal(constraint[:columns])}#{column_references_sql(constraint)}"
end
# Whether the given alter table operation is combinable.
def combinable_alter_table_op?(op)
COMBINABLE_ALTER_TABLE_OPS.include?(op[:op])
end
# SQL fragment specifying a constraint on a table.
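# For example, {:type=>:unique, :columns=>[:a, :b]} yields a fragment
# like "UNIQUE (a, b)", while {:type=>:check, :name=>:positive,
# :check=>'num > 0'} yields "CONSTRAINT positive CHECK (num > 0)"
# (modulo identifier quoting).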
def constraint_definition_sql(constraint)
sql = String.new
sql << "CONSTRAINT #{quote_identifier(constraint[:name])} " if constraint[:name]
case constraint[:type]
when :check
check = constraint[:check]
check = check.first if check.is_a?(Array) && check.length == 1
check = filter_expr(check)
check = "(#{check})" unless check[0..0] == '(' && check[-1..-1] == ')'
sql << "CHECK #{check}"
when :primary_key
sql << "#{primary_key_constraint_sql_fragment(constraint)} #{literal(constraint[:columns])}"
when :foreign_key
sql << column_references_table_constraint_sql(constraint.merge(:deferrable=>nil))
when :unique
sql << "#{unique_constraint_sql_fragment(constraint)} #{literal(constraint[:columns])}"
else
raise Error, "Invalid constraint type #{constraint[:type]}, should be :check, :primary_key, :foreign_key, or :unique"
end
constraint_deferrable_sql_append(sql, constraint[:deferrable])
sql
end
# SQL fragment specifying the deferrable constraint attributes.
def constraint_deferrable_sql_append(sql, defer)
case defer
when nil
when false
sql << ' NOT DEFERRABLE'
when :immediate
sql << ' DEFERRABLE INITIALLY IMMEDIATE'
else
sql << ' DEFERRABLE INITIALLY DEFERRED'
end
end
# Execute the create table statements using the generator.
def create_table_from_generator(name, generator, options)
execute_ddl(create_table_sql(name, generator, options))
end
# The class used for create_table generators.
def create_table_generator_class
Schema::CreateTableGenerator
end
# Execute the create index statements using the generator.
def create_table_indexes_from_generator(name, generator, options)
e = options[:ignore_index_errors] || options[:if_not_exists]
generator.indexes.each do |index|
begin
pr = proc{index_sql_list(name, [index]).each{|sql| execute_ddl(sql)}}
supports_transactional_ddl? ? transaction(:savepoint=>:only, &pr) : pr.call
rescue Error
raise unless e
end
end
end
# SQL statement for creating a table with the given name, columns, and options
def create_table_sql(name, generator, options)
unless supports_named_column_constraints?
# Split column constraints into table constraints if they have a name
generator.columns.each do |c|
if (constraint_name = c.delete(:foreign_key_constraint_name)) && (table = c.delete(:table))
opts = {}
opts[:name] = constraint_name
[:key, :on_delete, :on_update, :deferrable].each{|k| opts[k] = c[k]}
generator.foreign_key([c[:name]], table, opts)
end
if (constraint_name = c.delete(:unique_constraint_name)) && c.delete(:unique)
generator.unique(c[:name], :name=>constraint_name)
end
if (constraint_name = c.delete(:primary_key_constraint_name)) && c.delete(:primary_key)
generator.primary_key([c[:name]], :name=>constraint_name)
end
end
end
unless can_add_primary_key_constraint_on_nullable_columns?
if pk = generator.constraints.find{|op| op[:type] == :primary_key}
pk[:columns].each do |column|
if matched_column = generator.columns.find{|gc| gc[:name] == column}
matched_column[:null] = false
end
end
end
end
"#{create_table_prefix_sql(name, options)} (#{column_list_sql(generator)})"
end
# Run SQL statement to create the table with the given name from the given
# SELECT sql statement.
def create_table_as(name, sql, options)
sql = sql.sql if sql.is_a?(Sequel::Dataset)
run(create_table_as_sql(name, sql, options))
end
# SQL statement for creating a table from the result of a SELECT statement.
# +sql+ should be a string representing a SELECT query.
def create_table_as_sql(name, sql, options)
"#{create_table_prefix_sql(name, options)} AS #{sql}"
end
# SQL fragment for initial part of CREATE TABLE statement
def create_table_prefix_sql(name, options)
"CREATE #{temporary_table_sql if options[:temp]}TABLE#{' IF NOT EXISTS' if options[:if_not_exists]} #{options[:temp] ? quote_identifier(name) : quote_schema_table(name)}"
end
# SQL fragment for initial part of CREATE VIEW statement
def create_view_prefix_sql(name, options)
create_view_sql_append_columns("CREATE #{'OR REPLACE 'if options[:replace]}VIEW #{quote_schema_table(name)}", options[:columns])
end
# SQL statement for creating a view.
def create_view_sql(name, source, options)
source = source.sql if source.is_a?(Dataset)
sql = String.new
sql << "#{create_view_prefix_sql(name, options)} AS #{source}"
if check = options[:check]
sql << " WITH#{' LOCAL' if check == :local} CHECK OPTION"
end
sql
end
# Append the column list to the SQL, if a column list is given.
def create_view_sql_append_columns(sql, columns)
if columns
sql += ' ('
schema_utility_dataset.send(:identifier_list_append, sql, columns)
sql << ')'
end
sql
end
# Default index name for the table and columns, may be too long
# for certain databases.
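# For example, default_index_name(:items, [:name, :kind]) returns
# "items_name_kind_index".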
def default_index_name(table_name, columns)
schema, table = schema_and_table(table_name)
"#{"#{schema}_" if schema}#{table}_#{columns.map{|c| [String, Symbol].any?{|cl| c.is_a?(cl)} ? c : literal(c).gsub(/\W/, '_')}.join('_')}_index"
end
# Get foreign key name for given table and columns.
def foreign_key_name(table_name, columns)
keys = foreign_key_list(table_name).select{|key| key[:columns] == columns}
raise(Error, "#{keys.empty? ? 'Missing' : 'Ambiguous'} foreign key for #{columns.inspect}") unless keys.size == 1
keys.first[:name]
end
# The SQL to drop an index for the table.
def drop_index_sql(table, op)
"DROP INDEX #{quote_identifier(op[:name] || default_index_name(table, op[:columns]))}"
end
# SQL DDL statement to drop the table with the given name.
def drop_table_sql(name, options)
"DROP TABLE#{' IF EXISTS' if options[:if_exists]} #{quote_schema_table(name)}#{' CASCADE' if options[:cascade]}"
end
# SQL DDL statement to drop a view with the given name.
def drop_view_sql(name, options)
"DROP VIEW#{' IF EXISTS' if options[:if_exists]} #{quote_schema_table(name)}#{' CASCADE' if options[:cascade]}"
end
# Proxy the filter_expr call to the dataset, used for creating constraints.
# Support passing Proc arguments as blocks, as well as treating plain strings
# as literal strings, so that previous migrations that used this API do not break.
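# For example, filter_expr{num > 0} returns a literal SQL string
# such as "(num > 0)" (modulo identifier quoting).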
def filter_expr(arg=nil, &block)
if arg.is_a?(Proc) && !block
block = arg
arg = nil
elsif arg.is_a?(String)
arg = Sequel.lit(arg)
elsif arg.is_a?(Array)
if arg.first.is_a?(String)
arg = Sequel.lit(*arg)
elsif arg.length > 1
arg = Sequel.&(*arg)
end
end
schema_utility_dataset.literal(schema_utility_dataset.send(:filter_expr, arg, &block))
end
# SQL statement for creating an index for the table with the given name
# and index specifications.
def index_definition_sql(table_name, index)
index_name = index[:name] || default_index_name(table_name, index[:columns])
raise Error, "Index types are not supported for this database" if index[:type]
raise Error, "Partial indexes are not supported for this database" if index[:where] && !supports_partial_indexes?
"CREATE #{'UNIQUE ' if index[:unique]}INDEX #{quote_identifier(index_name)} ON #{quote_schema_table(table_name)} #{literal(index[:columns])}#{" WHERE #{filter_expr(index[:where])}" if index[:where]}"
end
# Array of SQL statements, one for each index specification,
# for the given table.
def index_sql_list(table_name, indexes)
indexes.map{|i| index_definition_sql(table_name, i)}
end
# Extract the join table name from the arguments given to create_join_table.
# Also does argument validation for the create_join_table method.
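# For example, join_table_name({cat_id: :cats, dog_id: :dogs}, OPTS)
# returns "cats_dogs", as table names are sorted before joining.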
def join_table_name(hash, options)
entries = hash.values
raise Error, "must have 2 entries in hash given to (create|drop)_join_table" unless entries.length == 2
if options[:name]
options[:name]
else
table_names = entries.map{|e| join_table_name_extract(e)}
table_names.map(&:to_s).sort.join('_')
end
end
# Extract an individual join table name, which should either be a string
# or symbol, or a hash containing one of those as the value for :table.
def join_table_name_extract(entry)
case entry
when Symbol, String
entry
when Hash
join_table_name_extract(entry[:table])
else
raise Error, "can't extract table name from #{entry.inspect}"
end
end
# SQL fragment to use for ON DELETE, based on the given action.
# The following actions are recognized:
#
# :cascade :: Delete rows referencing this row.
# :no_action :: Raise an error if other rows reference this
# row, allow deferring of the integrity check.
# This is the default.
# :restrict :: Raise an error if other rows reference this row,
# but do not allow deferring the integrity check.
# :set_default :: Set columns referencing this row to their default value.
# :set_null :: Set columns referencing this row to NULL.
#
# Any other object given is just converted to a string, with "_" converted to " " and upcased.
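# For example, :set_null becomes "SET NULL".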
def on_delete_clause(action)
action.to_s.gsub("_", " ").upcase
end
# Alias of #on_delete_clause, since the two usually behave the same.
def on_update_clause(action)
on_delete_clause(action)
end
# Add fragment for primary key specification, separated for easier overriding.
def primary_key_constraint_sql_fragment(_)
'PRIMARY KEY'
end
# Proxy the quote_schema_table method to the dataset
def quote_schema_table(table)
schema_utility_dataset.quote_schema_table(table)
end
# SQL statement for renaming a table.
def rename_table_sql(name, new_name)
"ALTER TABLE #{quote_schema_table(name)} RENAME TO #{quote_schema_table(new_name)}"
end
# Split the schema information from the table
def schema_and_table(table_name)
schema_utility_dataset.schema_and_table(table_name)
end
# Return true if the given column schema represents an autoincrementing primary key.
def schema_autoincrementing_primary_key?(schema)
!!(schema[:primary_key] && schema[:auto_increment])
end
# The dataset to use for proxying certain schema methods.
def schema_utility_dataset
@default_dataset
end
# Split the schema information from the table
def split_qualifiers(table_name)
schema_utility_dataset.split_qualifiers(table_name)
end
# SQL fragment for temporary table
def temporary_table_sql
'TEMPORARY '
end
# SQL fragment specifying the type of a given column.
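# For example, {:type=>String, :size=>50} becomes "varchar(50)",
# while {:type=>:decimal, :size=>[10, 2]} becomes "decimal(10, 2)".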
def type_literal(column)
case column[:type]
when Class
type_literal_generic(column)
when :Bignum
type_literal_generic_bignum_symbol(column)
else
type_literal_specific(column)
end
end
# SQL fragment specifying the full type of a column,
# considering the type with possible modifiers.
def type_literal_generic(column)
meth = "type_literal_generic_#{column[:type].name.to_s.downcase}"
if respond_to?(meth, true)
# Allow calling private methods, as type literal generic methods are private
send(meth, column)
else
raise Error, "Unsupported ruby class used as database type: #{column[:type]}"
end
end
# Alias for type_literal_generic_numeric, to make overriding in a subclass easier.
def type_literal_generic_bigdecimal(column)
type_literal_generic_numeric(column)
end
# Sequel uses the bigint type by default for :Bignum symbol.
def type_literal_generic_bignum_symbol(column)
:bigint
end
# Sequel uses the date type by default for Dates.
def type_literal_generic_date(column)
:date
end
# Sequel uses the timestamp type by default for DateTimes.
def type_literal_generic_datetime(column)
:timestamp
end
# Alias for type_literal_generic_trueclass, to make overriding in a subclass easier.
def type_literal_generic_falseclass(column)
type_literal_generic_trueclass(column)
end
# Sequel uses the blob type by default for Files.
def type_literal_generic_file(column)
:blob
end
# Alias for type_literal_generic_integer, to make overriding in a subclass easier.
def type_literal_generic_fixnum(column)
type_literal_generic_integer(column)
end
# Sequel uses the double precision type by default for Floats.
def type_literal_generic_float(column)
:"double precision"
end
# Sequel uses the integer type by default for integers
def type_literal_generic_integer(column)
:integer
end
# Sequel uses the numeric type by default for Numerics and BigDecimals.
# If a size is given, it is used, otherwise, it will default to whatever
# the database default is for an unsized value.
def type_literal_generic_numeric(column)
column[:size] ? "numeric(#{Array(column[:size]).join(', ')})" : :numeric
end
# Sequel uses the varchar type by default for Strings. If a
# size isn't present, Sequel assumes a size of 255. If the
# :fixed option is used, Sequel uses the char type. If the
# :text option is used, Sequel uses the :text type.
def type_literal_generic_string(column)
if column[:text]
uses_clob_for_text? ? :clob : :text
elsif column[:fixed]
"char(#{column[:size]||default_string_column_size})"
else
"varchar(#{column[:size]||default_string_column_size})"
end
end
# Sequel uses the timestamp type by default for Time values.
# If the :only_time option is used, the time type is used.
def type_literal_generic_time(column)
if column[:only_time]
type_literal_generic_only_time(column)
else
type_literal_generic_datetime(column)
end
end
# Use time by default for Time values if :only_time option is used.
def type_literal_generic_only_time(column)
:time
end
# Sequel uses the boolean type by default for TrueClass and FalseClass.
def type_literal_generic_trueclass(column)
:boolean
end
# SQL fragment for the given type of a column if the column is not one of the
# generic types specified with a ruby class.
def type_literal_specific(column)
type = column[:type]
type = "double precision" if type.to_s == 'double'
column[:size] ||= default_string_column_size if type.to_s == 'varchar'
elements = column[:size] || column[:elements]
"#{type}#{literal(Array(elements)) if elements}#{' UNSIGNED' if column[:unsigned]}"
end
# Add fragment for unique specification, separated for easier overriding.
def unique_constraint_sql_fragment(_)
'UNIQUE'
end
# Whether clob should be used for String text: true columns.
def uses_clob_for_text?
false
end
end
end
sequel-5.63.0/lib/sequel/database/transactions.rb 0000664 0000000 0000000 00000050150 14342141206 0021746 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Database
# ---------------------
# :section: 8 - Methods related to database transactions
# Database transactions make multiple queries atomic, so
# that either all of the queries take effect or none of
# them do.
# ---------------------
TRANSACTION_ISOLATION_LEVELS = {:uncommitted=>'READ UNCOMMITTED'.freeze,
:committed=>'READ COMMITTED'.freeze,
:repeatable=>'REPEATABLE READ'.freeze,
:serializable=>'SERIALIZABLE'.freeze}.freeze
# The default transaction isolation level for this database,
# used for all future transactions. For MSSQL, this should be set
# to something if you ever plan to use the :isolation option to
# Database#transaction, as on MSSQL it affects all future transactions
# on the same connection.
attr_accessor :transaction_isolation_level
# If a transaction is not currently in progress, yield to the block immediately.
# Otherwise, add the block to the list of blocks to call after the currently
# in progress transaction commits (and only if it commits).
# Options:
# :savepoint :: If currently inside a savepoint, only run this hook on transaction
# commit if all enclosing savepoints have been released.
# :server :: The server/shard to use.
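#
# A minimal usage sketch (assuming a hypothetical items table):
#
# DB.transaction do
# DB[:items].insert(name: 'abc')
# DB.after_commit{puts "committed"}
# end
# # "committed" is printed only if the transaction commits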
def after_commit(opts=OPTS, &block)
raise Error, "must provide block to after_commit" unless block
synchronize(opts[:server]) do |conn|
if h = _trans(conn)
raise Error, "cannot call after_commit in a prepared transaction" if h[:prepare]
if opts[:savepoint] && in_savepoint?(conn)
add_savepoint_hook(conn, :after_commit, block)
else
add_transaction_hook(conn, :after_commit, block)
end
else
yield
end
end
end
# If a transaction is not currently in progress, ignore the block.
# Otherwise, add the block to the list of the blocks to call after the currently
# in progress transaction rolls back (and only if it rolls back).
# Options:
# :savepoint :: If currently inside a savepoint, run this hook immediately when
# any enclosing savepoint is rolled back, which may be before the transaction
# commits or rolls back.
# :server :: The server/shard to use.
def after_rollback(opts=OPTS, &block)
raise Error, "must provide block to after_rollback" unless block
synchronize(opts[:server]) do |conn|
if h = _trans(conn)
raise Error, "cannot call after_rollback in a prepared transaction" if h[:prepare]
if opts[:savepoint] && in_savepoint?(conn)
add_savepoint_hook(conn, :after_rollback, block)
else
add_transaction_hook(conn, :after_rollback, block)
end
end
end
end
# When exiting the transaction block through methods other than an exception
# (e.g. normal exit, non-local return, or throw), set the current transaction
# to roll back instead of committing. This is designed for use in cases where
# you want to perform a non-local return but also want to roll back instead of
# committing.
# Options:
# :cancel :: Cancel the current rollback_on_exit setting, so exiting will commit instead
# of rolling back.
# :savepoint :: Rollback only the current savepoint if inside a savepoint.
# Can also be a positive integer value to roll back that number of enclosing savepoints,
# up to and including the transaction itself.
# If the database does not support savepoints, this option is ignored and the entire
# transaction is affected.
# :server :: The server/shard the transaction is being executed on.
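#
# A brief sketch (assuming a hypothetical accounts table):
#
# DB.transaction do
# DB[:accounts].update(active: false)
# DB.rollback_on_exit
# end # the transaction is rolled back instead of committed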
def rollback_on_exit(opts=OPTS)
synchronize(opts[:server]) do |conn|
raise Error, "Cannot call Sequel:: Database#rollback_on_exit unless inside a transaction" unless h = _trans(conn)
rollback = !opts[:cancel]
if supports_savepoints?
savepoints = h[:savepoints]
if level = opts[:savepoint]
level = 1 if level == true
raise Error, "invalid :savepoint option to Database#rollback_on_exit: #{level.inspect}" unless level.is_a?(Integer)
raise Error, "cannot pass nonpositive integer (#{level.inspect}) as :savepoint option to Database#rollback_on_exit" if level < 1
level.times do |i|
break unless savepoint = savepoints[-1 - i]
savepoint[:rollback_on_exit] = rollback
end
else
savepoints[0][:rollback_on_exit] = rollback
end
else
h[:rollback_on_exit] = rollback
end
end
nil
end
# Return true if already in a transaction given the options,
# false otherwise. Respects the :server option for selecting
# a shard.
def in_transaction?(opts=OPTS)
synchronize(opts[:server]){|conn| !!_trans(conn)}
end
# Returns a proc that you can call to check if the transaction
# has been rolled back. The proc will return nil if the
# transaction is still in progress, true if the transaction was
# rolled back, and false if it was committed. Raises an
# Error if called outside a transaction. Respects the :server
# option for selecting a shard.
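#
# A minimal sketch:
#
# rb = nil
# DB.transaction do
# rb = DB.rollback_checker
# rb.call # => nil, transaction still in progress
# end
# rb.call # => false, transaction committed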
def rollback_checker(opts=OPTS)
synchronize(opts[:server]) do |conn|
raise Error, "not in a transaction" unless t = _trans(conn)
t[:rollback_checker] ||= proc{Sequel.synchronize{t[:rolled_back]}}
end
end
# Starts a database transaction. When a database transaction is used,
# either all statements are successful or none of the statements are
# successful. Note that MySQL MyISAM tables do not support transactions.
#
# The following general options are respected:
#
# :auto_savepoint :: Automatically use a savepoint for Database#transaction calls
# inside this transaction block.
# :isolation :: The transaction isolation level to use for this transaction,
# should be :uncommitted, :committed, :repeatable, or :serializable,
# used if given and the database/adapter supports customizable
# transaction isolation levels.
# :num_retries :: The number of times to retry if the :retry_on option is used.
# The default is 5 times. Can be set to nil to retry indefinitely,
# but that is not recommended.
# :before_retry :: Proc to execute before retrying if the :retry_on option is used.
# Called with two arguments: the number of retry attempts (counting
# the current one) and the error the last attempt failed with.
# :prepare :: A string to use as the transaction identifier for a
# prepared transaction (two-phase commit), if the database/adapter
# supports prepared transactions.
# :retry_on :: An exception class or array of exception classes for which to
# automatically retry the transaction. Can only be set if not inside
# an existing transaction.
# Note that this should not be used unless the entire transaction
# block is idempotent, as otherwise it can cause non-idempotent
# behavior to execute multiple times.
# :rollback :: Can be set to :reraise to reraise any Sequel::Rollback exceptions
# raised, or :always to always rollback even if no exceptions occur
# (useful for testing).
# :server :: The server to use for the transaction. Set to :default, :read_only, or
# whatever symbol you used in the connect string when naming your servers.
# :savepoint :: Whether to create a new savepoint for this transaction,
# only respected if the database/adapter supports savepoints. By
# default Sequel will reuse an existing transaction, so if you want to
# use a savepoint you must use this option. If the surrounding transaction
# uses :auto_savepoint, you can set this to false to not use a savepoint.
# If the value given for this option is :only, it will only create a
# savepoint if it is inside a transaction.
#
# PostgreSQL specific options:
#
# :deferrable :: (9.1+) If present, set to DEFERRABLE if true or NOT DEFERRABLE if false.
# :read_only :: If present, set to READ ONLY if true or READ WRITE if false.
# :synchronous :: If non-nil, set synchronous_commit
# appropriately. Valid values are true, :on, false, :off, :local (9.1+),
# and :remote_write (9.2+).
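#
# Usage sketches (assuming a hypothetical items table):
#
# DB.transaction do
# DB[:items].insert(name: 'abc')
# end
#
# # A nested call reuses the enclosing transaction unless :savepoint
# # is given:
# DB.transaction do
# DB.transaction(savepoint: true) do
# raise Sequel::Rollback # rolls back only to the savepoint
# end
# end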
def transaction(opts=OPTS, &block)
opts = Hash[opts]
if retry_on = opts[:retry_on]
tot_retries = opts.fetch(:num_retries, 5)
num_retries = 0
begin
opts[:retry_on] = nil
opts[:retrying] = true
transaction(opts, &block)
rescue *retry_on => e
num_retries += 1
if tot_retries.nil? || num_retries <= tot_retries
opts[:before_retry].call(num_retries, e) if opts[:before_retry]
retry
end
raise
end
else
synchronize(opts[:server]) do |conn|
if opts[:savepoint] == :only
if supports_savepoints?
if _trans(conn)
opts[:savepoint] = true
else
return yield(conn)
end
else
opts[:savepoint] = false
end
end
if opts[:savepoint] && !supports_savepoints?
raise Sequel::InvalidOperation, "savepoints not supported on #{database_type}"
end
if already_in_transaction?(conn, opts)
if opts[:rollback] == :always && !opts.has_key?(:savepoint)
if supports_savepoints?
opts[:savepoint] = true
else
raise Sequel::Error, "cannot set :rollback=>:always transaction option if already inside a transaction"
end
end
if opts[:savepoint] != false && (stack = _trans(conn)[:savepoints]) && stack.last[:auto_savepoint]
opts[:savepoint] = true
end
unless opts[:savepoint]
if opts[:retrying]
raise Sequel::Error, "cannot set :retry_on options if you are already inside a transaction"
end
return yield(conn)
end
end
_transaction(conn, opts, &block)
end
end
end
private
# Internal generic transaction method. Any exception raised by the given
# block will cause the transaction to be rolled back. If the exception is
# not a Sequel::Rollback, the error will be reraised. If no exception occurs
# inside the block, the transaction is committed.
def _transaction(conn, opts=OPTS)
rollback = opts[:rollback]
begin
add_transaction(conn, opts)
begin_transaction(conn, opts)
if rollback == :always
begin
ret = yield(conn)
rescue Exception => e1
raise e1
ensure
raise ::Sequel::Rollback unless e1
end
else
yield(conn)
end
rescue Exception => e
begin
rollback_transaction(conn, opts)
rescue Exception => e3
end
transaction_error(e, :conn=>conn, :rollback=>rollback)
raise e3 if e3
ret
ensure
begin
committed = commit_or_rollback_transaction(e, conn, opts)
rescue Exception => e2
begin
raise_error(e2, :classes=>database_error_classes, :conn=>conn)
rescue Sequel::DatabaseError => e4
begin
rollback_transaction(conn, opts)
ensure
raise e4
end
end
ensure
remove_transaction(conn, committed)
end
end
end
# Synchronize access to the current transactions, returning the hash
# of options for the current transaction (if any)
def _trans(conn)
Sequel.synchronize{@transactions[conn]}
end
# Add the current thread to the list of active transactions
def add_transaction(conn, opts)
hash = transaction_options(conn, opts)
if supports_savepoints?
if t = _trans(conn)
t[:savepoints].push({:auto_savepoint=>opts[:auto_savepoint]})
return
else
hash[:savepoints] = [{:auto_savepoint=>opts[:auto_savepoint]}]
if (prep = opts[:prepare]) && supports_prepared_transactions?
hash[:prepare] = prep
end
end
elsif (prep = opts[:prepare]) && supports_prepared_transactions?
hash[:prepare] = prep
end
Sequel.synchronize{@transactions[conn] = hash}
end
# Set the given callable as a hook to be called. Type should be either
# :after_commit or :after_rollback.
def add_savepoint_hook(conn, type, block)
savepoint = _trans(conn)[:savepoints].last
(savepoint[type] ||= []) << block
end
# Set the given callable as a hook to be called. Type should be either
# :after_commit or :after_rollback.
def add_transaction_hook(conn, type, block)
hooks = _trans(conn)[type] ||= []
hooks << block
end
# Whether the given connection is already inside a transaction
def already_in_transaction?(conn, opts)
_trans(conn) && (!supports_savepoints? || !opts[:savepoint])
end
# Derive the transaction hash from the options passed to the transaction.
# Meant to be overridden.
def transaction_options(conn, opts)
{}
end
# Issue query to begin a new savepoint.
def begin_savepoint(conn, opts)
log_connection_execute(conn, begin_savepoint_sql(savepoint_level(conn)-1))
end
# SQL to start a new savepoint
def begin_savepoint_sql(depth)
"SAVEPOINT autopoint_#{depth}"
end
# Start a new database transaction on the given connection
def begin_new_transaction(conn, opts)
log_connection_execute(conn, begin_transaction_sql)
set_transaction_isolation(conn, opts)
end
# Start a new database transaction or a new savepoint on the given connection.
def begin_transaction(conn, opts=OPTS)
if in_savepoint?(conn)
begin_savepoint(conn, opts)
else
begin_new_transaction(conn, opts)
end
end
# SQL to BEGIN a transaction.
def begin_transaction_sql
'BEGIN'
end
# Whether to commit the current transaction. Thread.current.status is
# checked because Thread#kill skips rescue blocks (so exception would be
# nil), but the transaction should still be rolled back. On Ruby 1.9 (but
# not 2.0+), the thread status will still be "run", so Thread#kill
# will erroneously commit the transaction, and there isn't a workaround.
def commit_or_rollback_transaction(exception, conn, opts)
if exception
false
else
if rollback_on_transaction_exit?(conn, opts)
rollback_transaction(conn, opts)
false
else
commit_transaction(conn, opts)
true
end
end
end
# SQL to commit a savepoint
def commit_savepoint_sql(depth)
"RELEASE SAVEPOINT autopoint_#{depth}"
end
# Commit the active transaction on the connection
def commit_transaction(conn, opts=OPTS)
if supports_savepoints?
depth = savepoint_level(conn)
log_connection_execute(conn, depth > 1 ? commit_savepoint_sql(depth-1) : commit_transaction_sql)
else
log_connection_execute(conn, commit_transaction_sql)
end
end
# SQL to COMMIT a transaction.
def commit_transaction_sql
'COMMIT'
end
# Method called on the connection object to execute SQL on the database,
# used by the transaction code.
def connection_execute_method
:execute
end
# Which transaction errors to translate, blank by default.
def database_error_classes
[]
end
# Whether the connection is currently inside a savepoint.
def in_savepoint?(conn)
supports_savepoints? && savepoint_level(conn) > 1
end
# Retrieve the savepoint hooks that should be run for the given
# connection and commit status. This expects that you are
# already inside a savepoint when calling.
def savepoint_hooks(conn, committed)
_trans(conn)[:savepoints].last[committed ? :after_commit : :after_rollback]
end
# Retrieve the transaction hooks that should be run for the given
# connection and commit status.
def transaction_hooks(conn, committed)
unless in_savepoint?(conn)
_trans(conn)[committed ? :after_commit : :after_rollback]
end
end
# Remove the current thread from the list of active transactions
def remove_transaction(conn, committed)
callbacks = transaction_hooks(conn, committed)
if in_savepoint?(conn)
savepoint_callbacks = savepoint_hooks(conn, committed)
if committed
savepoint_rollback_callbacks = savepoint_hooks(conn, false)
end
end
if transaction_finished?(conn)
h = _trans(conn)
rolled_back = !committed
Sequel.synchronize{h[:rolled_back] = rolled_back}
Sequel.synchronize{@transactions.delete(conn)}
elsif savepoint_callbacks || savepoint_rollback_callbacks
if committed
meth = in_savepoint?(conn) ? :add_savepoint_hook : :add_transaction_hook
if savepoint_callbacks
savepoint_callbacks.each do |block|
send(meth, conn, :after_commit, block)
end
end
if savepoint_rollback_callbacks
savepoint_rollback_callbacks.each do |block|
send(meth, conn, :after_rollback, block)
end
end
else
savepoint_callbacks.each(&:call)
end
end
callbacks.each(&:call) if callbacks
end
# SQL to rollback to a savepoint
def rollback_savepoint_sql(depth)
"ROLLBACK TO SAVEPOINT autopoint_#{depth}"
end
# Whether to rollback the transaction when exiting the transaction.
def rollback_on_transaction_exit?(conn, opts)
return true if Thread.current.status == 'aborting'
h = _trans(conn)
if supports_savepoints?
h[:savepoints].last[:rollback_on_exit]
else
h[:rollback_on_exit]
end
end
# Rollback the active transaction on the connection
def rollback_transaction(conn, opts=OPTS)
if supports_savepoints?
depth = savepoint_level(conn)
log_connection_execute(conn, depth > 1 ? rollback_savepoint_sql(depth-1) : rollback_transaction_sql)
else
log_connection_execute(conn, rollback_transaction_sql)
end
end
# SQL to ROLLBACK a transaction.
def rollback_transaction_sql
'ROLLBACK'
end
# Set the transaction isolation level on the given connection
def set_transaction_isolation(conn, opts)
if supports_transaction_isolation_levels? and level = opts.fetch(:isolation, transaction_isolation_level)
log_connection_execute(conn, set_transaction_isolation_sql(level))
end
end
# SQL to set the transaction isolation level
def set_transaction_isolation_sql(level)
"SET TRANSACTION ISOLATION LEVEL #{TRANSACTION_ISOLATION_LEVELS[level]}"
end
# Current savepoint level.
def savepoint_level(conn)
_trans(conn)[:savepoints].length
end
# Raise a database error unless the exception is an Rollback.
def transaction_error(e, opts=OPTS)
if e.is_a?(Rollback)
raise e if opts[:rollback] == :reraise
else
raise_error(e, opts.merge(:classes=>database_error_classes))
end
end
# Finish a subtransaction. If savepoints are supported, pops the current
# transaction off the savepoint stack.
def transaction_finished?(conn)
if supports_savepoints?
stack = _trans(conn)[:savepoints]
stack.pop
stack.empty?
else
true
end
end
end
end
sequel-5.63.0/lib/sequel/dataset.rb 0000664 0000000 0000000 00000004107 14342141206 0017120 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
# A dataset represents an SQL query. Datasets
# can be used to select, insert, update and delete records.
#
# Query results are always retrieved on demand, so a dataset can be kept
# around and reused indefinitely (datasets never cache results):
#
# my_posts = DB[:posts].where(author: 'david') # no records are retrieved
# my_posts.all # records are retrieved
# my_posts.all # records are retrieved again
#
# Datasets are frozen and use a functional style where modification methods
# return modified copies of the dataset. This allows you to reuse
# datasets:
#
# posts = DB[:posts]
# davids_posts = posts.where(author: 'david')
# old_posts = posts.where{stamp < Date.today - 7}
# davids_old_posts = davids_posts.where{stamp < Date.today - 7}
#
# Datasets are Enumerable objects, so they can be manipulated using many
# of the Enumerable methods, such as +map+ and +inject+. Note that there are some methods
# that Dataset defines that override methods defined in Enumerable and result in different
# behavior, such as +select+ and +group_by+.
#
# For more information, see the {"Dataset Basics" guide}[rdoc-ref:doc/dataset_basics.rdoc].
class Dataset
OPTS = Sequel::OPTS
# Whether Dataset#freeze can actually freeze datasets. True only on ruby 2.4+,
# as it requires clone(freeze: false)
TRUE_FREEZE = RUBY_VERSION >= '2.4'
include Enumerable
include SQL::AliasMethods
include SQL::BooleanMethods
include SQL::CastMethods
include SQL::ComplexExpressionMethods
include SQL::InequalityMethods
include SQL::NumericMethods
include SQL::OrderMethods
include SQL::StringMethods
end
require_relative "dataset/query"
require_relative "dataset/actions"
require_relative "dataset/features"
require_relative "dataset/graph"
require_relative "dataset/prepared_statements"
require_relative "dataset/misc"
require_relative "dataset/sql"
require_relative "dataset/placeholder_literalizer"
require_relative "dataset/dataset_module"
end
sequel-5.63.0/lib/sequel/dataset/ 0000775 0000000 0000000 00000000000 14342141206 0016571 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/dataset/actions.rb 0000664 0000000 0000000 00000142440 14342141206 0020563 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# ---------------------
# :section: 2 - Methods that execute code on the database
# These methods all execute the dataset's SQL on the database.
# They don't return modified datasets, so if used in a method chain
# they should be the last method called.
# ---------------------
# Action methods defined by Sequel that execute code on the database.
ACTION_METHODS = (<<-METHS).split.map(&:to_sym).freeze
<< [] all as_hash avg count columns columns! delete each
empty? fetch_rows first first! get import insert last
map max min multi_insert paged_each select_hash select_hash_groups select_map select_order_map
single_record single_record! single_value single_value! sum to_hash to_hash_groups truncate update
where_all where_each where_single_value
METHS
# The clone options to use when retrieving columns for a dataset.
COLUMNS_CLONE_OPTIONS = {:distinct => nil, :limit => 0, :offset=>nil, :where=>nil, :having=>nil, :order=>nil, :row_proc=>nil, :graph=>nil, :eager_graph=>nil}.freeze
# Inserts the given argument into the database. Returns self so it
# can be used safely when chaining:
#
# DB[:items] << {id: 0, name: 'Zero'} << DB[:old_items].select(:id, name)
def <<(arg)
insert(arg)
self
end
# Returns the first record matching the conditions. Examples:
#
# DB[:table][id: 1] # SELECT * FROM table WHERE (id = 1) LIMIT 1
# # => {:id=>1}
def [](*conditions)
raise(Error, 'You cannot call Dataset#[] with an integer or with no arguments') if (conditions.length == 1 and conditions.first.is_a?(Integer)) or conditions.length == 0
first(*conditions)
end
# Returns an array with all records in the dataset. If a block is given,
# the array is iterated over after all items have been loaded.
#
# DB[:table].all # SELECT * FROM table
# # => [{:id=>1, ...}, {:id=>2, ...}, ...]
#
# # Iterate over all rows in the table
# DB[:table].all{|row| p row}
def all(&block)
_all(block){|a| each{|r| a << r}}
end
# Returns the average value for the given column/expression.
# Uses a virtual row block if no argument is given.
#
# DB[:table].avg(:number) # SELECT avg(number) FROM table LIMIT 1
# # => 3
# DB[:table].avg{function(column)} # SELECT avg(function(column)) FROM table LIMIT 1
# # => 1
def avg(arg=(no_arg = true), &block)
arg = Sequel.virtual_row(&block) if no_arg
_aggregate(:avg, arg)
end
# Returns the columns in the result set in order as an array of symbols.
# If the columns are currently cached, returns the cached value. Otherwise,
# a SELECT query is performed to retrieve a single row in order to get the columns.
#
# If you are looking for all columns for a single table and maybe some information about
# each column (e.g. database type), see Database#schema.
#
# DB[:table].columns
# # => [:id, :name]
def columns
_columns || columns!
end
# Ignore any cached column information and perform a query to retrieve
# a row in order to get the columns.
#
# DB[:table].columns!
# # => [:id, :name]
def columns!
ds = clone(COLUMNS_CLONE_OPTIONS)
ds.each{break}
if cols = ds.cache[:_columns]
self.columns = cols
else
[]
end
end
COUNT_SELECT = Sequel.function(:count).*.as(:count)
# Returns the number of records in the dataset. If an argument is provided,
# it is used as the argument to count. If a block is provided, it is
# treated as a virtual row, and the result is used as the argument to
# count.
#
# DB[:table].count # SELECT count(*) AS count FROM table LIMIT 1
# # => 3
# DB[:table].count(:column) # SELECT count(column) AS count FROM table LIMIT 1
# # => 2
# DB[:table].count{foo(column)} # SELECT count(foo(column)) AS count FROM table LIMIT 1
# # => 1
def count(arg=(no_arg=true), &block)
if no_arg && !block
cached_dataset(:_count_ds) do
aggregate_dataset.select(COUNT_SELECT).single_value_ds
end.single_value!.to_i
else
if block
if no_arg
arg = Sequel.virtual_row(&block)
else
raise Error, 'cannot provide both argument and block to Dataset#count'
end
end
_aggregate(:count, arg)
end
end
# Deletes the records in the dataset, returning the number of records deleted.
#
# DB[:table].delete # DELETE FROM table
# # => 3
#
# Some databases support using multiple tables in a DELETE query. This requires
# multiple FROM tables (JOINs can also be used). As multiple FROM tables use
# an implicit CROSS JOIN, you should make sure your WHERE condition uses the
# appropriate filters for the FROM tables:
#
# DB.from(:a, :b).join(:c, :d=>Sequel[:b][:e]).where{{a[:f]=>b[:g], a[:id]=>c[:h]}}.
# delete
# # DELETE FROM a
# # USING b
# # INNER JOIN c ON (c.d = b.e)
# # WHERE ((a.f = b.g) AND (a.id = c.h))
def delete(&block)
sql = delete_sql
if uses_returning?(:delete)
returning_fetch_rows(sql, &block)
else
execute_dui(sql)
end
end
# Iterates over the records in the dataset as they are yielded from the
# database adapter, and returns self.
#
# DB[:table].each{|row| p row} # SELECT * FROM table
#
# Note that this method is not safe to use on many adapters if you are
# running additional queries inside the provided block. If you are
# running queries inside the block, you should use +all+ instead of +each+
# for the outer queries, or use a separate thread or shard inside +each+.
def each
if rp = row_proc
fetch_rows(select_sql){|r| yield rp.call(r)}
else
fetch_rows(select_sql){|r| yield r}
end
self
end
EMPTY_SELECT = Sequel::SQL::AliasedExpression.new(1, :one)
# Returns true if no records exist in the dataset, false otherwise
#
# DB[:table].empty? # SELECT 1 AS one FROM table LIMIT 1
# # => false
def empty?
cached_dataset(:_empty_ds) do
single_value_ds.unordered.select(EMPTY_SELECT)
end.single_value!.nil?
end
# Returns the first matching record if no arguments are given.
# If an integer argument is given, it is interpreted as a limit, and
# all matching records up to that limit are returned. If any other type of
# argument(s) is passed, it is treated as a filter and the
# first matching record is returned. If a block is given, it is used
# to filter the dataset before returning anything.
#
# If there are no records in the dataset, returns nil (or an empty
# array if an integer argument is given).
#
# Examples:
#
# DB[:table].first # SELECT * FROM table LIMIT 1
# # => {:id=>7}
#
# DB[:table].first(2) # SELECT * FROM table LIMIT 2
# # => [{:id=>6}, {:id=>4}]
#
# DB[:table].first(id: 2) # SELECT * FROM table WHERE (id = 2) LIMIT 1
# # => {:id=>2}
#
# DB[:table].first(Sequel.lit("id = 3")) # SELECT * FROM table WHERE (id = 3) LIMIT 1
# # => {:id=>3}
#
# DB[:table].first(Sequel.lit("id = ?", 4)) # SELECT * FROM table WHERE (id = 4) LIMIT 1
# # => {:id=>4}
#
# DB[:table].first{id > 2} # SELECT * FROM table WHERE (id > 2) LIMIT 1
# # => {:id=>5}
#
# DB[:table].first(Sequel.lit("id > ?", 4)){id < 6} # SELECT * FROM table WHERE ((id > 4) AND (id < 6)) LIMIT 1
# # => {:id=>5}
#
# DB[:table].first(2){id < 2} # SELECT * FROM table WHERE (id < 2) LIMIT 2
# # => [{:id=>1}]
def first(*args, &block)
case args.length
when 0
unless block
return single_record
end
when 1
arg = args[0]
if arg.is_a?(Integer)
res = if block
if loader = cached_placeholder_literalizer(:_first_integer_cond_loader) do |pl|
where(pl.arg).limit(pl.arg)
end
loader.all(filter_expr(&block), arg)
else
where(&block).limit(arg).all
end
else
if loader = cached_placeholder_literalizer(:_first_integer_loader) do |pl|
limit(pl.arg)
end
loader.all(arg)
else
limit(arg).all
end
end
return res
end
where_args = args
args = arg
end
if loader = cached_where_placeholder_literalizer(where_args||args, block, :_first_cond_loader) do |pl|
_single_record_ds.where(pl.arg)
end
loader.first(filter_expr(args, &block))
else
_single_record_ds.where(args, &block).single_record!
end
end
# Calls first. If first returns nil (signaling that no
# row matches), raise a Sequel::NoMatchingRow exception.
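#
# For example (mirroring first's argument handling):
#
# DB[:table].first!(id: 1) # SELECT * FROM table WHERE (id = 1) LIMIT 1
# # => {:id=>1}
# DB[:table].first!(id: 0) # raises Sequel::NoMatchingRow if no row matches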
def first!(*args, &block)
first(*args, &block) || raise(Sequel::NoMatchingRow.new(self))
end
# Return the column value for the first matching record in the dataset.
# Raises an error if both an argument and block is given.
#
# DB[:table].get(:id) # SELECT id FROM table LIMIT 1
# # => 3
#
# ds.get{sum(id)} # SELECT sum(id) AS v FROM table LIMIT 1
# # => 6
#
# You can pass an array of arguments to return multiple arguments,
# but you must make sure each element in the array has an alias that
# Sequel can determine:
#
# DB[:table].get([:id, :name]) # SELECT id, name FROM table LIMIT 1
# # => [3, 'foo']
#
# DB[:table].get{[sum(id).as(sum), name]} # SELECT sum(id) AS sum, name FROM table LIMIT 1
# # => [6, 'foo']
def get(column=(no_arg=true; nil), &block)
ds = naked
if block
raise(Error, 'Must call Dataset#get with an argument or a block, not both') unless no_arg
ds = ds.select(&block)
column = ds.opts[:select]
column = nil if column.is_a?(Array) && column.length < 2
else
case column
when Array
ds = ds.select(*column)
when LiteralString, Symbol, SQL::Identifier, SQL::QualifiedIdentifier, SQL::AliasedExpression
if loader = cached_placeholder_literalizer(:_get_loader) do |pl|
ds.single_value_ds.select(pl.arg)
end
return loader.get(column)
end
ds = ds.select(column)
else
if loader = cached_placeholder_literalizer(:_get_alias_loader) do |pl|
ds.single_value_ds.select(Sequel.as(pl.arg, :v))
end
return loader.get(column)
end
ds = ds.select(Sequel.as(column, :v))
end
end
if column.is_a?(Array)
if r = ds.single_record
r.values_at(*hash_key_symbols(column))
end
else
ds.single_value
end
end
# Inserts multiple records into the associated table. This method can be
# used to efficiently insert a large number of records into a table in a
# single query if the database supports it. Inserts are automatically
# wrapped in a transaction if necessary.
#
# This method is called with a columns array and an array of value arrays:
#
# DB[:table].import([:x, :y], [[1, 2], [3, 4]])
# # INSERT INTO table (x, y) VALUES (1, 2)
# # INSERT INTO table (x, y) VALUES (3, 4)
#
# or, if the database supports it:
#
# # INSERT INTO table (x, y) VALUES (1, 2), (3, 4)
#
# This method also accepts a dataset instead of an array of value arrays:
#
# DB[:table].import([:x, :y], DB[:table2].select(:a, :b))
# # INSERT INTO table (x, y) SELECT a, b FROM table2
#
# Options:
# :commit_every :: Open a new transaction for every given number of
# records. For example, if you provide a value of 50,
# will commit after every 50 records. When a
# transaction is not required, this option controls
# the maximum number of values to insert with a single
# statement; it does not force the use of a
# transaction.
# :return :: When this is set to :primary_key, returns an array of
# autoincremented primary key values for the rows inserted.
# This does not have an effect if +values+ is a Dataset.
# :server :: Set the server/shard to use for the transaction and insert
# queries.
# :slice :: Same as :commit_every, :commit_every takes precedence.
def import(columns, values, opts=OPTS)
return @db.transaction{insert(columns, values)} if values.is_a?(Dataset)
return if values.empty?
raise(Error, 'Using Sequel::Dataset#import with an empty column array is not allowed') if columns.empty?
ds = opts[:server] ? server(opts[:server]) : self
if slice_size = opts.fetch(:commit_every, opts.fetch(:slice, default_import_slice))
offset = 0
rows = []
while offset < values.length
rows << ds._import(columns, values[offset, slice_size], opts)
offset += slice_size
end
rows.flatten
else
ds._import(columns, values, opts)
end
end
# Inserts values into the associated table. The returned value is generally
# the value of the autoincremented primary key for the inserted row, assuming that
# a single row is inserted and the table has an autoincrementing primary key.
#
# +insert+ handles a number of different argument formats:
# no arguments or single empty hash :: Uses DEFAULT VALUES
# single hash :: Most common format, treats keys as columns and values as values
# single array :: Treats entries as values, with no columns
# two arrays :: Treats first array as columns, second array as values
# single Dataset :: Treats as an insert based on a selection from the dataset given,
# with no columns
# array and dataset :: Treats as an insert based on a selection from the dataset
# given, with the columns given by the array.
#
# Examples:
#
# DB[:items].insert
# # INSERT INTO items DEFAULT VALUES
#
# DB[:items].insert({})
# # INSERT INTO items DEFAULT VALUES
#
# DB[:items].insert([1,2,3])
# # INSERT INTO items VALUES (1, 2, 3)
#
# DB[:items].insert([:a, :b], [1,2])
# # INSERT INTO items (a, b) VALUES (1, 2)
#
# DB[:items].insert(a: 1, b: 2)
# # INSERT INTO items (a, b) VALUES (1, 2)
#
# DB[:items].insert(DB[:old_items])
# # INSERT INTO items SELECT * FROM old_items
#
# DB[:items].insert([:a, :b], DB[:old_items])
# # INSERT INTO items (a, b) SELECT * FROM old_items
def insert(*values, &block)
sql = insert_sql(*values)
if uses_returning?(:insert)
returning_fetch_rows(sql, &block)
else
execute_insert(sql)
end
end
# Reverses the order and then runs #first with the given arguments and block. Note that this
# will not necessarily give you the last record in the dataset,
# unless you have an unambiguous order. If there is not
# currently an order for this dataset, raises an +Error+.
#
# DB[:table].order(:id).last # SELECT * FROM table ORDER BY id DESC LIMIT 1
# # => {:id=>10}
#
# DB[:table].order(Sequel.desc(:id)).last(2) # SELECT * FROM table ORDER BY id ASC LIMIT 2
# # => [{:id=>1}, {:id=>2}]
def last(*args, &block)
raise(Error, 'No order specified') unless @opts[:order]
reverse.first(*args, &block)
end
# Maps column values for each record in the dataset (if an argument is given)
# or performs the stock mapping functionality of +Enumerable+ otherwise.
# Raises an +Error+ if both an argument and block are given.
#
# DB[:table].map(:id) # SELECT * FROM table
# # => [1, 2, 3, ...]
#
# DB[:table].map{|r| r[:id] * 2} # SELECT * FROM table
# # => [2, 4, 6, ...]
#
# You can also provide an array of column names:
#
# DB[:table].map([:id, :name]) # SELECT * FROM table
# # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
def map(column=nil, &block)
if column
raise(Error, 'Must call Dataset#map with either an argument or a block, not both') if block
return naked.map(column) if row_proc
if column.is_a?(Array)
super(){|r| r.values_at(*column)}
else
super(){|r| r[column]}
end
else
super(&block)
end
end
# Returns the maximum value for the given column/expression.
# Uses a virtual row block if no argument is given.
#
# DB[:table].max(:id) # SELECT max(id) FROM table LIMIT 1
# # => 10
# DB[:table].max{function(column)} # SELECT max(function(column)) FROM table LIMIT 1
# # => 7
def max(arg=(no_arg = true), &block)
arg = Sequel.virtual_row(&block) if no_arg
_aggregate(:max, arg)
end
# Execute a MERGE statement, which allows for INSERT, UPDATE, and DELETE
# behavior in a single query, based on whether rows from a source table
# match rows in the current table, based on the join conditions.
#
# Unless the dataset uses static SQL, to use #merge, you must first have
# called #merge_using to specify the merge source and join conditions.
# You will then likely call one or more of the following methods
# to specify MERGE behavior by adding WHEN [NOT] MATCHED clauses:
#
# * #merge_insert
# * #merge_update
# * #merge_delete
#
# The WHEN [NOT] MATCHED clauses are added to the SQL in the order these
# methods were called on the dataset. If none of these methods are
# called, an error is raised.
#
# Example:
#
# DB[:m1].
# merge_using(:m2, i1: :i2).
# merge_insert(i1: :i2, a: Sequel[:b]+11).
# merge_delete{a > 30}.
# merge_update(i1: Sequel[:i1]+:i2+10, a: Sequel[:a]+:b+20).
# merge
#
# SQL:
#
# MERGE INTO m1 USING m2 ON (i1 = i2)
# WHEN NOT MATCHED THEN INSERT (i1, a) VALUES (i2, (b + 11))
# WHEN MATCHED AND (a > 30) THEN DELETE
# WHEN MATCHED THEN UPDATE SET i1 = (i1 + i2 + 10), a = (a + b + 20)
#
# On PostgreSQL, two additional merge methods are supported, for the
# PostgreSQL-specific DO NOTHING syntax.
#
# * #merge_do_nothing_when_matched
# * #merge_do_nothing_when_not_matched
#
# This method is supported on Oracle, but Oracle's MERGE support is
# non-standard, and has the following issues:
#
# * DELETE clause requires UPDATE clause
# * DELETE clause requires a condition
# * DELETE clause only affects rows updated by UPDATE clause
def merge
execute_ddl(merge_sql)
end
# Returns the minimum value for the given column/expression.
# Uses a virtual row block if no argument is given.
#
# DB[:table].min(:id) # SELECT min(id) FROM table LIMIT 1
# # => 1
# DB[:table].min{function(column)} # SELECT min(function(column)) FROM table LIMIT 1
# # => 0
def min(arg=(no_arg = true), &block)
arg = Sequel.virtual_row(&block) if no_arg
_aggregate(:min, arg)
end
# This is a front end for import that allows you to submit an array of
# hashes instead of arrays of columns and values:
#
# DB[:table].multi_insert([{x: 1}, {x: 2}])
# # INSERT INTO table (x) VALUES (1)
# # INSERT INTO table (x) VALUES (2)
#
# Be aware that all hashes should have the same keys when using this method;
# otherwise, columns missing from later hashes will be set to NULL rather
# than to their default values.
#
# This respects the same options as #import.
def multi_insert(hashes, opts=OPTS)
return if hashes.empty?
columns = hashes.first.keys
import(columns, hashes.map{|h| columns.map{|c| h[c]}}, opts)
end
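# A hedged sketch of the pitfall mentioned above (hypothetical table):
# the column list comes from the first hash, so keys missing from later
# hashes are inserted as NULL:
#
#   DB[:items].multi_insert([{x: 1, y: 2}, {x: 3}])
#   # INSERT INTO items (x, y) VALUES (1, 2)
#   # INSERT INTO items (x, y) VALUES (3, NULL)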
# Yields each row in the dataset, but internally uses multiple queries as needed to
# process the entire result set without keeping all rows in the dataset in memory,
# even if the underlying driver buffers all query results in memory.
#
# Because this uses multiple queries internally, in order to remain consistent,
# it also uses a transaction internally. Additionally, to work correctly, the dataset
# must have an unambiguous order. Using an ambiguous order can result in an infinite loop,
# as well as subtler bugs such as yielding duplicate rows or rows being skipped.
#
# Sequel checks that the datasets using this method have an order, but it cannot
# ensure that the order is unambiguous.
#
# Note that this method is not safe to use on many adapters if you are
# running additional queries inside the provided block. If you are
# running queries inside the block, use a separate thread or shard inside +paged_each+.
#
# Options:
# :rows_per_fetch :: The number of rows to fetch per query. Defaults to 1000.
# :strategy :: The strategy to use for paging of results. By default this is :offset,
# for using an approach with a limit and offset for every page. This can
# be set to :filter, which uses a limit and a filter that excludes
# rows from previous pages. In order for this strategy to work, you must be
# selecting the columns you are ordering by, and none of the columns can contain
# NULLs. Note that some Sequel adapters have optimized implementations that will
# use cursors or streaming regardless of the :strategy option used.
# :filter_values :: If the strategy: :filter option is used, this option should be a proc
# that accepts the last retrieved row for the previous page and an array of
# ORDER BY expressions, and returns an array of values relating to those
# expressions for the last retrieved row. You will need to use this option
# if your ORDER BY expressions are not simple columns, if they contain
# qualified identifiers that would be ambiguous unqualified, if they contain
# any identifiers that are aliased in SELECT, and potentially other cases.
#
# Examples:
#
# DB[:table].order(:id).paged_each{|row| }
# # SELECT * FROM table ORDER BY id LIMIT 1000
# # SELECT * FROM table ORDER BY id LIMIT 1000 OFFSET 1000
# # ...
#
# DB[:table].order(:id).paged_each(rows_per_fetch: 100){|row| }
# # SELECT * FROM table ORDER BY id LIMIT 100
# # SELECT * FROM table ORDER BY id LIMIT 100 OFFSET 100
# # ...
#
# DB[:table].order(:id).paged_each(strategy: :filter){|row| }
# # SELECT * FROM table ORDER BY id LIMIT 1000
# # SELECT * FROM table WHERE id > 1001 ORDER BY id LIMIT 1000
# # ...
#
# DB[:table].order(:id).paged_each(strategy: :filter,
# filter_values: lambda{|row, exprs| [row[:id]]}){|row| }
# # SELECT * FROM table ORDER BY id LIMIT 1000
# # SELECT * FROM table WHERE id > 1001 ORDER BY id LIMIT 1000
# # ...
def paged_each(opts=OPTS)
unless @opts[:order]
raise Sequel::Error, "Dataset#paged_each requires the dataset be ordered"
end
unless defined?(yield)
return enum_for(:paged_each, opts)
end
total_limit = @opts[:limit]
offset = @opts[:offset]
if server = @opts[:server]
opts = Hash[opts]
opts[:server] = server
end
rows_per_fetch = opts[:rows_per_fetch] || 1000
strategy = if offset || total_limit
:offset
else
opts[:strategy] || :offset
end
db.transaction(opts) do
case strategy
when :filter
filter_values = opts[:filter_values] || proc{|row, exprs| exprs.map{|e| row[hash_key_symbol(e)]}}
base_ds = ds = limit(rows_per_fetch)
while ds
last_row = nil
ds.each do |row|
last_row = row
yield row
end
ds = (base_ds.where(ignore_values_preceding(last_row, &filter_values)) if last_row)
end
else
offset ||= 0
num_rows_yielded = rows_per_fetch
total_rows = 0
while num_rows_yielded == rows_per_fetch && (total_limit.nil? || total_rows < total_limit)
if total_limit && total_rows + rows_per_fetch > total_limit
rows_per_fetch = total_limit - total_rows
end
num_rows_yielded = 0
limit(rows_per_fetch, offset).each do |row|
num_rows_yielded += 1
total_rows += 1 if total_limit
yield row
end
offset += rows_per_fetch
end
end
end
self
end
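# A sketch (hypothetical column) of :filter_values with an ORDER BY
# expression that is not a simple column, where Sequel cannot infer the
# page-boundary value from the row hash on its own:
#
#   ds = DB[:table].order(Sequel.function(:lower, :name))
#   ds.paged_each(strategy: :filter,
#                 filter_values: lambda{|row, exprs| [row[:name].downcase]}){|row| }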
# Returns a hash with key_column values as keys and value_column values as
# values. Similar to as_hash, but only selects the columns given. Like
# as_hash, it accepts an optional :hash parameter, into which entries will
# be merged.
#
# DB[:table].select_hash(:id, :name)
# # SELECT id, name FROM table
# # => {1=>'a', 2=>'b', ...}
#
# You can also provide an array of column names for either the key_column,
# the value column, or both:
#
# DB[:table].select_hash([:id, :foo], [:name, :bar])
# # SELECT id, foo, name, bar FROM table
# # => {[1, 3]=>['a', 'c'], [2, 4]=>['b', 'd'], ...}
#
# When using this method, you must be sure that each expression has an alias
# that Sequel can determine.
def select_hash(key_column, value_column, opts = OPTS)
_select_hash(:as_hash, key_column, value_column, opts)
end
# Returns a hash with key_column values as keys and an array of value_column values.
# Similar to to_hash_groups, but only selects the columns given. Like to_hash_groups,
# it accepts an optional :hash parameter, into which entries will be merged.
#
# DB[:table].select_hash_groups(:name, :id)
# # SELECT id, name FROM table
# # => {'a'=>[1, 4, ...], 'b'=>[2, ...], ...}
#
# You can also provide an array of column names for either the key_column,
# the value column, or both:
#
# DB[:table].select_hash_groups([:first, :middle], [:last, :id])
# # SELECT first, middle, last, id FROM table
# # => {['a', 'b']=>[['c', 1], ['d', 2], ...], ...}
#
# When using this method, you must be sure that each expression has an alias
# that Sequel can determine.
def select_hash_groups(key_column, value_column, opts = OPTS)
_select_hash(:to_hash_groups, key_column, value_column, opts)
end
# Selects the column given (either as an argument or as a block), and
# returns an array of all values of that column in the dataset. If you
# give a block argument that returns an array with multiple entries,
# the contents of the resulting array are undefined. Raises an Error
# if called with both an argument and a block.
#
# DB[:table].select_map(:id) # SELECT id FROM table
# # => [3, 5, 8, 1, ...]
#
# DB[:table].select_map{id * 2} # SELECT (id * 2) FROM table
# # => [6, 10, 16, 2, ...]
#
# You can also provide an array of column names:
#
# DB[:table].select_map([:id, :name]) # SELECT id, name FROM table
# # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
#
# If you provide an array of expressions, you must be sure that each entry
# in the array has an alias that Sequel can determine.
def select_map(column=nil, &block)
_select_map(column, false, &block)
end
# The same as select_map, but in addition orders the array by the column.
#
# DB[:table].select_order_map(:id) # SELECT id FROM table ORDER BY id
# # => [1, 2, 3, 4, ...]
#
# DB[:table].select_order_map{id * 2} # SELECT (id * 2) FROM table ORDER BY (id * 2)
# # => [2, 4, 6, 8, ...]
#
# You can also provide an array of column names:
#
# DB[:table].select_order_map([:id, :name]) # SELECT id, name FROM table ORDER BY id, name
# # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
#
# If you provide an array of expressions, you must be sure that each entry
# in the array has an alias that Sequel can determine.
def select_order_map(column=nil, &block)
_select_map(column, true, &block)
end
# Limits the dataset to one record, and returns the first record in the dataset,
# or nil if the dataset has no records. Users should probably use +first+ instead of
# this method. Example:
#
# DB[:test].single_record # SELECT * FROM test LIMIT 1
# # => {:column_name=>'value'}
def single_record
_single_record_ds.single_record!
end
# Returns the first record in the dataset, without limiting the dataset. Returns nil if
# the dataset has no records. Users should probably use +first+ instead of this method.
# This should only be used if you know the dataset is already limited to a single record.
# This method may be desirable to use for performance reasons, as it does not clone the
# receiver. Example:
#
# DB[:test].single_record! # SELECT * FROM test
# # => {:column_name=>'value'}
def single_record!
with_sql_first(select_sql)
end
# Returns the first value of the first record in the dataset.
# Returns nil if dataset is empty. Users should generally use
# +get+ instead of this method. Example:
#
# DB[:test].single_value # SELECT * FROM test LIMIT 1
# # => 'value'
def single_value
single_value_ds.each do |r|
r.each{|_, v| return v}
end
nil
end
# Returns the first value of the first record in the dataset, without limiting the dataset.
# Returns nil if the dataset is empty. Users should generally use +get+ instead of this
# method. Should not be used on graphed datasets or datasets that have row_procs that
# don't return hashes. This method may be desirable to use for performance reasons, as
# it does not clone the receiver.
#
# DB[:test].single_value! # SELECT * FROM test
# # => 'value'
def single_value!
with_sql_single_value(select_sql)
end
# Returns the sum for the given column/expression.
# Uses a virtual row block if no column is given.
#
# DB[:table].sum(:id) # SELECT sum(id) FROM table LIMIT 1
# # => 55
# DB[:table].sum{function(column)} # SELECT sum(function(column)) FROM table LIMIT 1
# # => 10
def sum(arg=(no_arg = true), &block)
arg = Sequel.virtual_row(&block) if no_arg
_aggregate(:sum, arg)
end
# Returns a hash with one column used as key and another used as value.
# If rows have duplicate values for the key column, the latter row(s)
# will overwrite the value of the previous row(s). If the value_column
# is not given or nil, uses the entire hash as the value.
#
# DB[:table].as_hash(:id, :name) # SELECT * FROM table
# # {1=>'Jim', 2=>'Bob', ...}
#
# DB[:table].as_hash(:id) # SELECT * FROM table
# # {1=>{:id=>1, :name=>'Jim'}, 2=>{:id=>2, :name=>'Bob'}, ...}
#
# You can also provide an array of column names for either the key_column,
# the value column, or both:
#
# DB[:table].as_hash([:id, :foo], [:name, :bar]) # SELECT * FROM table
# # {[1, 3]=>['Jim', 'bo'], [2, 4]=>['Bob', 'be'], ...}
#
# DB[:table].as_hash([:id, :name]) # SELECT * FROM table
# # {[1, 'Jim']=>{:id=>1, :name=>'Jim'}, [2, 'Bob']=>{:id=>2, :name=>'Bob'}, ...}
#
# Options:
# :all :: Use all instead of each to retrieve the objects
# :hash :: The object into which the values will be placed. If this is not
# given, an empty hash is used. This can be used to use a hash with
# a default value or default proc.
def as_hash(key_column, value_column = nil, opts = OPTS)
h = opts[:hash] || {}
meth = opts[:all] ? :all : :each
if value_column
return naked.as_hash(key_column, value_column, opts) if row_proc
if value_column.is_a?(Array)
if key_column.is_a?(Array)
public_send(meth){|r| h[r.values_at(*key_column)] = r.values_at(*value_column)}
else
public_send(meth){|r| h[r[key_column]] = r.values_at(*value_column)}
end
else
if key_column.is_a?(Array)
public_send(meth){|r| h[r.values_at(*key_column)] = r[value_column]}
else
public_send(meth){|r| h[r[key_column]] = r[value_column]}
end
end
elsif key_column.is_a?(Array)
public_send(meth){|r| h[key_column.map{|k| r[k]}] = r}
else
public_send(meth){|r| h[r[key_column]] = r}
end
h
end
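# A small sketch of the :hash option (hypothetical data), merging results
# into a prepopulated hash:
#
#   h = {0=>'None'}
#   DB[:table].as_hash(:id, :name, hash: h)
#   # => {0=>'None', 1=>'Jim', 2=>'Bob', ...}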
# Alias of as_hash for backwards compatibility.
def to_hash(*a)
as_hash(*a)
end
# Returns a hash with one column used as key and the values being an
# array of column values. If the value_column is not given or nil, uses
# the entire hash as the value.
#
# DB[:table].to_hash_groups(:name, :id) # SELECT * FROM table
# # {'Jim'=>[1, 4, 16, ...], 'Bob'=>[2], ...}
#
# DB[:table].to_hash_groups(:name) # SELECT * FROM table
# # {'Jim'=>[{:id=>1, :name=>'Jim'}, {:id=>4, :name=>'Jim'}, ...], 'Bob'=>[{:id=>2, :name=>'Bob'}], ...}
#
# You can also provide an array of column names for either the key_column,
# the value column, or both:
#
# DB[:table].to_hash_groups([:first, :middle], [:last, :id]) # SELECT * FROM table
# # {['Jim', 'Bob']=>[['Smith', 1], ['Jackson', 4], ...], ...}
#
# DB[:table].to_hash_groups([:first, :middle]) # SELECT * FROM table
# # {['Jim', 'Bob']=>[{:id=>1, :first=>'Jim', :middle=>'Bob', :last=>'Smith'}, ...], ...}
#
# Options:
# :all :: Use all instead of each to retrieve the objects
# :hash :: The object into which the values will be placed. If this is not
# given, an empty hash is used. This can be used to use a hash with
# a default value or default proc.
def to_hash_groups(key_column, value_column = nil, opts = OPTS)
h = opts[:hash] || {}
meth = opts[:all] ? :all : :each
if value_column
return naked.to_hash_groups(key_column, value_column, opts) if row_proc
if value_column.is_a?(Array)
if key_column.is_a?(Array)
public_send(meth){|r| (h[r.values_at(*key_column)] ||= []) << r.values_at(*value_column)}
else
public_send(meth){|r| (h[r[key_column]] ||= []) << r.values_at(*value_column)}
end
else
if key_column.is_a?(Array)
public_send(meth){|r| (h[r.values_at(*key_column)] ||= []) << r[value_column]}
else
public_send(meth){|r| (h[r[key_column]] ||= []) << r[value_column]}
end
end
elsif key_column.is_a?(Array)
public_send(meth){|r| (h[key_column.map{|k| r[k]}] ||= []) << r}
else
public_send(meth){|r| (h[r[key_column]] ||= []) << r}
end
h
end
# Truncates the dataset. Returns nil.
#
# DB[:table].truncate # TRUNCATE table
# # => nil
def truncate
execute_ddl(truncate_sql)
end
# Updates values for the dataset. The returned value is the number of rows updated.
# +values+ should be a hash where the keys are columns to set and values are the values to
# which to set the columns.
#
# DB[:table].update(x: nil) # UPDATE table SET x = NULL
# # => 10
#
# DB[:table].update(x: Sequel[:x]+1, y: 0) # UPDATE table SET x = (x + 1), y = 0
# # => 10
#
# Some databases support using multiple tables in an UPDATE query. This requires
# multiple FROM tables (JOINs can also be used). As multiple FROM tables use
# an implicit CROSS JOIN, you should make sure your WHERE condition uses the
# appropriate filters for the FROM tables:
#
# DB.from(:a, :b).join(:c, :d=>Sequel[:b][:e]).where{{a[:f]=>b[:g], a[:id]=>10}}.
# update(:f=>Sequel[:c][:h])
# # UPDATE a
# # SET f = c.h
# # FROM b
# # INNER JOIN c ON (c.d = b.e)
# # WHERE ((a.f = b.g) AND (a.id = 10))
def update(values=OPTS, &block)
sql = update_sql(values)
if uses_returning?(:update)
returning_fetch_rows(sql, &block)
else
execute_dui(sql)
end
end
# Return an array of all rows matching the given filter condition, also
# yielding each row to the given block. Basically the same as where(cond).all(&block),
# except it can be optimized to not create an intermediate dataset.
#
# DB[:table].where_all(id: [1,2,3])
# # SELECT * FROM table WHERE (id IN (1, 2, 3))
def where_all(cond, &block)
if loader = _where_loader([cond], nil)
loader.all(filter_expr(cond), &block)
else
where(cond).all(&block)
end
end
# Iterate over all rows matching the given filter condition,
# yielding each row to the given block. Basically the same as where(cond).each(&block),
# except it can be optimized to not create an intermediate dataset.
#
# DB[:table].where_each(id: [1,2,3]){|row| p row}
# # SELECT * FROM table WHERE (id IN (1, 2, 3))
def where_each(cond, &block)
if loader = _where_loader([cond], nil)
loader.each(filter_expr(cond), &block)
else
where(cond).each(&block)
end
end
# Filter the dataset using the given filter condition, then return a single value.
# This assumes that the dataset has already been set up to limit the selection to
# a single column. Basically the same as where(cond).single_value,
# except it can be optimized to not create an intermediate dataset.
#
# DB[:table].select(:name).where_single_value(id: 1)
# # SELECT name FROM table WHERE (id = 1) LIMIT 1
def where_single_value(cond)
if loader = cached_where_placeholder_literalizer([cond], nil, :_where_single_value_loader) do |pl|
single_value_ds.where(pl.arg)
end
loader.get(filter_expr(cond))
else
where(cond).single_value
end
end
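# A hedged sketch of repeated use on a dataset stored in a constant
# (hypothetical table): assuming the dataset's SQL is cacheable, later
# calls can reuse a cached placeholder literalizer instead of rebuilding
# the query:
#
#   NAMES = DB[:table].select(:name)
#   NAMES.where_single_value(id: 1)
#   NAMES.where_single_value(id: 2) # may reuse the cached loader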
# Run the given SQL and return an array of all rows. If a block is given,
# each row is yielded to the block after all rows are loaded. See with_sql_each.
def with_sql_all(sql, &block)
_all(block){|a| with_sql_each(sql){|r| a << r}}
end
# Execute the given SQL and return the number of rows deleted. This exists
# solely as an optimization, replacing with_sql(sql).delete. It's significantly
# faster as it does not require cloning the current dataset.
def with_sql_delete(sql)
execute_dui(sql)
end
alias with_sql_update with_sql_delete
# Run the given SQL and yield each returned row to the block.
def with_sql_each(sql)
if rp = row_proc
_with_sql_dataset.fetch_rows(sql){|r| yield rp.call(r)}
else
_with_sql_dataset.fetch_rows(sql){|r| yield r}
end
self
end
# Run the given SQL and return the first row, or nil if no rows were returned.
# See with_sql_each.
def with_sql_first(sql)
with_sql_each(sql){|r| return r}
nil
end
# Run the given SQL and return the first value in the first row, or nil if no
# rows were returned. For this to make sense, the SQL given should select
# only a single value. See with_sql_each.
def with_sql_single_value(sql)
if r = with_sql_first(sql)
r.each{|_, v| return v}
end
end
# Execute the given SQL and (on most databases) return the primary key of the
# inserted row.
def with_sql_insert(sql)
execute_insert(sql)
end
protected
# Internals of #import. If primary key values are requested, use
# separate insert commands for each row. Otherwise, call #multi_insert_sql
# and execute each statement it gives separately. A transaction is only used
# if there are multiple statements to execute.
def _import(columns, values, opts)
trans_opts = Hash[opts]
trans_opts[:server] = @opts[:server]
if opts[:return] == :primary_key
_import_transaction(values, trans_opts){values.map{|v| insert(columns, v)}}
else
stmts = multi_insert_sql(columns, values)
_import_transaction(stmts, trans_opts){stmts.each{|st| execute_dui(st)}}
end
end
# Return an array of arrays of values given by the symbols in ret_cols.
def _select_map_multiple(ret_cols)
map{|r| r.values_at(*ret_cols)}
end
# Returns an array of the first value in each row.
def _select_map_single
k = nil
map{|r| r[k||=r.keys.first]}
end
# A dataset for returning single values from the current dataset.
def single_value_ds
clone(:limit=>1).ungraphed.naked
end
private
# Internals of all and with_sql_all
def _all(block)
a = []
yield a
post_load(a)
a.each(&block) if block
a
end
# Cached placeholder literalizer for methods that return values using aggregate functions.
def _aggregate(function, arg)
if loader = cached_placeholder_literalizer(:"_#{function}_loader") do |pl|
aggregate_dataset.limit(1).select(SQL::Function.new(function, pl.arg).as(function))
end
loader.get(arg)
else
aggregate_dataset.get(SQL::Function.new(function, arg).as(function))
end
end
# Use a transaction when yielding to the block if multiple values/statements
# are provided. When only a single value or statement is provided, then yield
# without using a transaction.
def _import_transaction(values, trans_opts, &block)
if values.length > 1
@db.transaction(trans_opts, &block)
else
yield
end
end
# Internals of +select_hash+ and +select_hash_groups+
def _select_hash(meth, key_column, value_column, opts=OPTS)
select(*(key_column.is_a?(Array) ? key_column : [key_column]) + (value_column.is_a?(Array) ? value_column : [value_column])).
public_send(meth, hash_key_symbols(key_column), hash_key_symbols(value_column), opts)
end
# Internals of +select_map+ and +select_order_map+
def _select_map(column, order, &block)
ds = ungraphed.naked
columns = Array(column)
virtual_row_columns(columns, block)
select_cols = order ? columns.map{|c| c.is_a?(SQL::OrderedExpression) ? c.expression : c} : columns
ds = ds.order(*columns.map{|c| unaliased_identifier(c)}) if order
if column.is_a?(Array) || (columns.length > 1)
ds.select(*select_cols)._select_map_multiple(hash_key_symbols(select_cols))
else
ds.select(auto_alias_expression(select_cols.first))._select_map_single
end
end
# A cached dataset for a single record for this dataset.
def _single_record_ds
cached_dataset(:_single_record_ds){clone(:limit=>1)}
end
# Loader used for where_all and where_each.
def _where_loader(where_args, where_block)
cached_where_placeholder_literalizer(where_args, where_block, :_where_loader) do |pl|
where(pl.arg)
end
end
# Automatically alias the given expression if it does not have an identifiable alias.
def auto_alias_expression(v)
case v
when LiteralString, Symbol, SQL::Identifier, SQL::QualifiedIdentifier, SQL::AliasedExpression
v
else
SQL::AliasedExpression.new(v, :v)
end
end
# The default number of rows that can be inserted in a single INSERT statement via import.
# The default is no limit.
def default_import_slice
nil
end
# Set the server to use to :default unless it is already set in the passed opts
def default_server_opts(opts)
if @db.sharded? && !opts.has_key?(:server)
opts = Hash[opts]
opts[:server] = @opts[:server] || :default
end
opts
end
# Execute the given select SQL on the database using execute. Use the
# :read_only server unless a specific server is set.
def execute(sql, opts=OPTS, &block)
db = @db
if db.sharded? && !opts.has_key?(:server)
opts = Hash[opts]
opts[:server] = @opts[:server] || (@opts[:lock] ? :default : :read_only)
end
db.execute(sql, opts, &block)
end
# Execute the given SQL on the database using execute_ddl.
def execute_ddl(sql, opts=OPTS, &block)
@db.execute_ddl(sql, default_server_opts(opts), &block)
nil
end
# Execute the given SQL on the database using execute_dui.
def execute_dui(sql, opts=OPTS, &block)
@db.execute_dui(sql, default_server_opts(opts), &block)
end
# Execute the given SQL on the database using execute_insert.
def execute_insert(sql, opts=OPTS, &block)
@db.execute_insert(sql, default_server_opts(opts), &block)
end
# Return a plain symbol given a potentially qualified or aliased symbol,
# specifying the symbol that is likely to be used as the hash key
# for the column when records are returned. Return nil if no hash key
# can be determined
def _hash_key_symbol(s, recursing=false)
case s
when Symbol
_, c, a = split_symbol(s)
(a || c).to_sym
when SQL::Identifier, SQL::Wrapper
_hash_key_symbol(s.value, true)
when SQL::QualifiedIdentifier
_hash_key_symbol(s.column, true)
when SQL::AliasedExpression
_hash_key_symbol(s.alias, true)
when String
s.to_sym if recursing
end
end
# Return a plain symbol given a potentially qualified or aliased symbol,
# specifying the symbol that is likely to be used as the hash key
# for the column when records are returned. Raise Error if the hash key
# symbol cannot be returned.
def hash_key_symbol(s)
if v = _hash_key_symbol(s)
v
else
raise(Error, "#{s.inspect} is not supported, should be a Symbol, SQL::Identifier, SQL::QualifiedIdentifier, or SQL::AliasedExpression")
end
end
# If s is an array, return an array with the given hash key symbols.
# Otherwise, return a hash key symbol for the given expression
# If a hash key symbol cannot be determined, raise an error.
def hash_key_symbols(s)
s.is_a?(Array) ? s.map{|c| hash_key_symbol(c)} : hash_key_symbol(s)
end
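# Illustrative results (sketch):
#
#   hash_key_symbol(:column)                  # => :column
#   hash_key_symbol(Sequel[:table][:column])  # => :column
#   hash_key_symbol(Sequel[:column].as(:a))   # => :a
#   hash_key_symbol(Sequel.function(:count))  # raises Sequel::Error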
# Returns an expression that will ignore values preceding the given row, using the
# receiver's current order. This yields the row and the array of order expressions
# to the block, which should return an array of values to use.
def ignore_values_preceding(row)
order_exprs = @opts[:order].map do |v|
if v.is_a?(SQL::OrderedExpression)
descending = v.descending
v = v.expression
else
descending = false
end
[v, descending]
end
row_values = yield(row, order_exprs.map(&:first))
last_expr = []
cond = order_exprs.zip(row_values).map do |(v, descending), value|
expr = last_expr + [SQL::BooleanExpression.new(descending ? :< : :>, v, value)]
last_expr += [SQL::BooleanExpression.new(:'=', v, value)]
Sequel.&(*expr)
end
Sequel.|(*cond)
end
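# For example (sketch): with order(:a, Sequel.desc(:b)) and a last row of
# {a: 1, b: 2}, the block returns [1, 2] and the generated filter is the
# lexicographic condition:
#
#   (a > 1) OR ((a = 1) AND (b < 2))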
# Downcase identifiers by default when outputting them from the database.
def output_identifier(v)
v = 'untitled' if v == ''
v.to_s.downcase.to_sym
end
# This is run inside .all, after all of the records have been loaded
# via .each, but before any block passed to all is called. It is called with
# a single argument, an array of all returned records. Does nothing by
# default, added to make the model eager loading code simpler.
def post_load(all_records)
end
# Called by insert/update/delete when returning is used.
# Yields each row as a plain hash to the block if one is given, or returns
# an array of plain hashes for all rows if a block is not given
def returning_fetch_rows(sql, &block)
if block
default_server.fetch_rows(sql, &block)
nil
else
rows = []
default_server.fetch_rows(sql){|r| rows << r}
rows
end
end
# Return the unaliased part of the identifier. Handles both
# implicit aliases in symbols, as well as SQL::AliasedExpression
# objects. Other objects are returned as is.
def unaliased_identifier(c)
case c
when Symbol
table, column, aliaz = split_symbol(c)
if aliaz
table ? SQL::QualifiedIdentifier.new(table, column) : Sequel.identifier(column)
else
c
end
when SQL::AliasedExpression
c.expression
when SQL::OrderedExpression
case expr = c.expression
when Symbol, SQL::AliasedExpression
SQL::OrderedExpression.new(unaliased_identifier(expr), c.descending, :nulls=>c.nulls)
else
c
end
else
c
end
end
# Cached dataset to use for with_sql_#{all,each,first,single_value}.
# This is used so that the columns returned by the given SQL do not
# affect the receiver of the with_sql_* method.
def _with_sql_dataset
if @opts[:_with_sql_ds]
self
else
cached_dataset(:_with_sql_ds) do
clone(:_with_sql_ds=>true)
end
end
end
end
end
sequel-5.63.0/lib/sequel/dataset/dataset_module.rb 0000664 0000000 0000000 00000002715 14342141206 0022115 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# This Module subclass is used by Database#extend_datasets
# and Dataset#with_extend to add dataset methods to classes.
# It adds some helper methods inside the module that can define
# named methods on the dataset instances which do specific actions.
# For example:
#
# DB.extend_datasets do
# order :by_id, :id
# select :with_id_and_name, :id, :name
# where :active, :active
# end
#
# DB[:table].active.with_id_and_name.by_id
# # SELECT id, name FROM table WHERE active ORDER BY id
class DatasetModule < ::Module
meths = (<<-METHS).split.map(&:to_sym)
where exclude exclude_having having
distinct grep group group_and_count group_append
limit offset order order_append order_prepend reverse
select select_all select_append select_group server
METHS
# Define a method in the module
def self.def_dataset_caching_method(mod, meth)
mod.send(:define_method, meth) do |name, *args, &block|
if block
define_method(name){public_send(meth, *args, &block)}
else
key = :"_#{meth}_#{name}_ds"
define_method(name) do
cached_dataset(key){public_send(meth, *args)}
end
end
end
end
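# Roughly (sketch), a call such as +order :by_id, :id+ in the module body
# defines an instance method equivalent to:
#
#   def by_id
#     cached_dataset(:_order_by_id_ds){order(:id)}
#   end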
meths.each do |meth|
def_dataset_caching_method(self, meth)
end
end
end
end
sequel-5.63.0/lib/sequel/dataset/features.rb 0000664 0000000 0000000 00000020324 14342141206 0020735 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# ---------------------
# :section: 4 - Methods that describe what the dataset supports
# These methods all return booleans, with most describing whether or not the
# dataset supports a feature.
# ---------------------
# Whether this dataset quotes identifiers.
def quote_identifiers?
@opts.fetch(:quote_identifiers, true)
end
# Whether this dataset will provide accurate number of rows matched for
# delete and update statements, true by default. Accurate in this case is the number of
# rows matched by the dataset's filter.
def provides_accurate_rows_matched?
true
end
# Whether you must use a column alias list for recursive CTEs, false by default.
def recursive_cte_requires_column_aliases?
false
end
# Whether the dataset requires SQL standard datetimes. False by default,
# as most allow strings with ISO 8601 format.
def requires_sql_standard_datetimes?
false
end
# Whether type specifiers are required for prepared statement/bound
# variable argument placeholders (i.e. :bv__integer), false by default.
def requires_placeholder_type_specifiers?
false
end
# Whether the dataset supports common table expressions, false by default.
# If given, +type+ can be :select, :insert, :update, or :delete, in which case it
# determines whether WITH is supported for the respective statement type.
def supports_cte?(type=:select)
false
end
# Whether the dataset supports common table expressions in subqueries, false by default.
# If false, applies the WITH clause to the main query, which can cause issues
# if multiple WITH clauses use the same name.
def supports_cte_in_subqueries?
false
end
# Whether deleting from joined datasets is supported, false by default.
def supports_deleting_joins?
supports_modifying_joins?
end
# Whether the database supports derived column lists (e.g.
# "table_expr AS table_alias(column_alias1, column_alias2, ...)"), true by
# default.
def supports_derived_column_lists?
true
end
# Whether the dataset supports or can emulate the DISTINCT ON clause, false by default.
def supports_distinct_on?
false
end
# Whether the dataset supports CUBE with GROUP BY, false by default.
def supports_group_cube?
false
end
# Whether the dataset supports ROLLUP with GROUP BY, false by default.
def supports_group_rollup?
false
end
# Whether the dataset supports GROUPING SETS with GROUP BY, false by default.
def supports_grouping_sets?
false
end
# Whether this dataset supports the +insert_select+ method for returning all columns values
# directly from an insert query, false by default.
def supports_insert_select?
supports_returning?(:insert)
end
# Whether the dataset supports the INTERSECT and EXCEPT compound operations, true by default.
def supports_intersect_except?
true
end
# Whether the dataset supports the INTERSECT ALL and EXCEPT ALL compound operations, true by default.
def supports_intersect_except_all?
true
end
# Whether the dataset supports the IS TRUE syntax, true by default.
def supports_is_true?
true
end
# Whether the dataset supports the JOIN table USING (column1, ...) syntax, true by default.
# If false, support is emulated using JOIN table ON (table.column1 = other_table.column1).
def supports_join_using?
true
end
# Whether the dataset supports LATERAL for subqueries in the FROM or JOIN clauses, false by default.
def supports_lateral_subqueries?
false
end
# Whether limits are supported in correlated subqueries, true by default.
def supports_limits_in_correlated_subqueries?
true
end
# Whether the dataset supports raising an error instead of waiting for locked rows (NOWAIT), false by default.
def supports_nowait?
false
end
# Whether the MERGE statement is supported, false by default.
def supports_merge?
false
end
# Whether modifying joined datasets is supported, false by default.
def supports_modifying_joins?
false
end
# Whether the IN/NOT IN operators support multiple columns when an
# array of values is given, true by default.
def supports_multiple_column_in?
true
end
# Whether offsets are supported in correlated subqueries, true by default.
def supports_offsets_in_correlated_subqueries?
true
end
# Whether the dataset supports or can fully emulate the DISTINCT ON clause,
# including respecting the ORDER BY clause, false by default.
def supports_ordered_distinct_on?
supports_distinct_on?
end
# Whether placeholder literalizers are supported, true by default.
def supports_placeholder_literalizer?
true
end
# Whether the dataset supports pattern matching by regular expressions, false by default.
def supports_regexp?
false
end
# Whether the dataset supports REPLACE syntax, false by default.
def supports_replace?
false
end
# Whether the RETURNING clause is supported for the given type of query, false by default.
# +type+ can be :insert, :update, or :delete.
def supports_returning?(type)
false
end
# Whether the dataset supports skipping locked rows when returning data, false by default.
def supports_skip_locked?
false
end
# Whether the database supports SELECT *, column FROM table, true by default.
def supports_select_all_and_column?
true
end
# Whether the dataset supports timezones in literal timestamps, false by default.
def supports_timestamp_timezones?
false
end
# Whether the dataset supports fractional seconds in literal timestamps, true by default.
def supports_timestamp_usecs?
true
end
# Whether updating joined datasets is supported, false by default.
def supports_updating_joins?
supports_modifying_joins?
end
# Whether the dataset supports the WINDOW clause to define windows used by multiple
# window functions, false by default.
def supports_window_clause?
false
end
# Whether the dataset supports window functions, false by default.
def supports_window_functions?
false
end
# Whether the dataset supports the given window function option. True by default.
# This should only be called if supports_window_functions? is true. Possible options
# are :rows, :range, :groups, :offset, :exclude.
def supports_window_function_frame_option?(option)
case option
when :rows, :range, :offset
true
else
false
end
end
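# Adapters override these predicates to advertise their capabilities. As a
# hedged sketch, a hypothetical adapter that also supports the :groups
# frame option might use:
#
#   def supports_window_function_frame_option?(option)
#     case option
#     when :rows, :range, :groups, :offset
#       true
#     else
#       false
#     end
#   end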
# Whether the dataset supports WHERE TRUE (or WHERE 1 for databases
# that use 1 for true), true by default.
def supports_where_true?
true
end
private
# Whether insert(nil) or insert({}) must be emulated by
# using at least one value.
def insert_supports_empty_values?
true
end
# Whether the dataset needs ESCAPE for LIKE for correct behavior.
def requires_like_escape?
true
end
# Whether ORDER BY col NULLS FIRST/LAST must be emulated.
def requires_emulating_nulls_first?
false
end
# Whether common table expressions are supported in UNION/INTERSECT/EXCEPT clauses.
def supports_cte_in_compounds?
supports_cte_in_subqueries?
end
# Whether the dataset supports the FILTER clause for aggregate functions.
# If not, support is emulated using CASE.
def supports_filtered_aggregates?
false
end
# Whether the database supports quoting function names.
def supports_quoted_function_names?
false
end
# Whether the RETURNING clause is used for the given dataset.
# +type+ can be :insert, :update, or :delete.
def uses_returning?(type)
opts[:returning] && !@opts[:sql] && supports_returning?(type)
end
# Whether the dataset uses WITH ROLLUP/CUBE instead of ROLLUP()/CUBE().
def uses_with_rollup?
false
end
end
end
sequel-5.63.0/lib/sequel/dataset/graph.rb 0000664 0000000 0000000 00000031142 14342141206 0020220 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# ---------------------
# :section: 5 - Methods related to dataset graphing
# Dataset graphing automatically creates unique aliases for columns in joined
# tables that overlap with already selected column aliases.
# All of these methods return modified copies of the receiver.
# ---------------------
# Adds the given graph aliases to the list of graph aliases to use,
# unlike +set_graph_aliases+, which replaces the list (the equivalent
# of +select_append+ when graphing). See +set_graph_aliases+.
#
# DB[:table].add_graph_aliases(some_alias: [:table, :column])
# # SELECT ..., table.column AS some_alias
def add_graph_aliases(graph_aliases)
graph = opts[:graph]
unless (graph && (ga = graph[:column_aliases]))
raise Error, "cannot call add_graph_aliases on a dataset that has not been called with graph or set_graph_aliases"
end
columns, graph_aliases = graph_alias_columns(graph_aliases)
select_append(*columns).clone(:graph => graph.merge(:column_aliases=>ga.merge(graph_aliases).freeze).freeze)
end
# Similar to Dataset#join_table, but uses unambiguous aliases for selected
# columns and keeps metadata about the aliases for use in other methods.
#
# Arguments:
# dataset :: Can be a symbol (specifying a table), another dataset,
# or an SQL::Identifier, SQL::QualifiedIdentifier, or SQL::AliasedExpression.
# join_conditions :: Any condition(s) allowed by +join_table+.
# block :: A block that is passed to +join_table+.
#
# Options:
# :from_self_alias :: The alias to use when the receiver is not a graphed
# dataset but it contains multiple FROM tables or a JOIN. In this case,
# the receiver is wrapped in a from_self before graphing, and this option
# determines the alias to use.
# :implicit_qualifier :: The qualifier of implicit conditions, see #join_table.
# :join_only :: Only join the tables, do not change the selected columns.
# :join_type :: The type of join to use (passed to +join_table+). Defaults to :left_outer.
# :qualify :: The type of qualification to do, see #join_table.
# :select :: An array of columns to select. When not used, selects
# all columns in the given dataset. When set to false, selects no
# columns and is like simply joining the tables, though graph keeps
# some metadata about the join that makes it important to use +graph+ instead
# of +join_table+.
# :table_alias :: The alias to use for the table. If not specified, doesn't
# alias the table. You will get an error if the alias (or table) name is
# used more than once.
def graph(dataset, join_conditions = nil, options = OPTS, &block)
# Allow the use of a dataset or symbol as the first argument
# Find the table name/dataset based on the argument
table_alias = options[:table_alias]
table = dataset
create_dataset = true
case dataset
when Symbol
# let alias be the same as the table name (sans any optional schema)
# unless alias explicitly given in the symbol using ___ notation and symbol splitting is enabled
table_alias ||= split_symbol(table).compact.last
when Dataset
if dataset.simple_select_all?
table = dataset.opts[:from].first
table_alias ||= table
else
table_alias ||= dataset_alias((@opts[:num_dataset_sources] || 0)+1)
end
create_dataset = false
when SQL::Identifier
table_alias ||= table.value
when SQL::QualifiedIdentifier
table_alias ||= split_qualifiers(table).last
when SQL::AliasedExpression
return graph(table.expression, join_conditions, {:table_alias=>table.alias}.merge!(options), &block)
else
raise Error, "The dataset argument should be a symbol or dataset"
end
table_alias = table_alias.to_sym
if create_dataset
dataset = db.from(table)
end
# Raise Sequel::Error with explanation that the table alias has been used
raise_alias_error = lambda do
raise(Error, "this #{options[:table_alias] ? 'alias' : 'table'} has already been been used, please specify " \
"#{options[:table_alias] ? 'a different alias' : 'an alias via the :table_alias option'}")
end
# Only allow table aliases that haven't been used
raise_alias_error.call if @opts[:graph] && @opts[:graph][:table_aliases] && @opts[:graph][:table_aliases].include?(table_alias)
table_alias_qualifier = qualifier_from_alias_symbol(table_alias, table)
implicit_qualifier = options[:implicit_qualifier]
joined_dataset = joined_dataset?
ds = self
graph = opts[:graph]
if !graph && (select = @opts[:select]) && !select.empty?
select_columns = nil
unless !joined_dataset && select.length == 1 && (select[0].is_a?(SQL::ColumnAll))
force_from_self = false
select_columns = select.map do |sel|
unless col = _hash_key_symbol(sel)
force_from_self = true
break
end
[sel, col]
end
select_columns = nil if force_from_self
end
end
# Use a from_self if this is already a joined dataset (unless from_self is specifically disabled for graphs)
if (@opts[:graph_from_self] != false && !graph && (joined_dataset || force_from_self))
from_selfed = true
implicit_qualifier = options[:from_self_alias] || first_source
ds = ds.from_self(:alias=>implicit_qualifier)
end
# Join the table early in order to avoid cloning the dataset twice
ds = ds.join_table(options[:join_type] || :left_outer, table, join_conditions, :table_alias=>table_alias_qualifier, :implicit_qualifier=>implicit_qualifier, :qualify=>options[:qualify], &block)
return ds if options[:join_only]
opts = ds.opts
# Whether to include the table in the result set
add_table = options[:select] == false ? false : true
if graph
graph = graph.dup
select = opts[:select].dup
[:column_aliases, :table_aliases, :column_alias_num].each{|k| graph[k] = graph[k].dup}
else
# Setup the initial graph data structure if it doesn't exist
qualifier = ds.first_source_alias
master = alias_symbol(qualifier)
raise_alias_error.call if master == table_alias
# Master hash storing all .graph related information
graph = {}
# Associates column aliases back to tables and columns
column_aliases = graph[:column_aliases] = {}
# Associates table alias (the master is never aliased)
table_aliases = graph[:table_aliases] = {master=>self}
# Keep track of the alias numbers used
ca_num = graph[:column_alias_num] = Hash.new(0)
select = if select_columns
select_columns.map do |sel, column|
column_aliases[column] = [master, column]
if from_selfed
# Initial dataset was wrapped in a subselect, so select all
# columns from the subselect, qualified by the subselect alias.
Sequel.qualify(qualifier, Sequel.identifier(column))
else
# Initial dataset not wrapped in a subselect, just make
# sure columns are qualified in some way.
qualified_expression(sel, qualifier)
end
end
else
columns.map do |column|
column_aliases[column] = [master, column]
SQL::QualifiedIdentifier.new(qualifier, column)
end
end
end
# Add the table alias to the list of aliases
# Even if it isn't used in the result set,
# we add a key for it with a nil value so we can check if it
# is used more than once
table_aliases = graph[:table_aliases]
table_aliases[table_alias] = add_table ? dataset : nil
# Add the columns to the selection unless we are ignoring them
if add_table
column_aliases = graph[:column_aliases]
ca_num = graph[:column_alias_num]
# Which columns to add to the result set
cols = options[:select] || dataset.columns
# If the column hasn't been used yet, don't alias it.
# If it has been used, try table_column.
# If that has been used, try table_column_N
# using the next value of N that we know hasn't been
# used
cols.each do |column|
col_alias, identifier = if column_aliases[column]
column_alias = :"#{table_alias}_#{column}"
if column_aliases[column_alias]
column_alias_num = ca_num[column_alias]
column_alias = :"#{column_alias}_#{column_alias_num}"
ca_num[column_alias] += 1
end
[column_alias, SQL::AliasedExpression.new(SQL::QualifiedIdentifier.new(table_alias_qualifier, column), column_alias)]
else
ident = SQL::QualifiedIdentifier.new(table_alias_qualifier, column)
[column, ident]
end
column_aliases[col_alias] = [table_alias, column].freeze
select.push(identifier)
end
end
[:column_aliases, :table_aliases, :column_alias_num].each{|k| graph[k].freeze}
ds = ds.clone(:graph=>graph.freeze)
ds.select(*select)
end
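# A sketch of the resulting aliasing (hypothetical tables, both having id
# and name columns):
#
#   DB[:artists].graph(:albums, artist_id: :id)
#   # SELECT artists.id, artists.name, albums.id AS albums_id,
#   #   albums.artist_id, albums.name AS albums_name
#   # FROM artists LEFT OUTER JOIN albums ON (albums.artist_id = artists.id)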
# This allows you to manually specify the graph aliases to use
# when using graph. You can use it to only select certain
# columns, and have those columns mapped to specific aliases
# in the result set. This is the equivalent of +select+ for a
# graphed dataset, and must be used instead of +select+ whenever
# graphing is used.
#
# graph_aliases should be a hash with keys being symbols of
# column aliases, and values being either symbols or arrays with one to three elements.
# If the value is a symbol, it is assumed to be the same as a one element
# array containing that symbol.
# The first element of the array should be the table alias symbol.
# The second should be the actual column name symbol. If the array only
# has a single element the column name symbol will be assumed to be the
# same as the corresponding hash key. If the array
# has a third element, it is used as the value returned, instead of
# table_alias.column_name.
#
# DB[:artists].graph(:albums, artist_id: :id).
# set_graph_aliases(name: :artists,
# album_name: [:albums, :name],
# forty_two: [:albums, :fourtwo, 42]).first
# # SELECT artists.name, albums.name AS album_name, 42 AS forty_two ...
def set_graph_aliases(graph_aliases)
columns, graph_aliases = graph_alias_columns(graph_aliases)
if graph = opts[:graph]
select(*columns).clone(:graph => graph.merge(:column_aliases=>graph_aliases.freeze).freeze)
else
raise Error, "cannot call #set_graph_aliases on an ungraphed dataset"
end
end
# Remove the splitting of results into subhashes, and all metadata
# related to the current graph (if any).
def ungraphed
clone(:graph=>nil)
end
private
# Wrap the alias symbol in an SQL::Identifier if the identifier on which it is based
# is an SQL::Identifier. This works around cases where symbol splitting is enabled and the alias symbol contains
# double embedded underscores which would be considered an implicit qualified identifier
# if not wrapped in an SQL::Identifier.
def qualifier_from_alias_symbol(aliaz, identifier)
case identifier
when SQL::QualifiedIdentifier
if identifier.column.is_a?(String)
Sequel.identifier(aliaz)
else
aliaz
end
when SQL::Identifier
Sequel.identifier(aliaz)
else
aliaz
end
end
# Transform the hash of graph aliases and return a two element array
# where the first element is an array of identifiers suitable to pass to
# a select method, and the second is a new hash of preprocessed graph aliases.
def graph_alias_columns(graph_aliases)
gas = {}
identifiers = graph_aliases.map do |col_alias, tc|
table, column, value = Array(tc)
column ||= col_alias
gas[col_alias] = [table, column].freeze
identifier = value || SQL::QualifiedIdentifier.new(table, column)
identifier = SQL::AliasedExpression.new(identifier, col_alias) if value || column != col_alias
identifier
end
[identifiers, gas]
end
end
end
sequel-5.63.0/lib/sequel/dataset/misc.rb 0000664 0000000 0000000 00000026635 14342141206 0020065 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# ---------------------
# :section: 6 - Miscellaneous methods
# These methods don't fit cleanly into another section.
# ---------------------
# The database related to this dataset. This is the Database instance that
# will execute all of this dataset's queries.
attr_reader :db
# The hash of options for this dataset, keys are symbols.
attr_reader :opts
# Constructs a new Dataset instance with an associated database and
# options. Datasets are usually constructed by invoking the Database#[] method:
#
# DB[:posts]
#
# Sequel::Dataset is an abstract class that is not useful by itself. Each
# database adapter provides a subclass of Sequel::Dataset, and has
# the Database#dataset method return an instance of that subclass.
def initialize(db)
@db = db
@opts = OPTS
@cache = {}
freeze
end
# Define a hash value such that datasets with the same class, DB, and opts
# will be considered equal.
def ==(o)
o.is_a?(self.class) && db == o.db && opts == o.opts
end
# An object representing the current date or time, should be an instance
# of Sequel.datetime_class.
def current_datetime
Sequel.datetime_class.now
end
# Alias for ==
def eql?(o)
self == o
end
# Return self, as datasets are always frozen.
def dup
self
end
# Yield a dataset for each server in the connection pool that is tied to that server.
# Intended for use in sharded environments where all servers need to be modified
# with the same data:
#
# DB[:configs].where(key: 'setting').each_server{|ds| ds.update(value: 'new_value')}
def each_server
db.servers.each{|s| yield server(s)}
end
# Returns the string with the LIKE metacharacters (% and _) escaped.
# Useful for when the LIKE term is a user-provided string where metacharacters should not
# be recognized. Example:
#
# ds.escape_like("foo\\%_") # 'foo\\\%\_'
def escape_like(string)
string.gsub(/[\\%_]/){|m| "\\#{m}"}
end
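# A typical use (sketch, user_input is hypothetical) combines it with
# Sequel.like so user-provided text is matched literally, with the
# wildcard added explicitly:
#
#   ds.where(Sequel.like(:name, ds.escape_like(user_input) + '%'))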
if TRUE_FREEZE
# Freeze the opts when freezing the dataset.
def freeze
@opts.freeze
super
end
else
# :nocov:
def freeze # :nodoc:
self
end
def frozen? # :nodoc:
true
end
# :nocov:
end
# Alias of +first_source_alias+
def first_source
first_source_alias
end
# The first source (primary table) for this dataset. If the dataset doesn't
# have a table, raises an +Error+. If the table is aliased, returns the aliased name.
#
# DB[:table].first_source_alias
# # => :table
#
# DB[Sequel[:table].as(:t)].first_source_alias
# # => :t
def first_source_alias
source = @opts[:from]
if source.nil? || source.empty?
raise Error, 'No source specified for query'
end
case s = source.first
when SQL::AliasedExpression
s.alias
when Symbol
_, _, aliaz = split_symbol(s)
aliaz ? aliaz.to_sym : s
else
s
end
end
# The first source (primary table) for this dataset. If the dataset doesn't
# have a table, raises an error. If the table is aliased, returns the original
# table, not the alias
#
# DB[:table].first_source_table
# # => :table
#
# DB[Sequel[:table].as(:t)].first_source_table
# # => :table
def first_source_table
source = @opts[:from]
if source.nil? || source.empty?
raise Error, 'No source specified for query'
end
case s = source.first
when SQL::AliasedExpression
s.expression
when Symbol
sch, table, aliaz = split_symbol(s)
aliaz ? (sch ? SQL::QualifiedIdentifier.new(sch, table) : table.to_sym) : s
else
s
end
end
# Define a hash value such that datasets with the same class, DB, and opts,
# will have the same hash value.
def hash
[self.class, db, opts].hash
end
# Returns a string representation of the dataset including the class name
# and the corresponding SQL select statement.
def inspect
"#<#{visible_class_name}: #{sql.inspect}>"
end
# Whether this dataset is a joined dataset (multiple FROM tables or any JOINs).
def joined_dataset?
!!((opts[:from].is_a?(Array) && opts[:from].size > 1) || opts[:join])
end
# The alias to use for the row_number column, used when emulating OFFSET
# support and for eager limit strategies
def row_number_column
:x_sequel_row_number_x
end
# The row_proc for this database, should be any object that responds to +call+ with
# a single hash argument and returns the object you want #each to return.
def row_proc
@opts[:row_proc]
end
# Splits a possible implicit alias in +c+, handling both SQL::AliasedExpressions
# and Symbols. Returns an array of two elements, with the first being the
# main expression, and the second being the alias.
def split_alias(c)
case c
when Symbol
c_table, column, aliaz = split_symbol(c)
[c_table ? SQL::QualifiedIdentifier.new(c_table, column.to_sym) : column.to_sym, aliaz]
when SQL::AliasedExpression
[c.expression, c.alias]
when SQL::JoinClause
[c.table, c.table_alias]
else
[c, nil]
end
end
# This returns an SQL::Identifier or SQL::AliasedExpression containing an
# SQL identifier that represents the unqualified column for the given value.
# The given value should be a Symbol, SQL::Identifier, SQL::QualifiedIdentifier,
# or SQL::AliasedExpression containing one of those. In other cases, this
# returns nil.
def unqualified_column_for(v)
unless v.is_a?(String)
_unqualified_column_for(v)
end
end
# Creates a unique table alias that hasn't already been used in the dataset.
# table_alias can be any type of object accepted by alias_symbol.
# The symbol returned will be the implicit alias in the argument,
# possibly appended with "_N" if the implicit alias has already been
# used, where N is an integer starting at 0 and increasing until an
# unused one is found.
#
# You can provide a second, additional array argument containing symbols
# that should not be considered valid table aliases. The current aliases
# for the FROM and JOIN tables are automatically included in this array.
#
# DB[:table].unused_table_alias(:t)
# # => :t
#
# DB[:table].unused_table_alias(:table)
# # => :table_0
#
# DB[:table, :table_0].unused_table_alias(:table)
# # => :table_1
#
# DB[:table, :table_0].unused_table_alias(:table, [:table_1, :table_2])
# # => :table_3
def unused_table_alias(table_alias, used_aliases = [])
table_alias = alias_symbol(table_alias)
used_aliases += opts[:from].map{|t| alias_symbol(t)} if opts[:from]
used_aliases += opts[:join].map{|j| j.table_alias ? alias_alias_symbol(j.table_alias) : alias_symbol(j.table)} if opts[:join]
if used_aliases.include?(table_alias)
i = 0
while true
ta = :"#{table_alias}_#{i}"
return ta unless used_aliases.include?(ta)
i += 1
end
else
table_alias
end
end
# Return a modified dataset with quote_identifiers set.
def with_quote_identifiers(v)
clone(:quote_identifiers=>v, :skip_symbol_cache=>true)
end
protected
# Access the cache for the current dataset. Should be used with caution,
# as access to the cache is not thread safe without a mutex if other
# threads can reference the dataset. Symbol keys prefixed with an
# underscore are reserved for internal use.
attr_reader :cache
# Retrieve a value from the dataset's cache in a thread safe manner.
def cache_get(k)
Sequel.synchronize{@cache[k]}
end
# Set a value in the dataset's cache in a thread safe manner.
def cache_set(k, v)
Sequel.synchronize{@cache[k] = v}
end
# Clear the columns hash for the current dataset. This is not a
# thread safe operation, so it should only be used if the dataset
# could not be used by another thread (such as one that was just
# created via clone).
def clear_columns_cache
@cache.delete(:_columns)
end
# The cached columns for the current dataset.
def _columns
cache_get(:_columns)
end
private
# Check the cache for the given key, returning the value.
# Otherwise, yield to get the dataset and cache the dataset under the given key.
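#
# Usage pattern (mirroring how #for_update is defined in this library):
#
# def for_update
#   cached_dataset(:_for_update_ds){lock_style(:update)}
# end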
def cached_dataset(key)
unless ds = cache_get(key)
ds = yield
cache_set(key, ds)
end
ds
end
# Return a cached placeholder literalizer for the given key if there
# is one for this dataset. If there isn't one, increment the counter
# for the number of calls for the key, and if the counter is at least
# three, then create a placeholder literalizer by yielding to the block,
# and cache it.
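#
# Hypothetical usage sketch (the key and block here are illustrative only):
#
# loader = cached_placeholder_literalizer(:_id_eq_loader) do |pl|
#   where(id: pl.arg)
# end
# loader.first(1) if loader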
def cached_placeholder_literalizer(key)
if loader = cache_get(key)
return loader unless loader.is_a?(Integer)
loader += 1
if loader >= 3
loader = Sequel::Dataset::PlaceholderLiteralizer.loader(self){|pl, _| yield pl}
cache_set(key, loader)
else
cache_set(key, loader)
loader = nil
end
elsif cache_sql? && supports_placeholder_literalizer?
cache_set(key, 1)
end
loader
end
# Return a cached placeholder literalizer for the key, unless where_block is
# nil and where_args is an empty array or hash. This is designed to guard
# against placeholder literalizer use when passing arguments to where
# in the uncached case and filter_expr if a cached placeholder literalizer
# is used.
def cached_where_placeholder_literalizer(where_args, where_block, key, &block)
where_args = where_args[0] if where_args.length == 1
unless where_block
return if where_args == OPTS || where_args == EMPTY_ARRAY
end
cached_placeholder_literalizer(key, &block)
end
# Set the columns for the current dataset.
def columns=(v)
cache_set(:_columns, v)
end
# Set the db, opts, and cache for the copy of the dataset.
def initialize_clone(c, _=nil)
@db = c.db
@opts = Hash[c.opts]
if cols = c.cache_get(:_columns)
@cache = {:_columns=>cols}
else
@cache = {}
end
end
alias initialize_copy initialize_clone
# Internal recursive version of unqualified_column_for, handling Strings inside
# of other objects.
def _unqualified_column_for(v)
case v
when Symbol
_, c, a = Sequel.split_symbol(v)
c = Sequel.identifier(c)
a ? c.as(a) : c
when String
Sequel.identifier(v)
when SQL::Identifier
v
when SQL::QualifiedIdentifier
_unqualified_column_for(v.column)
when SQL::AliasedExpression
if expr = unqualified_column_for(v.expression)
SQL::AliasedExpression.new(expr, v.alias)
end
end
end
# Return the class name for this dataset, but skip anonymous classes
def visible_class_name
c = self.class
c = c.superclass while c.name.nil? || c.name == ''
c.name
end
end
end
sequel-5.63.0/lib/sequel/dataset/placeholder_literalizer.rb 0000664 0000000 0000000 00000017470 14342141206 0024017 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# PlaceholderLiteralizer allows you to record the application of arbitrary changes
# to a dataset with placeholder arguments, recording where those placeholder arguments
# are used in the query. When running the query, the literalization process is much
# faster as Sequel can skip most of the work it normally has to do when literalizing a
# dataset.
#
# Basically, this enables optimizations that allow Sequel to cache the SQL produced
# for a given dataset, so that it doesn't need to recompute that information every
# time.
#
# Example:
#
# loader = Sequel::Dataset::PlaceholderLiteralizer.loader(DB[:items]) do |pl, ds|
# ds.where(id: pl.arg).exclude(name: pl.arg).limit(1)
# end
# loader.first(1, "foo")
# # SELECT * FROM items WHERE ((id = 1) AND (name != 'foo')) LIMIT 1
# loader.first(2, "bar")
# # SELECT * FROM items WHERE ((id = 2) AND (name != 'bar')) LIMIT 1
#
# Caveats:
#
# Note that this method does not handle all possible cases. For example:
#
# loader = Sequel::Dataset::PlaceholderLiteralizer.loader(DB[:items]) do |pl, ds|
# ds.join(pl.arg, item_id: :id)
# end
# loader.all(:cart_items)
#
# Will not qualify the item_id column with cart_items. In this type of situation it's
# best to add a table alias when joining:
#
# loader = Sequel::Dataset::PlaceholderLiteralizer.loader(DB[:items]) do |pl, ds|
# ds.join(Sequel.as(pl.arg, :t), item_id: :id)
# end
# loader.all(:cart_items)
#
# There are other similar cases that are not handled, mainly when Sequel changes the
# SQL produced depending on the types of the arguments.
class PlaceholderLiteralizer
# A placeholder argument used by the PlaceholderLiteralizer. This records the offset
# at which the argument should be used in the resulting SQL.
class Argument
# Set the recorder, the argument position, and any transforming block to use
# for this placeholder.
def initialize(recorder, pos, transformer=nil)
@recorder = recorder
@pos = pos
@transformer = transformer
freeze
end
# Record the SQL query offset, argument position, and transforming block where the
# argument should be literalized.
def sql_literal_append(ds, sql)
if ds.opts[:placeholder_literal_null]
ds.send(:literal_append, sql, nil)
else
@recorder.use(sql, @pos, @transformer)
end
end
# Return a new Argument object for the same recorder and argument position, but with a
# different transformer block.
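#
# Illustrative sketch, assuming an items table:
#
# loader = Sequel::Dataset::PlaceholderLiteralizer.loader(DB[:items]) do |pl, ds|
#   ds.where(name: pl.arg.transform{|v| v.to_s.downcase})
# end
# loader.sql("FOO")
# # SELECT * FROM items WHERE (name = 'foo')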
def transform(&block)
Argument.new(@recorder, @pos, block)
end
end
# Records the offsets at which the placeholder arguments are used in
# the SQL query.
class Recorder
# Yields the receiver and the dataset to the block, which should
# call #arg on the receiver for each placeholder argument, and
# return the dataset that you want to load.
def loader(dataset, &block)
PlaceholderLiteralizer.new(*process(dataset, &block))
end
# Return an Argument with the specified position, or the next position. In
# general you shouldn't mix calls with an argument and calls without an
# argument for the same receiver.
def arg(v=(no_arg_given = true; @argn+=1))
unless no_arg_given
@argn = v if @argn < v
end
Argument.new(self, v)
end
# Record the offset at which the argument is used in the SQL query, and any
# transforming block.
def use(sql, arg, transformer)
@args << [sql, sql.length, arg, transformer]
end
private
# Return an array with two elements, the first being an
# SQL string with interpolated prepared argument placeholders
# (suitable for inspect), and the second being an array of
# SQL fragments suitable for using for creating a
# Sequel::SQL::PlaceholderLiteralString. Designed for use with
# emulated prepared statements.
def prepared_sql_and_frags(dataset, prepared_args, &block)
_, frags, final_sql, _ = process(dataset, &block)
frags = frags.map(&:first)
prepared_sql = String.new
frags.each_with_index do |sql, i|
prepared_sql << sql
prepared_sql << "$#{prepared_args[i]}"
end
frags << final_sql
prepared_sql << final_sql
[prepared_sql, frags]
end
# Internals of #loader and #prepared_sql_and_frags.
def process(dataset)
@argn = -1
@args = []
ds = yield self, dataset
sql = ds.clone(:placeholder_literalizer=>self).sql
last_offset = 0
fragments = @args.map do |used_sql, offset, arg, t|
raise Error, "placeholder literalizer argument literalized into different string than dataset returned" unless used_sql.equal?(sql)
a = [sql[last_offset...offset], arg, t]
last_offset = offset
a
end
final_sql = sql[last_offset..-1]
arity = @argn+1
[ds, fragments, final_sql, arity]
end
end
# Create a PlaceholderLiteralizer by yielding a Recorder and dataset to the
# given block, recording the offsets at which the recorder's arguments
# are used in the query.
def self.loader(dataset, &block)
Recorder.new.loader(dataset, &block)
end
# Save the dataset, array of SQL fragments, and ending SQL string.
def initialize(dataset, fragments, final_sql, arity)
@dataset = dataset
@fragments = fragments
@final_sql = final_sql
@arity = arity
freeze
end
# Freeze the fragments and final SQL when freezing the literalizer.
def freeze
@fragments.freeze
@final_sql.freeze
super
end
# Return a new PlaceholderLiteralizer with a modified dataset. This yields the
# receiver's dataset to the block, and the block should return the new dataset
# to use.
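#
# For example, to run the recorded query against a different shard without
# changing the generated SQL (illustrative; assumes a :read_only server):
#
# ro_loader = loader.with_dataset{|ds| ds.server(:read_only)}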
def with_dataset
dataset = yield @dataset
other = dup
other.instance_variable_set(:@dataset, dataset)
other.freeze
end
# Return an array of all objects by running the SQL query for the given arguments.
# If a block is given, yields all objects to the block after loading them.
def all(*args, &block)
@dataset.with_sql_all(sql(*args), &block)
end
# Run the SQL query for the given arguments, yielding each returned row to the block.
def each(*args, &block)
@dataset.with_sql_each(sql(*args), &block)
end
# Run the SQL query for the given arguments, returning the first row.
def first(*args)
@dataset.with_sql_first(sql(*args))
end
# Run the SQL query for the given arguments, returning the first value. For this to
# make sense, the dataset should return a single row with a single value (or no rows).
def get(*args)
@dataset.with_sql_single_value(sql(*args))
end
# Return the SQL query to use for the given arguments.
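#
# Using the loader from the example in the class documentation:
#
# loader.sql(1, "foo")
# # => "SELECT * FROM items WHERE ((id = 1) AND (name != 'foo')) LIMIT 1"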
def sql(*args)
raise Error, "wrong number of arguments (#{args.length} for #{@arity})" unless args.length == @arity
s = String.new
ds = @dataset
@fragments.each do |sql, i, transformer|
s << sql
if i.is_a?(Integer)
v = args.fetch(i)
v = transformer.call(v) if transformer
else
v = i.call
end
ds.literal_append(s, v)
end
s << @final_sql
s
end
end
end
end
sequel-5.63.0/lib/sequel/dataset/prepared_statements.rb 0000664 0000000 0000000 00000034603 14342141206 0023175 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# ---------------------
# :section: 8 - Methods related to prepared statements or bound variables
# On some adapters, these use native prepared statements and bound variables, on others
# support is emulated. For details, see the {"Prepared Statements/Bound Variables" guide}[rdoc-ref:doc/prepared_statements.rdoc].
# ---------------------
PREPARED_ARG_PLACEHOLDER = LiteralString.new('?').freeze
DEFAULT_PREPARED_STATEMENT_MODULE_METHODS = %w'execute execute_dui execute_insert'.freeze.each(&:freeze)
PREPARED_STATEMENT_MODULE_CODE = {
:bind => "opts = Hash[opts]; opts[:arguments] = bind_arguments".freeze,
:prepare => "sql = prepared_statement_name".freeze,
:prepare_bind => "sql = prepared_statement_name; opts = Hash[opts]; opts[:arguments] = bind_arguments".freeze
}.freeze
def self.prepared_statements_module(code, mods, meths=DEFAULT_PREPARED_STATEMENT_MODULE_METHODS, &block)
code = PREPARED_STATEMENT_MODULE_CODE[code] || code
Module.new do
Array(mods).each do |mod|
include mod
end
if block
module_eval(&block)
end
meths.each do |meth|
module_eval("def #{meth}(sql, opts=Sequel::OPTS) #{code}; super end", __FILE__, __LINE__)
end
private(*meths)
end
end
private_class_method :prepared_statements_module
# Default implementation of the argument mapper to allow
# native database support for bind variables and prepared
# statements (as opposed to the emulated ones used by default).
module ArgumentMapper
# The name of the prepared statement, if any.
def prepared_statement_name
@opts[:prepared_statement_name]
end
# The bind arguments to use for running this prepared statement
def bind_arguments
@opts[:bind_arguments]
end
# Set the bind arguments based on the hash and call super.
def call(bind_vars=OPTS, &block)
sql = prepared_sql
prepared_args.freeze
ps = bind(bind_vars)
ps.clone(:bind_arguments=>ps.map_to_prepared_args(ps.opts[:bind_vars]), :sql=>sql, :prepared_sql=>sql).run(&block)
end
# Override the given *_sql method based on the type, and
# cache the result of the sql.
def prepared_sql
if sql = @opts[:prepared_sql] || cache_get(:_prepared_sql)
return sql
end
cache_set(:_prepared_sql, super)
end
private
# Report that prepared statements are not emulated, since
# all adapters that use this use native prepared statements.
def emulate_prepared_statements?
false
end
end
# Backbone of the prepared statement support. Grafts bind variable
# support into datasets by hijacking #literal and using placeholders.
# By default, emulates prepared statements and bind variables by
# taking the hash of bind variables and directly substituting them
# into the query, which works on all databases, as it is no different
# from using the dataset without bind variables.
module PreparedStatementMethods
# Whether to log the full SQL query. By default, just the prepared statement
# name is generally logged on adapters that support native prepared statements.
def log_sql
@opts[:log_sql]
end
# The type of prepared statement, should be one of :select, :first,
# :insert, :update, :delete, or :single_value
def prepared_type
@opts[:prepared_type]
end
# The array/hash of bound variable placeholder names.
def prepared_args
@opts[:prepared_args]
end
# The dataset that created this prepared statement.
def orig_dataset
@opts[:orig_dataset]
end
# The argument to supply to insert and update, which may use
# placeholders specified by prepared_args
def prepared_modify_values
@opts[:prepared_modify_values]
end
# Sets the prepared_args to the given hash and runs the
# prepared statement.
def call(bind_vars=OPTS, &block)
bind(bind_vars).run(&block)
end
# Raise an error if attempting to call prepare on an already
# prepared statement.
def prepare(*)
raise Error, "cannot prepare an already prepared statement" unless allow_preparing_prepared_statements?
super
end
# Send the columns to the original dataset, as calling it
# on the prepared statement can cause problems.
def columns
orig_dataset.columns
end
# Disallow use of delayed evaluations in prepared statements.
def delayed_evaluation_sql_append(sql, delay)
raise Error, "delayed evaluations cannot be used in prepared statements" if @opts[:no_delayed_evaluations]
super
end
# Returns the SQL for the prepared statement, depending on
# the type of the statement and the prepared_modify_values.
def prepared_sql
case prepared_type
when :select, :all, :each
# Most common scenario, so listed first.
select_sql
when :first, :single_value
clone(:limit=>1).select_sql
when :insert_select
insert_select_sql(*prepared_modify_values)
when :insert, :insert_pk
insert_sql(*prepared_modify_values)
when :update
update_sql(*prepared_modify_values)
when :delete
delete_sql
else
select_sql
end
end
# Changes the values of symbols if they start with $ and
# bind variables are present. If so, they are considered placeholders,
# and they are substituted using prepared_arg.
def literal_symbol_append(sql, v)
if @opts[:bind_vars] && /\A\$(.*)\z/ =~ v
literal_append(sql, prepared_arg($1.to_sym))
else
super
end
end
# Programmer friendly string showing this is a prepared statement,
# with the prepared SQL it represents (which in general won't have
# substituted variables).
def inspect
"<#{visible_class_name}/PreparedStatement #{prepared_sql.inspect}>"
end
protected
# Run the method based on the type of prepared statement.
def run(&block)
case prepared_type
when :select, :all
all(&block)
when :each
each(&block)
when :insert_select
with_sql(prepared_sql).first
when :first
first
when :insert, :update, :delete
if opts[:returning] && supports_returning?(prepared_type)
returning_fetch_rows(prepared_sql)
elsif prepared_type == :delete
delete
else
public_send(prepared_type, *prepared_modify_values)
end
when :insert_pk
fetch_rows(prepared_sql){|r| return r.values.first}
when Array
# :nocov:
case prepared_type[0]
# :nocov:
when :map, :as_hash, :to_hash, :to_hash_groups
public_send(*prepared_type, &block)
end
when :single_value
single_value
else
raise Error, "unsupported prepared statement type used: #{prepared_type.inspect}"
end
end
private
# Returns the value of the prepared_args hash for the given key.
def prepared_arg(k)
@opts[:bind_vars][k]
end
# The symbol cache should always be skipped, since placeholders are symbols.
def skip_symbol_cache?
true
end
# Use a clone of the dataset extended with prepared statement
# support and using the same argument hash so that you can use
# bind variables/prepared arguments in subselects.
def subselect_sql_append(sql, ds)
subselect_sql_dataset(sql, ds).prepared_sql
end
def subselect_sql_dataset(sql, ds)
super.clone(:prepared_args=>prepared_args, :bind_vars=>@opts[:bind_vars]).
send(:to_prepared_statement, :select, nil, :extend=>prepared_statement_modules)
end
end
# Default implementation for an argument mapper that uses
# unnumbered SQL placeholder arguments. Keeps track of which
# arguments have been used, and allows arguments to
# be used more than once.
module UnnumberedArgumentMapper
include ArgumentMapper
protected
# Returns a single output array mapping the values of the input hash.
# Keys in the input hash that are used more than once in the query
# have multiple entries in the output array.
def map_to_prepared_args(bind_vars)
prepared_args.map{|v| bind_vars[v]}
end
private
# Associates the argument with name k with the next position in
# the output array.
def prepared_arg(k)
prepared_args << k
prepared_arg_placeholder
end
end
# Prepared statements emulation support for adapters that don't
# support native prepared statements. Uses a placeholder
# literalizer to hold the prepared sql with the ability to
# interpolate arguments to prepare the final SQL string.
module EmulatePreparedStatementMethods
include UnnumberedArgumentMapper
def run(&block)
if @opts[:prepared_sql_frags]
sql = literal(Sequel::SQL::PlaceholderLiteralString.new(@opts[:prepared_sql_frags], @opts[:bind_arguments], false))
clone(:prepared_sql_frags=>nil, :sql=>sql, :prepared_sql=>sql).run(&block)
else
super
end
end
private
# Turn emulation of prepared statements back on, since ArgumentMapper
# turns it off.
def emulate_prepared_statements?
true
end
def emulated_prepared_statement(type, name, values)
prepared_sql, frags = Sequel::Dataset::PlaceholderLiteralizer::Recorder.new.send(:prepared_sql_and_frags, self, prepared_args) do |pl, ds|
ds = ds.clone(:recorder=>pl)
case type
when :first, :single_value
ds.limit(1)
when :update, :insert, :insert_select, :delete
ds.with_sql(:"#{type}_sql", *values)
when :insert_pk
ds.with_sql(:insert_sql, *values)
else
ds
end
end
prepared_args.freeze
clone(:prepared_sql_frags=>frags, :prepared_sql=>prepared_sql, :sql=>prepared_sql)
end
# Associates the argument with name k with the next position in
# the output array.
def prepared_arg(k)
prepared_args << k
@opts[:recorder].arg
end
def subselect_sql_dataset(sql, ds)
super.clone(:recorder=>@opts[:recorder]).
with_extend(EmulatePreparedStatementMethods)
end
end
# Set the bind variables to use for the call. If bind variables have
# already been set for this dataset, they are updated with the contents
# of bind_vars.
#
# DB[:table].where(id: :$id).bind(id: 1).call(:first)
# # SELECT * FROM table WHERE id = ? LIMIT 1 -- (1)
# # => {:id=>1}
def bind(bind_vars=OPTS)
bind_vars = if bv = @opts[:bind_vars]
bv.merge(bind_vars).freeze
else
if bind_vars.frozen?
bind_vars
else
Hash[bind_vars]
end
end
clone(:bind_vars=>bind_vars)
end
# For the given type (:select, :first, :insert, :insert_select, :update, :delete, or :single_value),
# run the sql with the bind variables specified in the hash. +values+ is a hash passed to
# insert or update (if one of those types is used), which may contain placeholders.
#
# DB[:table].where(id: :$id).call(:first, id: 1)
# # SELECT * FROM table WHERE id = ? LIMIT 1 -- (1)
# # => {:id=>1}
def call(type, bind_variables=OPTS, *values, &block)
to_prepared_statement(type, values, :extend=>bound_variable_modules).call(bind_variables, &block)
end
# Prepare an SQL statement for later execution. Takes a type similar to #call,
# and the +name+ symbol of the prepared statement.
#
# This returns a clone of the dataset extended with PreparedStatementMethods,
# which you can +call+ with the hash of bind variables to use.
# The prepared statement is also stored in
# the associated Database, where it can be called by name.
# The following usage is identical:
#
# ps = DB[:table].where(name: :$name).prepare(:first, :select_by_name)
#
# ps.call(name: 'Blah')
# # SELECT * FROM table WHERE name = ? -- ('Blah')
# # => {:id=>1, :name=>'Blah'}
#
# DB.call(:select_by_name, name: 'Blah') # Same thing
def prepare(type, name, *values)
ps = to_prepared_statement(type, values, :name=>name, :extend=>prepared_statement_modules, :no_delayed_evaluations=>true)
ps = if ps.send(:emulate_prepared_statements?)
ps = ps.with_extend(EmulatePreparedStatementMethods)
ps.send(:emulated_prepared_statement, type, name, values)
else
sql = ps.prepared_sql
ps.prepared_args.freeze
ps.clone(:prepared_sql=>sql, :sql=>sql)
end
db.set_prepared_statement(name, ps)
ps
end
protected
# Return a cloned copy of the current dataset extended with
# PreparedStatementMethods, setting the type and modify values.
def to_prepared_statement(type, values=nil, opts=OPTS)
mods = opts[:extend] || []
mods += [PreparedStatementMethods]
bind.
clone(:prepared_statement_name=>opts[:name], :prepared_type=>type, :prepared_modify_values=>values, :orig_dataset=>self, :no_cache_sql=>true, :prepared_args=>@opts[:prepared_args]||[], :no_delayed_evaluations=>opts[:no_delayed_evaluations]).
with_extend(*mods)
end
private
# Don't allow preparing prepared statements by default.
def allow_preparing_prepared_statements?
false
end
def bound_variable_modules
prepared_statement_modules
end
# Whether prepared statements should be emulated. True by
# default, so adapters must opt in to native prepared statement support.
def emulate_prepared_statements?
true
end
def prepared_statement_modules
[]
end
# The argument placeholder. Most databases use unnumbered
# arguments with question marks, so that is the default.
def prepared_arg_placeholder
PREPARED_ARG_PLACEHOLDER
end
end
end
sequel-5.63.0/lib/sequel/dataset/query.rb 0000664 0000000 0000000 00000172107 14342141206 0020273 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# ---------------------
# :section: 1 - Methods that return modified datasets
# These methods all return modified copies of the receiver.
# ---------------------
# Hash of extension name symbols to callable objects to load the extension
# into the Dataset object (usually by extending it with a module defined
# in the extension).
EXTENSIONS = {}
EMPTY_ARRAY = [].freeze
# The dataset options that require the removal of cached columns if changed.
COLUMN_CHANGE_OPTS = [:select, :sql, :from, :join].freeze
# Which options don't affect the SQL generation. Used by simple_select_all?
# to determine if this is a simple SELECT * FROM table.
NON_SQL_OPTIONS = [:server, :graph, :row_proc, :quote_identifiers, :skip_symbol_cache].freeze
# These symbols have _join methods created (e.g. inner_join) that
# call join_table with the symbol, passing along the arguments and
# block from the method call.
CONDITIONED_JOIN_TYPES = [:inner, :full_outer, :right_outer, :left_outer, :full, :right, :left].freeze
# These symbols have _join methods created (e.g. natural_join).
# They accept a table argument and options hash which is passed to join_table,
# and they raise an error if called with a block.
UNCONDITIONED_JOIN_TYPES = [:natural, :natural_left, :natural_right, :natural_full, :cross].freeze
# All methods that return modified datasets with a joined table added.
JOIN_METHODS = ((CONDITIONED_JOIN_TYPES + UNCONDITIONED_JOIN_TYPES).map{|x| "#{x}_join".to_sym} + [:join, :join_table]).freeze
# Methods that return modified datasets
QUERY_METHODS = ((<<-METHS).split.map(&:to_sym) + JOIN_METHODS).freeze
add_graph_aliases distinct except exclude exclude_having
filter for_update from from_self graph grep group group_and_count group_append group_by having intersect invert
limit lock_style naked offset or order order_append order_by order_more order_prepend qualify
reverse reverse_order select select_all select_append select_group select_more server
set_graph_aliases unfiltered ungraphed ungrouped union
unlimited unordered where with with_recursive with_sql
METHS
# Register an extension callback for Dataset objects. ext should be the
# extension name symbol, and mod should either be a Module that the
# dataset is extended with, or a callable object called with the database
# object. If mod is not provided, a block can be provided and is treated
# as the mod object.
#
# If mod is a module, this also registers a Database extension that will
# extend all of the database's datasets.
def self.register_extension(ext, mod=nil, &block)
if mod
raise(Error, "cannot provide both mod and block to Dataset.register_extension") if block
if mod.is_a?(Module)
block = proc{|ds| ds.extend(mod)}
Sequel::Database.register_extension(ext){|db| db.extend_datasets(mod)}
else
block = mod
end
end
Sequel.synchronize{EXTENSIONS[ext] = block}
end
# On Ruby 2.4+, use clone(freeze: false) to create clones, because
# we use true freezing in that case, and we need to modify the opts
# in the frozen copy.
#
# On Ruby <2.4, just use Object#clone directly, since we don't
# use true freezing as it isn't possible.
if TRUE_FREEZE
# Save original clone implementation, as some other methods need
# to call it internally.
alias _clone clone
private :_clone
# Returns a new clone of the dataset with the given options merged.
# If the options changed include options in COLUMN_CHANGE_OPTS, the cached
# columns are deleted. This method should generally not be called
# directly by user code.
def clone(opts = nil || (return self))
# return self used above because clone is called by almost all
# other query methods, and it is the fastest approach
c = super(:freeze=>false)
c.opts.merge!(opts)
unless opts.each_key{|o| break if COLUMN_CHANGE_OPTS.include?(o)}
c.clear_columns_cache
end
c.freeze
end
else
# :nocov:
def clone(opts = OPTS) # :nodoc:
c = super()
c.opts.merge!(opts)
unless opts.each_key{|o| break if COLUMN_CHANGE_OPTS.include?(o)}
c.clear_columns_cache
end
c.opts.freeze
c
end
# :nocov:
end
# Returns a copy of the dataset with the SQL DISTINCT clause. The DISTINCT
# clause is used to remove duplicate rows from the output. If arguments
# are provided, uses a DISTINCT ON clause, in which case it will only be
# distinct on those columns, instead of all returned columns. If a block
# is given, it is treated as a virtual row block, similar to +where+.
# Raises an error if arguments are given and DISTINCT ON is not supported.
#
# DB[:items].distinct # SQL: SELECT DISTINCT * FROM items
# DB[:items].order(:id).distinct(:id) # SQL: SELECT DISTINCT ON (id) * FROM items ORDER BY id
# DB[:items].order(:id).distinct{func(:id)} # SQL: SELECT DISTINCT ON (func(id)) * FROM items ORDER BY id
#
# There is support for emulating the DISTINCT ON support in MySQL, but it
# does not support the ORDER of the dataset, and also doesn't work in many
# cases if the ONLY_FULL_GROUP_BY sql_mode is used, which is the default on
# MySQL 5.7.5+.
def distinct(*args, &block)
virtual_row_columns(args, block)
if args.empty?
cached_dataset(:_distinct_ds){clone(:distinct => EMPTY_ARRAY)}
else
raise(InvalidOperation, "DISTINCT ON not supported") unless supports_distinct_on?
clone(:distinct => args.freeze)
end
end
# Adds an EXCEPT clause using a second dataset object.
# An EXCEPT compound dataset returns all rows in the current dataset
# that are not in the given dataset.
# Raises an +InvalidOperation+ if the operation is not supported.
# Options:
# :alias :: Use the given value as the from_self alias
# :all :: Set to true to use EXCEPT ALL instead of EXCEPT, so duplicate rows can occur
# :from_self :: Set to false to not wrap the returned dataset in a from_self, use with care.
#
# DB[:items].except(DB[:other_items])
# # SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS t1
#
# DB[:items].except(DB[:other_items], all: true, from_self: false)
# # SELECT * FROM items EXCEPT ALL SELECT * FROM other_items
#
# DB[:items].except(DB[:other_items], alias: :i)
# # SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS i
def except(dataset, opts=OPTS)
raise(InvalidOperation, "EXCEPT not supported") unless supports_intersect_except?
raise(InvalidOperation, "EXCEPT ALL not supported") if opts[:all] && !supports_intersect_except_all?
compound_clone(:except, dataset, opts)
end
# Performs the inverse of Dataset#where. Note that if you have multiple filter
# conditions, this is not the same as a negation of all conditions.
#
# DB[:items].exclude(category: 'software')
# # SELECT * FROM items WHERE (category != 'software')
#
# DB[:items].exclude(category: 'software', id: 3)
# # SELECT * FROM items WHERE ((category != 'software') OR (id != 3))
#
# Also note that SQL uses 3-valued boolean logic (+true+, +false+, +NULL+), so
# the inverse of a true condition is a false condition, and will still
# not match rows that were NULL originally. If you take the earlier
# example:
#
# DB[:items].exclude(category: 'software')
# # SELECT * FROM items WHERE (category != 'software')
#
# Note that this does not match rows where +category+ is +NULL+. This
# is because +NULL+ is an unknown value, and you do not know whether
# or not the +NULL+ category is +software+. You can explicitly
# specify how to handle +NULL+ values if you want:
#
# DB[:items].exclude(Sequel.~(category: nil) & {category: 'software'})
# # SELECT * FROM items WHERE ((category IS NULL) OR (category != 'software'))
def exclude(*cond, &block)
add_filter(:where, cond, true, &block)
end
# Inverts the given conditions and adds them to the HAVING clause.
#
# DB[:items].select_group(:name).exclude_having{count(name) < 2}
# # SELECT name FROM items GROUP BY name HAVING (count(name) >= 2)
#
# See documentation for exclude for how inversion is handled in regards
# to SQL 3-valued boolean logic.
def exclude_having(*cond, &block)
add_filter(:having, cond, true, &block)
end
if TRUE_FREEZE
# Return a clone of the dataset loaded with the given dataset extensions.
# If no related extension file exists or the extension does not have
# specific support for Dataset objects, an Error will be raised.
def extension(*a)
c = _clone(:freeze=>false)
c.send(:_extension!, a)
c.freeze
end
else
# :nocov:
def extension(*exts) # :nodoc:
c = clone
c.send(:_extension!, exts)
c
end
# :nocov:
end
# Alias for where.
def filter(*cond, &block)
where(*cond, &block)
end
# Returns a cloned dataset with a :update lock style.
#
# DB[:table].for_update # SELECT * FROM table FOR UPDATE
def for_update
cached_dataset(:_for_update_ds){lock_style(:update)}
end
# Returns a copy of the dataset with the source changed. If no
# source is given, removes all tables. If multiple sources
# are given, it is the same as using a CROSS JOIN (cartesian product) between all tables.
# If a block is given, it is treated as a virtual row block, similar to +where+.
#
# DB[:items].from # SQL: SELECT *
# DB[:items].from(:blah) # SQL: SELECT * FROM blah
# DB[:items].from(:blah, :foo) # SQL: SELECT * FROM blah, foo
# DB[:items].from{fun(arg)} # SQL: SELECT * FROM fun(arg)
def from(*source, &block)
virtual_row_columns(source, block)
table_alias_num = 0
ctes = nil
source.map! do |s|
case s
when Dataset
if hoist_cte?(s)
ctes ||= []
ctes += s.opts[:with]
s = s.clone(:with=>nil)
end
SQL::AliasedExpression.new(s, dataset_alias(table_alias_num+=1))
when Symbol
sch, table, aliaz = split_symbol(s)
if aliaz
s = sch ? SQL::QualifiedIdentifier.new(sch, table) : SQL::Identifier.new(table)
SQL::AliasedExpression.new(s, aliaz.to_sym)
else
s
end
else
s
end
end
o = {:from=>source.empty? ? nil : source.freeze}
o[:with] = ((opts[:with] || EMPTY_ARRAY) + ctes).freeze if ctes
o[:num_dataset_sources] = table_alias_num if table_alias_num > 0
clone(o)
end
# Returns a dataset selecting from the current dataset.
# Options:
# :alias :: Controls the alias of the table
# :column_aliases :: Also aliases columns, using derived column lists.
# Only used in conjunction with :alias.
#
# ds = DB[:items].order(:name).select(:id, :name)
# SELECT id, name FROM items ORDER BY name
#
# ds.from_self
# # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS t1
#
# ds.from_self(alias: :foo)
# # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS foo
#
# ds.from_self(alias: :foo, column_aliases: [:c1, :c2])
# # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS foo(c1, c2)
def from_self(opts=OPTS)
fs = {}
@opts.keys.each{|k| fs[k] = nil unless non_sql_option?(k)}
pr = proc do
c = clone(fs).from(opts[:alias] ? as(opts[:alias], opts[:column_aliases]) : self)
if cols = _columns
c.send(:columns=, cols)
end
c
end
opts.empty? ? cached_dataset(:_from_self_ds, &pr) : pr.call
end
# Match any of the columns to any of the patterns. The terms can be
# strings (which use LIKE) or regular expressions if the database supports that.
# Note that the total number of pattern matches will be
# Array(columns).length * Array(terms).length,
# which could cause performance issues.
#
# Options (all are boolean):
#
# :all_columns :: All columns must be matched to any of the given patterns.
# :all_patterns :: All patterns must match at least one of the columns.
# :case_insensitive :: Use a case insensitive pattern match (the default is
# case sensitive if the database supports it).
#
# If both :all_columns and :all_patterns are true, all columns must match all patterns.
#
# Examples:
#
# dataset.grep(:a, '%test%')
# # SELECT * FROM items WHERE (a LIKE '%test%' ESCAPE '\')
#
# dataset.grep([:a, :b], %w'%test% foo')
# # SELECT * FROM items WHERE ((a LIKE '%test%' ESCAPE '\') OR (a LIKE 'foo' ESCAPE '\')
# # OR (b LIKE '%test%' ESCAPE '\') OR (b LIKE 'foo' ESCAPE '\'))
#
# dataset.grep([:a, :b], %w'%foo% %bar%', all_patterns: true)
# # SELECT * FROM a WHERE (((a LIKE '%foo%' ESCAPE '\') OR (b LIKE '%foo%' ESCAPE '\'))
# # AND ((a LIKE '%bar%' ESCAPE '\') OR (b LIKE '%bar%' ESCAPE '\')))
#
# dataset.grep([:a, :b], %w'%foo% %bar%', all_columns: true)
# # SELECT * FROM a WHERE (((a LIKE '%foo%' ESCAPE '\') OR (a LIKE '%bar%' ESCAPE '\'))
# # AND ((b LIKE '%foo%' ESCAPE '\') OR (b LIKE '%bar%' ESCAPE '\')))
#
# dataset.grep([:a, :b], %w'%foo% %bar%', all_patterns: true, all_columns: true)
# # SELECT * FROM a WHERE ((a LIKE '%foo%' ESCAPE '\') AND (b LIKE '%foo%' ESCAPE '\')
# # AND (a LIKE '%bar%' ESCAPE '\') AND (b LIKE '%bar%' ESCAPE '\'))
def grep(columns, patterns, opts=OPTS)
column_op = opts[:all_columns] ? :AND : :OR
if opts[:all_patterns]
conds = Array(patterns).map do |pat|
SQL::BooleanExpression.new(column_op, *Array(columns).map{|c| SQL::StringExpression.like(c, pat, opts)})
end
where(SQL::BooleanExpression.new(:AND, *conds))
else
conds = Array(columns).map do |c|
SQL::BooleanExpression.new(:OR, *Array(patterns).map{|pat| SQL::StringExpression.like(c, pat, opts)})
end
where(SQL::BooleanExpression.new(column_op, *conds))
end
end
# Returns a copy of the dataset with the results grouped by the value of
# the given columns. If a block is given, it is treated
# as a virtual row block, similar to +where+.
#
# DB[:items].group(:id) # SELECT * FROM items GROUP BY id
# DB[:items].group(:id, :name) # SELECT * FROM items GROUP BY id, name
# DB[:items].group{[a, sum(b)]} # SELECT * FROM items GROUP BY a, sum(b)
def group(*columns, &block)
virtual_row_columns(columns, block)
clone(:group => (columns.compact.empty? ? nil : columns.freeze))
end
# Alias of group
def group_by(*columns, &block)
group(*columns, &block)
end
# Returns a dataset grouped by the given column with count by group.
# Column aliases may be supplied, and will be included in the select clause.
# If a block is given, it is treated as a virtual row block, similar to +where+.
#
# Examples:
#
# DB[:items].group_and_count(:name).all
# # SELECT name, count(*) AS count FROM items GROUP BY name
# # => [{:name=>'a', :count=>1}, ...]
#
# DB[:items].group_and_count(:first_name, :last_name).all
# # SELECT first_name, last_name, count(*) AS count FROM items GROUP BY first_name, last_name
# # => [{:first_name=>'a', :last_name=>'b', :count=>1}, ...]
#
# DB[:items].group_and_count(Sequel[:first_name].as(:name)).all
# # SELECT first_name AS name, count(*) AS count FROM items GROUP BY first_name
# # => [{:name=>'a', :count=>1}, ...]
#
# DB[:items].group_and_count{substr(:first_name, 1, 1).as(:initial)}.all
# # SELECT substr(first_name, 1, 1) AS initial, count(*) AS count FROM items GROUP BY substr(first_name, 1, 1)
# # => [{:initial=>'a', :count=>1}, ...]
def group_and_count(*columns, &block)
select_group(*columns, &block).select_append(COUNT_OF_ALL_AS_COUNT)
end
# Returns a copy of the dataset with the given columns added to the list of
# existing columns to group on. If no existing columns are present this
# method simply sets the columns as the initial ones to group on.
#
# DB[:items].group_append(:b) # SELECT * FROM items GROUP BY b
# DB[:items].group(:a).group_append(:b) # SELECT * FROM items GROUP BY a, b
def group_append(*columns, &block)
columns = @opts[:group] + columns if @opts[:group]
group(*columns, &block)
end
# Adds the appropriate CUBE syntax to GROUP BY.
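#
# Example SQL on a database supporting CUBE natively, such as PostgreSQL:
#
# DB[:items].group(:a, :b).group_cube
# # SELECT * FROM items GROUP BY CUBE(a, b)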
def group_cube
raise Error, "GROUP BY CUBE not supported on #{db.database_type}" unless supports_group_cube?
clone(:group_options=>:cube)
end
# Adds the appropriate ROLLUP syntax to GROUP BY.
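#
# Example SQL on databases with native ROLLUP support; MySQL instead
# uses the WITH ROLLUP syntax:
#
# DB[:items].group(:a, :b).group_rollup
# # SELECT * FROM items GROUP BY ROLLUP(a, b)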
def group_rollup
raise Error, "GROUP BY ROLLUP not supported on #{db.database_type}" unless supports_group_rollup?
clone(:group_options=>:rollup)
end
# Adds the appropriate GROUPING SETS syntax to GROUP BY.
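#
# Illustrative example on a database with GROUPING SETS support:
#
# DB[:items].group([:a, :b], :a, []).grouping_sets
# # SELECT * FROM items GROUP BY GROUPING SETS((a, b), (a), ())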
def grouping_sets
raise Error, "GROUP BY GROUPING SETS not supported on #{db.database_type}" unless supports_grouping_sets?
clone(:group_options=>:"grouping sets")
end
# Returns a copy of the dataset with the HAVING conditions changed. See #where for argument types.
#
# DB[:items].group(:sum).having(sum: 10)
# # SELECT * FROM items GROUP BY sum HAVING (sum = 10)
def having(*cond, &block)
add_filter(:having, cond, &block)
end
# Adds an INTERSECT clause using a second dataset object.
# An INTERSECT compound dataset returns all rows in both the current dataset
# and the given dataset.
# Raises an +InvalidOperation+ if the operation is not supported.
# Options:
# :alias :: Use the given value as the from_self alias
# :all :: Set to true to use INTERSECT ALL instead of INTERSECT, so duplicate rows can occur
# :from_self :: Set to false to not wrap the returned dataset in a from_self, use with care.
#
# DB[:items].intersect(DB[:other_items])
# # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS t1
#
# DB[:items].intersect(DB[:other_items], all: true, from_self: false)
# # SELECT * FROM items INTERSECT ALL SELECT * FROM other_items
#
# DB[:items].intersect(DB[:other_items], alias: :i)
# # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS i
def intersect(dataset, opts=OPTS)
raise(InvalidOperation, "INTERSECT not supported") unless supports_intersect_except?
raise(InvalidOperation, "INTERSECT ALL not supported") if opts[:all] && !supports_intersect_except_all?
compound_clone(:intersect, dataset, opts)
end
# Inverts the current WHERE and HAVING clauses. If there is neither a
# WHERE or HAVING clause, adds a WHERE clause that is always false.
#
# DB[:items].where(category: 'software').invert
# # SELECT * FROM items WHERE (category != 'software')
#
# DB[:items].where(category: 'software', id: 3).invert
# # SELECT * FROM items WHERE ((category != 'software') OR (id != 3))
#
# See documentation for exclude for how inversion is handled in regards
# to SQL 3-valued boolean logic.
def invert
cached_dataset(:_invert_ds) do
having, where = @opts.values_at(:having, :where)
if having.nil? && where.nil?
where(false)
else
o = {}
o[:having] = SQL::BooleanExpression.invert(having) if having
o[:where] = SQL::BooleanExpression.invert(where) if where
clone(o)
end
end
end
# Alias of +inner_join+
def join(*args, &block)
inner_join(*args, &block)
end
# Returns a joined dataset. Not usually called directly; users should use the
# appropriate join method (e.g. join, left_join, natural_join, cross_join) which fills
# in the +type+ argument.
#
# Takes the following arguments:
#
# type :: The type of join to do (e.g. :inner)
# table :: table to join into the current dataset. Generally one of the following types:
# String, Symbol :: identifier used as table or view name
# Dataset :: a subselect is performed with an alias of tN for some value of N
# SQL::Function :: set returning function
# SQL::AliasedExpression :: already aliased expression. Uses given alias unless
# overridden by the :table_alias option.
# expr :: conditions used when joining, depends on type:
# Hash, Array of pairs :: Assumes key (1st arg) is column of joined table (unless already
# qualified), and value (2nd arg) is column of the last joined or
# primary table (or the :implicit_qualifier option).
# To specify multiple conditions on a single joined table column,
# you must use an array. Uses a JOIN with an ON clause.
# Array :: If all members of the array are symbols, considers them as columns and
# uses a JOIN with a USING clause. Most databases will remove duplicate columns from
# the result set if this is used.
# nil :: If a block is not given, doesn't use ON or USING, so the JOIN should be a NATURAL
# or CROSS join. If a block is given, uses an ON clause based on the block, see below.
# otherwise :: Treats the argument as a filter expression, so strings are considered literal, symbols
# specify boolean columns, and Sequel expressions can be used. Uses a JOIN with an ON clause.
# options :: a hash of options, with the following keys supported:
# :table_alias :: Override the table alias used when joining. In general you shouldn't use this
# option; instead, provide the appropriate SQL::AliasedExpression as the table
# argument.
# :implicit_qualifier :: The name to use for qualifying implicit conditions. By default,
# the last joined or primary table is used.
# :join_using :: Force the using of JOIN USING, even if +expr+ is not an array of symbols.
# :reset_implicit_qualifier :: Can set to false to ignore this join when future joins determine qualifier
# for implicit conditions.
# :qualify :: Can be set to false to not do any implicit qualification. Can be set
# to :deep to use the Qualifier AST Transformer, which will attempt to qualify
# subexpressions of the expression tree. Can be set to :symbol to only qualify
# symbols. Defaults to the value of default_join_table_qualification.
# block :: The block argument should only be given if a JOIN with an ON clause is used,
# in which case it yields the table alias/name for the table currently being joined,
# the table alias/name for the last joined (or first table), and an array of previous
# SQL::JoinClause. Unlike +where+, this block is not treated as a virtual row block.
#
# Examples:
#
# DB[:a].join_table(:cross, :b)
# # SELECT * FROM a CROSS JOIN b
#
# DB[:a].join_table(:inner, DB[:b], c: d)
# # SELECT * FROM a INNER JOIN (SELECT * FROM b) AS t1 ON (t1.c = a.d)
#
# DB[:a].join_table(:left, Sequel[:b].as(:c), [:d])
# # SELECT * FROM a LEFT JOIN b AS c USING (d)
#
# DB[:a].natural_join(:b).join_table(:inner, :c) do |ta, jta, js|
# (Sequel.qualify(ta, :d) > Sequel.qualify(jta, :e)) & {Sequel.qualify(ta, :f)=>DB.from(js.first.table).select(:g)}
# end
# # SELECT * FROM a NATURAL JOIN b INNER JOIN c
# # ON ((c.d > b.e) AND (c.f IN (SELECT g FROM b)))
def join_table(type, table, expr=nil, options=OPTS, &block)
if hoist_cte?(table)
s, ds = hoist_cte(table)
return s.join_table(type, ds, expr, options, &block)
end
using_join = options[:join_using] || (expr.is_a?(Array) && !expr.empty? && expr.all?{|x| x.is_a?(Symbol)})
if using_join && !supports_join_using?
h = {}
expr.each{|e| h[e] = e}
return join_table(type, table, h, options)
end
table_alias = options[:table_alias]
if table.is_a?(SQL::AliasedExpression)
table_expr = if table_alias
SQL::AliasedExpression.new(table.expression, table_alias, table.columns)
else
table
end
table = table_expr.expression
table_name = table_alias = table_expr.alias
elsif table.is_a?(Dataset)
if table_alias.nil?
table_alias_num = (@opts[:num_dataset_sources] || 0) + 1
table_alias = dataset_alias(table_alias_num)
end
table_name = table_alias
table_expr = SQL::AliasedExpression.new(table, table_alias)
else
table, implicit_table_alias = split_alias(table)
table_alias ||= implicit_table_alias
table_name = table_alias || table
table_expr = table_alias ? SQL::AliasedExpression.new(table, table_alias) : table
end
join = if expr.nil? and !block
SQL::JoinClause.new(type, table_expr)
elsif using_join
raise(Sequel::Error, "can't use a block if providing an array of symbols as expr") if block
SQL::JoinUsingClause.new(expr, type, table_expr)
else
last_alias = options[:implicit_qualifier] || @opts[:last_joined_table] || first_source_alias
qualify_type = options[:qualify]
if Sequel.condition_specifier?(expr)
expr = expr.map do |k, v|
qualify_type = default_join_table_qualification if qualify_type.nil?
case qualify_type
when false
nil # Do no qualification
when :deep
k = Sequel::Qualifier.new(table_name).transform(k)
v = Sequel::Qualifier.new(last_alias).transform(v)
else
k = qualified_column_name(k, table_name) if k.is_a?(Symbol)
v = qualified_column_name(v, last_alias) if v.is_a?(Symbol)
end
[k,v]
end
expr = SQL::BooleanExpression.from_value_pairs(expr)
end
if block
expr2 = yield(table_name, last_alias, @opts[:join] || EMPTY_ARRAY)
expr = expr ? SQL::BooleanExpression.new(:AND, expr, expr2) : expr2
end
SQL::JoinOnClause.new(expr, type, table_expr)
end
opts = {:join => ((@opts[:join] || EMPTY_ARRAY) + [join]).freeze}
opts[:last_joined_table] = table_name unless options[:reset_implicit_qualifier] == false
opts[:num_dataset_sources] = table_alias_num if table_alias_num
clone(opts)
end
CONDITIONED_JOIN_TYPES.each do |jtype|
class_eval("def #{jtype}_join(*args, &block); join_table(:#{jtype}, *args, &block) end", __FILE__, __LINE__)
end
UNCONDITIONED_JOIN_TYPES.each do |jtype|
class_eval(<<-END, __FILE__, __LINE__+1)
def #{jtype}_join(table, opts=Sequel::OPTS)
raise(Sequel::Error, '#{jtype}_join does not accept join table blocks') if defined?(yield)
raise(Sequel::Error, '#{jtype}_join 2nd argument should be an options hash, not conditions') unless opts.is_a?(Hash)
join_table(:#{jtype}, table, nil, opts)
end
END
end
# Marks this dataset as a lateral dataset. If used in another dataset's FROM
# or JOIN clauses, it will surround the subquery with LATERAL to enable it
# to deal with previous tables in the query:
#
# DB.from(:a, DB[:b].where(Sequel[:a][:c]=>Sequel[:b][:d]).lateral)
# # SELECT * FROM a, LATERAL (SELECT * FROM b WHERE (a.c = b.d))
def lateral
cached_dataset(:_lateral_ds){clone(:lateral=>true)}
end
# If given an integer, the dataset will contain only the first l results.
# If given a range, it will contain only those at offsets within that
# range. If a second argument is given, it is used as an offset. To use
# an offset without a limit, pass nil as the first argument.
#
# DB[:items].limit(10) # SELECT * FROM items LIMIT 10
# DB[:items].limit(10, 20) # SELECT * FROM items LIMIT 10 OFFSET 20
# DB[:items].limit(10...20) # SELECT * FROM items LIMIT 10 OFFSET 10
# DB[:items].limit(10..20) # SELECT * FROM items LIMIT 11 OFFSET 10
# DB[:items].limit(nil, 20) # SELECT * FROM items OFFSET 20
def limit(l, o = (no_offset = true; nil))
return from_self.limit(l, o) if @opts[:sql]
if l.is_a?(Range)
no_offset = false
o = l.first
l = l.last - l.first + (l.exclude_end? ? 0 : 1)
end
l = l.to_i if l.is_a?(String) && !l.is_a?(LiteralString)
if l.is_a?(Integer)
raise(Error, 'Limits must be greater than or equal to 1') unless l >= 1
end
ds = clone(:limit=>l)
ds = ds.offset(o) unless no_offset
ds
end
# Returns a cloned dataset with the given lock style. If style is a
# string, it will be used directly. You should never pass a string
# to this method that is derived from user input, as that can lead to
# SQL injection.
#
# A symbol may be used for database independent locking behavior, but
# all supported symbols have separate methods (e.g. for_update).
#
# DB[:items].lock_style('FOR SHARE NOWAIT')
# # SELECT * FROM items FOR SHARE NOWAIT
# DB[:items].lock_style('FOR UPDATE OF table1 SKIP LOCKED')
# # SELECT * FROM items FOR UPDATE OF table1 SKIP LOCKED
def lock_style(style)
clone(:lock => style)
end
# Return a dataset with a WHEN MATCHED THEN DELETE clause added to the
# MERGE statement. If a block is passed, treat it as a virtual row and
# use it as additional conditions for the match.
#
# merge_delete
# # WHEN MATCHED THEN DELETE
#
# merge_delete{a > 30}
# # WHEN MATCHED AND (a > 30) THEN DELETE
def merge_delete(&block)
_merge_when(:type=>:delete, &block)
end
# Return a dataset with a WHEN NOT MATCHED THEN INSERT clause added to the
# MERGE statement. If a block is passed, treat it as a virtual row and
# use it as additional conditions for the match.
#
# The arguments provided can be any arguments that would be accepted by
# #insert.
#
# merge_insert(i1: :i2, a: Sequel[:b]+11)
# # WHEN NOT MATCHED THEN INSERT (i1, a) VALUES (i2, (b + 11))
#
# merge_insert(:i2, Sequel[:b]+11){a > 30}
# # WHEN NOT MATCHED AND (a > 30) THEN INSERT VALUES (i2, (b + 11))
def merge_insert(*values, &block)
_merge_when(:type=>:insert, :values=>values, &block)
end
# Return a dataset with a WHEN MATCHED THEN UPDATE clause added to the
# MERGE statement. If a block is passed, treat it as a virtual row and
# use it as additional conditions for the match.
#
# merge_update(i1: Sequel[:i1]+:i2+10, a: Sequel[:a]+:b+20)
# # WHEN MATCHED THEN UPDATE SET i1 = (i1 + i2 + 10), a = (a + b + 20)
#
# merge_update(i1: :i2){a > 30}
# # WHEN MATCHED AND (a > 30) THEN UPDATE SET i1 = i2
def merge_update(values, &block)
_merge_when(:type=>:update, :values=>values, &block)
end
# Return a dataset with the source and join condition to use for the MERGE statement.
#
# merge_using(:m2, i1: :i2)
# # USING m2 ON (i1 = i2)
def merge_using(source, join_condition)
clone(:merge_using => [source, join_condition].freeze)
end
# Returns a cloned dataset without a row_proc.
#
# ds = DB[:items].with_row_proc(:invert.to_proc)
# ds.all # => [{2=>:id}]
# ds.naked.all # => [{:id=>2}]
def naked
cached_dataset(:_naked_ds){with_row_proc(nil)}
end
# Returns a copy of the dataset that will raise a DatabaseLockTimeout instead
# of waiting for rows that are locked by another transaction
#
# DB[:items].for_update.nowait
# # SELECT * FROM items FOR UPDATE NOWAIT
def nowait
cached_dataset(:_nowait_ds) do
raise(Error, 'This dataset does not support raising errors instead of waiting for locked rows') unless supports_nowait?
clone(:nowait=>true)
end
end
# Returns a copy of the dataset with the specified offset. Can be safely combined with limit.
# If you call limit with an offset argument, it will override an offset
# previously set by this method.
#
# DB[:items].offset(10) # SELECT * FROM items OFFSET 10
def offset(o)
o = o.to_i if o.is_a?(String) && !o.is_a?(LiteralString)
if o.is_a?(Integer)
raise(Error, 'Offsets must be greater than or equal to 0') unless o >= 0
end
clone(:offset => o)
end
# Adds an alternate filter to an existing WHERE clause using OR. If there
# is no WHERE clause, then the default is WHERE true, and OR would be redundant,
# so the receiver is returned unmodified in that case.
#
# DB[:items].where(:a).or(:b) # SELECT * FROM items WHERE a OR b
# DB[:items].or(:b) # SELECT * FROM items
def or(*cond, &block)
if @opts[:where].nil?
self
else
add_filter(:where, cond, false, :OR, &block)
end
end
# Returns a copy of the dataset with the order changed. If the dataset has an
# existing order, it is ignored and overwritten with this order. If a nil is given
# the returned dataset has no order. This can accept multiple arguments
# of varying kinds, such as SQL functions. If a block is given, it is treated
# as a virtual row block, similar to +where+.
#
# DB[:items].order(:name) # SELECT * FROM items ORDER BY name
# DB[:items].order(:a, :b) # SELECT * FROM items ORDER BY a, b
# DB[:items].order(Sequel.lit('a + b')) # SELECT * FROM items ORDER BY a + b
# DB[:items].order(Sequel[:a] + :b) # SELECT * FROM items ORDER BY (a + b)
# DB[:items].order(Sequel.desc(:name)) # SELECT * FROM items ORDER BY name DESC
# DB[:items].order(Sequel.asc(:name, nulls: :last)) # SELECT * FROM items ORDER BY name ASC NULLS LAST
# DB[:items].order{sum(name).desc} # SELECT * FROM items ORDER BY sum(name) DESC
# DB[:items].order(nil) # SELECT * FROM items
def order(*columns, &block)
virtual_row_columns(columns, block)
clone(:order => (columns.compact.empty?) ? nil : columns.freeze)
end
# Returns a copy of the dataset with the order columns added
# to the end of the existing order.
#
# DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b
# DB[:items].order(:a).order_append(:b) # SELECT * FROM items ORDER BY a, b
def order_append(*columns, &block)
columns = @opts[:order] + columns if @opts[:order]
order(*columns, &block)
end
# Alias of order
def order_by(*columns, &block)
order(*columns, &block)
end
# Alias of order_append.
def order_more(*columns, &block)
order_append(*columns, &block)
end
# Returns a copy of the dataset with the order columns added
# to the beginning of the existing order.
#
# DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b
# DB[:items].order(:a).order_prepend(:b) # SELECT * FROM items ORDER BY b, a
def order_prepend(*columns, &block)
ds = order(*columns, &block)
@opts[:order] ? ds.order_append(*@opts[:order]) : ds
end
# Qualify to the given table, or first source if no table is given.
#
# DB[:items].where(id: 1).qualify
# # SELECT items.* FROM items WHERE (items.id = 1)
#
# DB[:items].where(id: 1).qualify(:i)
# # SELECT i.* FROM items WHERE (i.id = 1)
def qualify(table=(cache=true; first_source))
o = @opts
return self if o[:sql]
pr = proc do
h = {}
(o.keys & QUALIFY_KEYS).each do |k|
h[k] = qualified_expression(o[k], table)
end
h[:select] = [SQL::ColumnAll.new(table)].freeze if !o[:select] || o[:select].empty?
clone(h)
end
cache ? cached_dataset(:_qualify_ds, &pr) : pr.call
end
# Modify the RETURNING clause, only supported on a few databases. If returning
# is used, instead of insert returning the autogenerated primary key or
# update/delete returning the number of modified rows, results are
# returned using +fetch_rows+.
#
# DB[:items].returning # RETURNING *
# DB[:items].returning(nil) # RETURNING NULL
# DB[:items].returning(:id, :name) # RETURNING id, name
#
# DB[:items].returning.insert(a: 1) do |hash|
# # hash for each row inserted, with values for all columns
# end
# DB[:items].returning.update(a: 1) do |hash|
# # hash for each row updated, with values for all columns
# end
# DB[:items].returning.delete(a: 1) do |hash|
# # hash for each row deleted, with values for all columns
# end
def returning(*values)
if values.empty?
cached_dataset(:_returning_ds) do
raise Error, "RETURNING is not supported on #{db.database_type}" unless supports_returning?(:insert)
clone(:returning=>EMPTY_ARRAY)
end
else
raise Error, "RETURNING is not supported on #{db.database_type}" unless supports_returning?(:insert)
clone(:returning=>values.freeze)
end
end
# Returns a copy of the dataset with the order reversed. If no order is
# given, the existing order is inverted.
#
# DB[:items].reverse(:id) # SELECT * FROM items ORDER BY id DESC
# DB[:items].reverse{foo(bar)} # SELECT * FROM items ORDER BY foo(bar) DESC
# DB[:items].order(:id).reverse # SELECT * FROM items ORDER BY id DESC
# DB[:items].order(:id).reverse(Sequel.desc(:name)) # SELECT * FROM items ORDER BY name ASC
def reverse(*order, &block)
if order.empty? && !block
cached_dataset(:_reverse_ds){order(*invert_order(@opts[:order]))}
else
virtual_row_columns(order, block)
order(*invert_order(order.empty? ? @opts[:order] : order.freeze))
end
end
# Alias of +reverse+
def reverse_order(*order, &block)
reverse(*order, &block)
end
# Returns a copy of the dataset with the columns selected changed
# to the given columns. This also takes a virtual row block,
# similar to +where+.
#
# DB[:items].select(:a) # SELECT a FROM items
# DB[:items].select(:a, :b) # SELECT a, b FROM items
# DB[:items].select{[a, sum(b)]} # SELECT a, sum(b) FROM items
def select(*columns, &block)
virtual_row_columns(columns, block)
clone(:select => columns.freeze)
end
# Returns a copy of the dataset selecting the wildcard if no arguments
# are given. If arguments are given, treat them as tables and select
# all columns (using the wildcard) from each table.
#
# DB[:items].select(:a).select_all # SELECT * FROM items
# DB[:items].select_all(:items) # SELECT items.* FROM items
# DB[:items].select_all(:items, :foo) # SELECT items.*, foo.* FROM items
def select_all(*tables)
if tables.empty?
cached_dataset(:_select_all_ds){clone(:select => nil)}
else
select(*tables.map{|t| i, a = split_alias(t); a || i}.map!{|t| SQL::ColumnAll.new(t)}.freeze)
end
end
# Returns a copy of the dataset with the given columns added
# to the existing selected columns. If no columns are currently selected,
# it will select the columns given in addition to *.
#
# DB[:items].select(:a).select(:b) # SELECT b FROM items
# DB[:items].select(:a).select_append(:b) # SELECT a, b FROM items
# DB[:items].select_append(:b) # SELECT *, b FROM items
def select_append(*columns, &block)
cur_sel = @opts[:select]
if !cur_sel || cur_sel.empty?
unless supports_select_all_and_column?
return select_all(*(Array(@opts[:from]) + Array(@opts[:join]))).select_append(*columns, &block)
end
cur_sel = [WILDCARD]
end
select(*(cur_sel + columns), &block)
end
# Set both the select and group clauses with the given +columns+.
# Column aliases may be supplied, and will be included in the select clause.
# This also takes a virtual row block similar to +where+.
#
# DB[:items].select_group(:a, :b)
# # SELECT a, b FROM items GROUP BY a, b
#
# DB[:items].select_group(Sequel[:c].as(:a)){f(c2)}
# # SELECT c AS a, f(c2) FROM items GROUP BY c, f(c2)
def select_group(*columns, &block)
virtual_row_columns(columns, block)
select(*columns).group(*columns.map{|c| unaliased_identifier(c)})
end
# Alias for select_append.
def select_more(*columns, &block)
select_append(*columns, &block)
end
# Set the server for this dataset to use. Used to pick a specific database
# shard to run a query against, or to override the default (where SELECT uses
# :read_only database and all other queries use the :default database). This
# method is always available but is only useful when database sharding is being
# used.
#
# DB[:items].all # Uses the :read_only or :default server
# DB[:items].delete # Uses the :default server
# DB[:items].server(:blah).delete # Uses the :blah server
def server(servr)
clone(:server=>servr)
end
# If the database uses sharding and the current dataset has not had a
# server set, return a cloned dataset that uses the given server.
# Otherwise, return the receiver directly instead of returning a clone.
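#
# A hedged example (assumes a sharded Database with a :read_only server configured):
#
# DB[:items].server?(:read_only) # Uses the :read_only server
# DB[:items].server(:s1).server?(:read_only) # Uses the :s1 server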
def server?(server)
if db.sharded? && !opts[:server]
server(server)
else
self
end
end
# Specify that the check for limits/offsets when updating/deleting be skipped for the dataset.
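#
# A hedged example (on most databases the limit is simply not included in the DELETE):
#
# DB[:items].limit(10).skip_limit_check.delete
# # DELETE FROM items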
def skip_limit_check
cached_dataset(:_skip_limit_check_ds) do
clone(:skip_limit_check=>true)
end
end
# Skip locked rows when returning results from this dataset.
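#
# A hedged example (assumes a database supporting SKIP LOCKED, such as PostgreSQL 9.5+):
#
# DB[:items].for_update.skip_locked
# # SELECT * FROM items FOR UPDATE SKIP LOCKED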
def skip_locked
cached_dataset(:_skip_locked_ds) do
raise(Error, 'This dataset does not support skipping locked rows') unless supports_skip_locked?
clone(:skip_locked=>true)
end
end
# Returns a copy of the dataset with no filters (HAVING or WHERE clause) applied.
#
# DB[:items].group(:a).having(a: 1).where(:b).unfiltered
# # SELECT * FROM items GROUP BY a
def unfiltered
cached_dataset(:_unfiltered_ds){clone(:where => nil, :having => nil)}
end
# Returns a copy of the dataset with no grouping (GROUP or HAVING clause) applied.
#
# DB[:items].group(:a).having(a: 1).where(:b).ungrouped
# # SELECT * FROM items WHERE b
def ungrouped
cached_dataset(:_ungrouped_ds){clone(:group => nil, :having => nil)}
end
# Adds a UNION clause using a second dataset object.
# A UNION compound dataset returns all rows in either the current dataset
# or the given dataset.
# Options:
# :alias :: Use the given value as the from_self alias
# :all :: Set to true to use UNION ALL instead of UNION, so duplicate rows can occur
# :from_self :: Set to false to not wrap the returned dataset in a from_self, use with care.
#
# DB[:items].union(DB[:other_items])
# # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS t1
#
# DB[:items].union(DB[:other_items], all: true, from_self: false)
# # SELECT * FROM items UNION ALL SELECT * FROM other_items
#
# DB[:items].union(DB[:other_items], alias: :i)
# # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS i
def union(dataset, opts=OPTS)
compound_clone(:union, dataset, opts)
end
# Returns a copy of the dataset with no limit or offset.
#
# DB[:items].limit(10, 20).unlimited # SELECT * FROM items
def unlimited
cached_dataset(:_unlimited_ds){clone(:limit=>nil, :offset=>nil)}
end
# Returns a copy of the dataset with no order.
#
# DB[:items].order(:a).unordered # SELECT * FROM items
def unordered
cached_dataset(:_unordered_ds){clone(:order=>nil)}
end
# Returns a copy of the dataset with the given WHERE conditions imposed upon it.
#
# Accepts the following argument types:
#
# Hash, Array of pairs :: list of equality/inclusion expressions
# Symbol :: taken as a boolean column argument (e.g. WHERE active)
# Sequel::SQL::BooleanExpression, Sequel::LiteralString :: an existing condition expression, probably created
# using the Sequel expression filter DSL.
#
# where also accepts a block, which should return one of the above argument
# types, and is treated the same way. This block yields a virtual row object,
# which is easy to use to create identifiers and functions. For more details
# on the virtual row support, see the {"Virtual Rows" guide}[rdoc-ref:doc/virtual_rows.rdoc]
#
# If both a block and regular argument are provided, they get ANDed together.
#
# Examples:
#
# DB[:items].where(id: 3)
# # SELECT * FROM items WHERE (id = 3)
#
# DB[:items].where(Sequel.lit('price < ?', 100))
# # SELECT * FROM items WHERE price < 100
#
# DB[:items].where([[:id, [1,2,3]], [:id, 0..10]])
# # SELECT * FROM items WHERE ((id IN (1, 2, 3)) AND ((id >= 0) AND (id <= 10)))
#
# DB[:items].where(Sequel.lit('price < 100'))
# # SELECT * FROM items WHERE price < 100
#
# DB[:items].where(:active)
# # SELECT * FROM items WHERE active
#
# DB[:items].where{price < 100}
# # SELECT * FROM items WHERE (price < 100)
#
# Multiple where calls can be chained for scoping:
#
# software = dataset.where(category: 'software').where{price < 100}
# # SELECT * FROM items WHERE ((category = 'software') AND (price < 100))
#
# See the {"Dataset Filtering" guide}[rdoc-ref:doc/dataset_filtering.rdoc] for more examples and details.
def where(*cond, &block)
add_filter(:where, cond, &block)
end
# Return a clone of the dataset with an additional named window that can be
# referenced in window functions. See Sequel::SQL::Window for a list of
# options that can be passed in. Example:
#
# DB[:items].window(:w, partition: :c1, order: :c2)
# # SELECT * FROM items WINDOW w AS (PARTITION BY c1 ORDER BY c2)
def window(name, opts)
clone(:window=>((@opts[:window]||EMPTY_ARRAY) + [[name, SQL::Window.new(opts)].freeze]).freeze)
end
# Add a common table expression (CTE) with the given name and a dataset that defines the CTE.
# A common table expression acts as an inline view for the query.
#
# Options:
# :args :: Specify the arguments/columns for the CTE, should be an array of symbols.
# :recursive :: Specify that this is a recursive CTE
# :materialized :: Set to false to force inlining of the CTE, or true to force not inlining
# the CTE (PostgreSQL 12+/SQLite 3.35+).
#
# DB[:items].with(:items, DB[:syx].where(Sequel[:name].like('A%')))
# # WITH items AS (SELECT * FROM syx WHERE (name LIKE 'A%' ESCAPE '\')) SELECT * FROM items
def with(name, dataset, opts=OPTS)
raise(Error, 'This dataset does not support common table expressions') unless supports_cte?
if hoist_cte?(dataset)
s, ds = hoist_cte(dataset)
s.with(name, ds, opts)
else
clone(:with=>((@opts[:with]||EMPTY_ARRAY) + [Hash[opts].merge!(:name=>name, :dataset=>dataset)]).freeze)
end
end
# Add a recursive common table expression (CTE) with the given name, a dataset that
# defines the nonrecursive part of the CTE, and a dataset that defines the recursive part
# of the CTE.
#
# Options:
# :args :: Specify the arguments/columns for the CTE, should be an array of symbols.
# :union_all :: Set to false to use UNION instead of UNION ALL combining the nonrecursive and recursive parts.
#
# PostgreSQL 14+ Options:
# :cycle :: Stop recursive searching when a cycle is detected. Includes two columns in the
# result of the CTE, a cycle column indicating whether a cycle was detected for
# the current row, and a path column for the path traversed to get to the current
# row. If given, must be a hash with the following keys:
# :columns :: (required) The column or array of columns to use to detect a cycle.
# If the value of these columns match columns already traversed, then
# a cycle is detected, and recursive searching will not traverse beyond
# the cycle (the CTE will include the row where the cycle was detected).
# :cycle_column :: The name of the cycle column in the output, defaults to :is_cycle.
# :cycle_value :: The value of the cycle column in the output if the current row was
# detected as a cycle, defaults to true.
# :noncycle_value :: The value of the cycle column in the output if the current row
# was not detected as a cycle, defaults to false. Only respected
# if :cycle_value is given.
# :path_column :: The name of the path column in the output, defaults to :path.
# :search :: Include an order column in the result of the CTE that allows for breadth or
# depth first searching. If given, must be a hash with the following keys:
# :by :: (required) The column or array of columns to search by.
# :order_column :: The name of the order column in the output, defaults to :ordercol.
# :type :: Set to :breadth to use breadth-first searching (depth-first searching
# is the default).
#
# DB[:t].with_recursive(:t,
# DB[:i1].select(:id, :parent_id).where(parent_id: nil),
# DB[:i1].join(:t, id: :parent_id).select(Sequel[:i1][:id], Sequel[:i1][:parent_id]),
# args: [:id, :parent_id])
#
# # WITH RECURSIVE t(id, parent_id) AS (
# # SELECT id, parent_id FROM i1 WHERE (parent_id IS NULL)
# # UNION ALL
# # SELECT i1.id, i1.parent_id FROM i1 INNER JOIN t ON (t.id = i1.parent_id)
# # ) SELECT * FROM t
#
# DB[:t].with_recursive(:t,
# DB[:i1].where(parent_id: nil),
# DB[:i1].join(:t, id: :parent_id).select_all(:i1),
# search: {by: :id, type: :breadth},
# cycle: {columns: :id, cycle_value: 1, noncycle_value: 2})
#
# # WITH RECURSIVE t AS (
# # SELECT * FROM i1 WHERE (parent_id IS NULL)
# # UNION ALL
# # (SELECT i1.* FROM i1 INNER JOIN t ON (t.id = i1.parent_id))
# # )
# # SEARCH BREADTH FIRST BY id SET ordercol
# # CYCLE id SET is_cycle TO 1 DEFAULT 2 USING path
# # SELECT * FROM t
def with_recursive(name, nonrecursive, recursive, opts=OPTS)
raise(Error, 'This dataset does not support common table expressions') unless supports_cte?
if hoist_cte?(nonrecursive)
s, ds = hoist_cte(nonrecursive)
s.with_recursive(name, ds, recursive, opts)
elsif hoist_cte?(recursive)
s, ds = hoist_cte(recursive)
s.with_recursive(name, nonrecursive, ds, opts)
else
clone(:with=>((@opts[:with]||EMPTY_ARRAY) + [Hash[opts].merge!(:recursive=>true, :name=>name, :dataset=>nonrecursive.union(recursive, {:all=>opts[:union_all] != false, :from_self=>false}))]).freeze)
end
end
if TRUE_FREEZE
# Return a clone of the dataset extended with the given modules.
# Note that like Object#extend, when multiple modules are provided
# as arguments the cloned dataset is extended with the modules in reverse
# order. If a block is provided, a DatasetModule is created using the block and
# the clone is extended with that module after any modules given as arguments.
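#
# A hedged example:
#
# ds = DB[:items].with_extend do
#   def first_id
#     get(:id)
#   end
# end
# ds.first_id # SELECT id FROM items LIMIT 1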
def with_extend(*mods, &block)
c = _clone(:freeze=>false)
c.extend(*mods) unless mods.empty?
c.extend(DatasetModule.new(&block)) if block
c.freeze
end
else
# :nocov:
def with_extend(*mods, &block) # :nodoc:
c = clone
c.extend(*mods) unless mods.empty?
c.extend(DatasetModule.new(&block)) if block
c
end
# :nocov:
end
# Returns a cloned dataset with the given row_proc.
#
# ds = DB[:items]
# ds.all # => [{:id=>2}]
# ds.with_row_proc(:invert.to_proc).all # => [{2=>:id}]
def with_row_proc(callable)
clone(:row_proc=>callable)
end
# Returns a copy of the dataset with the static SQL used. This is useful if you want
# to keep the same row_proc/graph, but change the SQL used to custom SQL.
#
# DB[:items].with_sql('SELECT * FROM foo') # SELECT * FROM foo
#
# You can use placeholders in your SQL and provide arguments for those placeholders:
#
# DB[:items].with_sql('SELECT ? FROM foo', 1) # SELECT 1 FROM foo
#
# You can also provide a method name and arguments to call to get the SQL:
#
# DB[:items].with_sql(:insert_sql, b: 1) # INSERT INTO items (b) VALUES (1)
#
# Note that datasets that specify custom SQL using this method will generally
# ignore future dataset methods that modify the SQL used, as specifying custom SQL
# overrides Sequel's SQL generator. You should probably limit yourself to the following
# dataset methods when using this method, or use the implicit_subquery extension:
#
# * each
# * all
# * single_record (if only one record could be returned)
# * single_value (if only one record could be returned, and a single column is selected)
# * map
# * as_hash
# * to_hash
# * to_hash_groups
# * delete (if a DELETE statement)
# * update (if an UPDATE statement, with no arguments)
# * insert (if an INSERT statement, with no arguments)
# * truncate (if a TRUNCATE statement, with no arguments)
def with_sql(sql, *args)
if sql.is_a?(Symbol)
sql = public_send(sql, *args)
else
sql = SQL::PlaceholderLiteralString.new(sql, args) unless args.empty?
end
clone(:sql=>sql)
end
protected
# Add the dataset to the list of compounds
def compound_clone(type, dataset, opts)
if dataset.is_a?(Dataset) && dataset.opts[:with] && !supports_cte_in_compounds?
s, ds = hoist_cte(dataset)
return s.compound_clone(type, ds, opts)
end
ds = compound_from_self.clone(:compounds=>(Array(@opts[:compounds]).map(&:dup) + [[type, dataset.compound_from_self, opts[:all]].freeze]).freeze)
opts[:from_self] == false ? ds : ds.from_self(opts)
end
# Return true if the dataset has a non-nil value for any key in opts.
def options_overlap(opts)
!(@opts.map{|k,v| k unless v.nil?}.compact & opts).empty?
end
# From types allowed to be considered a simple_select_all
SIMPLE_SELECT_ALL_ALLOWED_FROM = [Symbol, SQL::Identifier, SQL::QualifiedIdentifier].freeze
# Whether this dataset is a simple select from an underlying table, such as:
#
# SELECT * FROM table
# SELECT table.* FROM table
def simple_select_all?
return false unless (f = @opts[:from]) && f.length == 1
o = @opts.reject{|k,v| v.nil? || non_sql_option?(k)}
from = f.first
from = from.expression if from.is_a?(SQL::AliasedExpression)
if SIMPLE_SELECT_ALL_ALLOWED_FROM.any?{|x| from.is_a?(x)}
case o.length
when 1
true
when 2
(s = o[:select]) && s.length == 1 && s.first.is_a?(SQL::ColumnAll)
else
false
end
else
false
end
end
private
# Load the extensions into the receiver, without checking if the receiver is frozen.
def _extension!(exts)
Sequel.extension(*exts)
exts.each do |ext|
if pr = Sequel.synchronize{EXTENSIONS[ext]}
pr.call(self)
else
raise(Error, "Extension #{ext} does not have specific support handling individual datasets (try: Sequel.extension #{ext.inspect})")
end
end
self
end
# If invert is true, invert the condition.
def _invert_filter(cond, invert)
if invert
SQL::BooleanExpression.invert(cond)
else
cond
end
end
# Append to the current MERGE WHEN clauses.
# Mutates the hash to add the conditions, if a virtual row block is passed.
def _merge_when(hash, &block)
hash[:conditions] = Sequel.virtual_row(&block) if block
if merge_when = @opts[:merge_when]
clone(:merge_when => (merge_when.dup << hash.freeze).freeze)
else
clone(:merge_when => [hash.freeze].freeze)
end
end
# Add the given filter condition. Arguments:
# clause :: Symbol specifying which SQL clause to affect, should be :where or :having
# cond :: The filter condition to add
# invert :: Whether the condition should be inverted (true or false)
# combine :: How to combine the condition with an existing condition, should be :AND or :OR
def add_filter(clause, cond, invert=false, combine=:AND, &block)
if cond == EMPTY_ARRAY && !block
raise Error, "must provide an argument to a filtering method if not passing a block"
end
cond = cond.first if cond.size == 1
empty = cond == OPTS || cond == EMPTY_ARRAY
if empty && !block
self
else
if cond == nil
cond = Sequel::NULL
end
if empty && block
cond = nil
end
cond = _invert_filter(filter_expr(cond, &block), invert)
cond = SQL::BooleanExpression.new(combine, @opts[clause], cond) if @opts[clause]
if cond.nil?
cond = Sequel::NULL
end
clone(clause => cond)
end
end
# The default :qualify option to use for join tables if one is not specified.
def default_join_table_qualification
:symbol
end
# SQL expression object based on the expr type. See +where+.
def filter_expr(expr = nil, &block)
expr = nil if expr == EMPTY_ARRAY
if block
cond = filter_expr(Sequel.virtual_row(&block))
cond = SQL::BooleanExpression.new(:AND, filter_expr(expr), cond) if expr
return cond
end
case expr
when Hash
SQL::BooleanExpression.from_value_pairs(expr)
when Array
if Sequel.condition_specifier?(expr)
SQL::BooleanExpression.from_value_pairs(expr)
else
raise Error, "Invalid filter expression: #{expr.inspect}"
end
when LiteralString
LiteralString.new("(#{expr})")
when Numeric, SQL::NumericExpression, SQL::StringExpression, Proc, String
raise Error, "Invalid filter expression: #{expr.inspect}"
when TrueClass, FalseClass
if supports_where_true?
SQL::BooleanExpression.new(:NOOP, expr)
elsif expr
SQL::Constants::SQLTRUE
else
SQL::Constants::SQLFALSE
end
when PlaceholderLiteralizer::Argument
expr.transform{|v| filter_expr(v)}
when SQL::PlaceholderLiteralString
expr.with_parens
else
expr
end
end
# Return two datasets, the first a clone of the receiver with the WITH
# clause from the given dataset added to it, and the second a clone of
# the given dataset with the WITH clause removed.
def hoist_cte(ds)
[clone(:with => ((opts[:with] || EMPTY_ARRAY) + ds.opts[:with]).freeze), ds.clone(:with => nil)]
end
# Whether CTEs need to be hoisted from the given ds into the current ds.
def hoist_cte?(ds)
ds.is_a?(Dataset) && ds.opts[:with] && !supports_cte_in_subqueries?
end
# Inverts the given order by breaking it into a list of column references
# and inverting them.
#
# DB[:items].invert_order([Sequel.desc(:id)]) #=> [Sequel.asc(:id)]
# DB[:items].invert_order([:category, Sequel.desc(:price)]) #=> [Sequel.desc(:category), Sequel.asc(:price)]
def invert_order(order)
return unless order
order.map do |f|
case f
when SQL::OrderedExpression
f.invert
else
SQL::OrderedExpression.new(f)
end
end
end
# Return self if the dataset already has a server, or a cloned dataset with the
# default server otherwise.
def default_server
server?(:default)
end
# Whether the given option key does not affect the generated SQL.
def non_sql_option?(key)
NON_SQL_OPTIONS.include?(key)
end
# Treat the +block+ as a virtual_row block if not +nil+ and
# add the resulting columns to the +columns+ array (modifies +columns+).
def virtual_row_columns(columns, block)
if block
v = Sequel.virtual_row(&block)
if v.is_a?(Array)
columns.concat(v)
else
columns << v
end
end
end
end
end
sequel-5.63.0/lib/sequel/dataset/sql.rb 0000664 0000000 0000000 00000156141 14342141206 0017725 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
class Dataset
# ---------------------
# :section: 3 - User Methods relating to SQL Creation
# These are methods you can call to see what SQL will be generated by the dataset.
# ---------------------
# Returns an EXISTS clause for the dataset as an SQL::PlaceholderLiteralString.
#
# DB.select(1).where(DB[:items].exists)
# # SELECT 1 WHERE (EXISTS (SELECT * FROM items))
def exists
SQL::PlaceholderLiteralString.new(EXISTS, [self], true)
end
# Returns an INSERT SQL query string. See +insert+.
#
# DB[:items].insert_sql(a: 1)
# # => "INSERT INTO items (a) VALUES (1)"
def insert_sql(*values)
return static_sql(@opts[:sql]) if @opts[:sql]
check_insert_allowed!
columns, values = _parse_insert_sql_args(values)
if values.is_a?(Array) && values.empty? && !insert_supports_empty_values?
columns, values = insert_empty_columns_values
elsif values.is_a?(Dataset) && hoist_cte?(values) && supports_cte?(:insert)
ds, values = hoist_cte(values)
return ds.clone(:columns=>columns, :values=>values).send(:_insert_sql)
end
clone(:columns=>columns, :values=>values).send(:_insert_sql)
end
# Append a literal representation of a value to the given SQL string.
#
# If an unsupported object is given, an +Error+ is raised.
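#
# A hedged example, using the non-appending +literal+ wrapper defined below:
#
# DB[:items].literal(1) # => "1"
# DB[:items].literal('a') # => "'a'"
# DB[:items].literal([1, 2]) # => "(1, 2)"
# DB[:items].literal(nil) # => "NULL"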
def literal_append(sql, v)
case v
when Symbol
if skip_symbol_cache?
literal_symbol_append(sql, v)
else
unless l = db.literal_symbol(v)
l = String.new
literal_symbol_append(l, v)
db.literal_symbol_set(v, l)
end
sql << l
end
when String
case v
when LiteralString
sql << v
when SQL::Blob
literal_blob_append(sql, v)
else
literal_string_append(sql, v)
end
when Integer
sql << literal_integer(v)
when Hash
literal_hash_append(sql, v)
when SQL::Expression
literal_expression_append(sql, v)
when Float
sql << literal_float(v)
when BigDecimal
sql << literal_big_decimal(v)
when NilClass
sql << literal_nil
when TrueClass
sql << literal_true
when FalseClass
sql << literal_false
when Array
literal_array_append(sql, v)
when Time
v.is_a?(SQLTime) ? literal_sqltime_append(sql, v) : literal_time_append(sql, v)
when DateTime
literal_datetime_append(sql, v)
when Date
sql << literal_date(v)
when Dataset
literal_dataset_append(sql, v)
else
literal_other_append(sql, v)
end
end
# The SQL to use for the MERGE statement.
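#
# A hedged example (assumes a database with MERGE support and identifier quoting disabled):
#
# DB[:items].
#   merge_using(:other_items, id: :item_id).
#   merge_insert(id: :item_id).
#   merge_sql
# # MERGE INTO items USING other_items ON (id = item_id)
# # WHEN NOT MATCHED THEN INSERT (id) VALUES (item_id)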
def merge_sql
raise Error, "This database doesn't support MERGE" unless supports_merge?
if sql = opts[:sql]
return static_sql(sql)
end
if sql = cache_get(:_merge_sql)
return sql
end
source, join_condition = @opts[:merge_using]
raise Error, "No USING clause for MERGE" unless source
sql = @opts[:append_sql] || sql_string_origin
select_with_sql(sql)
sql << "MERGE INTO "
source_list_append(sql, @opts[:from])
sql << " USING "
identifier_append(sql, source)
sql << " ON "
literal_append(sql, join_condition)
_merge_when_sql(sql)
cache_set(:_merge_sql, sql) if cache_sql?
sql
end
# Returns an array of insert statements for inserting multiple records.
# This method is used by +multi_insert+ to format insert statements and
# expects a keys array and an array of value arrays.
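#
# A hedged example using the default :separate strategy:
#
# DB[:items].multi_insert_sql([:a], [[1], [2]])
# # => ["INSERT INTO items (a) VALUES (1)", "INSERT INTO items (a) VALUES (2)"]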
def multi_insert_sql(columns, values)
case multi_insert_sql_strategy
when :values
sql = LiteralString.new('VALUES ')
expression_list_append(sql, values.map{|r| Array(r)})
[insert_sql(columns, sql)]
when :union
c = false
sql = LiteralString.new
u = ' UNION ALL SELECT '
f = empty_from_sql
values.each do |v|
if c
sql << u
else
sql << 'SELECT '
c = true
end
expression_list_append(sql, v)
sql << f if f
end
[insert_sql(columns, sql)]
else
values.map{|r| insert_sql(columns, r)}
end
end
# Same as +select_sql+, not aliased directly to make subclassing simpler.
def sql
select_sql
end
# Returns a TRUNCATE SQL query string. See +truncate+
#
# DB[:items].truncate_sql # => 'TRUNCATE items'
def truncate_sql
if opts[:sql]
static_sql(opts[:sql])
else
check_truncation_allowed!
check_not_limited!(:truncate)
raise(InvalidOperation, "Can't truncate filtered datasets") if opts[:where] || opts[:having]
t = String.new
source_list_append(t, opts[:from])
_truncate_sql(t)
end
end
# Formats an UPDATE statement using the given values. See +update+.
#
# DB[:items].update_sql(price: 100, category: 'software')
# # => "UPDATE items SET price = 100, category = 'software'"
#
# Raises an +Error+ if the dataset is grouped or includes more
# than one table.
def update_sql(values = OPTS)
return static_sql(opts[:sql]) if opts[:sql]
check_update_allowed!
check_not_limited!(:update)
case values
when LiteralString
# nothing
when String
raise Error, "plain string passed to Dataset#update is not supported, use Sequel.lit to use a literal string"
end
clone(:values=>values).send(:_update_sql)
end
# ---------------------
# :section: 9 - Internal Methods relating to SQL Creation
# These methods, while public, are not designed to be used directly by the end user.
# ---------------------
# Given a type (e.g. select) and an array of clauses,
# return an array of methods to call to build the SQL string.
def self.clause_methods(type, clauses)
clauses.map{|clause| :"#{type}_#{clause}_sql"}.freeze
end
# Define a dataset literalization method for the given type in the given module,
# using the given clauses.
#
# Arguments:
# mod :: Module in which to define method
# type :: Type of SQL literalization method to create, either :select, :insert, :update, or :delete
# clauses :: array of clauses that make up the SQL query for the type. This can either be a single
# array of symbols/strings, or it can be an array of pairs, with the first element in
# each pair being an if/elsif/else code fragment, and the second element in each pair
# being an array of symbol/strings for the appropriate branch.
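#
# A hedged sketch of roughly what def_sql_method(self, :delete, %w'delete from where')
# generates:
#
# def delete_sql
#   if sql = opts[:sql]; return static_sql(sql) end
#   if sql = cache_get(:_delete_sql); return sql end
#   check_delete_allowed!
#   check_not_limited!(:delete)
#   sql = @opts[:append_sql] || sql_string_origin
#   delete_delete_sql(sql)
#   delete_from_sql(sql)
#   delete_where_sql(sql)
#   cache_set(:_delete_sql, sql) if cache_sql?
#   sql
# end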
def self.def_sql_method(mod, type, clauses)
priv = type == :update || type == :insert
cacheable = type == :select || type == :delete
lines = []
lines << 'private' if priv
lines << "def #{'_' if priv}#{type}_sql"
lines << 'if sql = opts[:sql]; return static_sql(sql) end' unless priv
lines << "if sql = cache_get(:_#{type}_sql); return sql end" if cacheable
lines << 'check_delete_allowed!' << 'check_not_limited!(:delete)' if type == :delete
lines << 'sql = @opts[:append_sql] || sql_string_origin'
if clauses.all?{|c| c.is_a?(Array)}
clauses.each do |i, cs|
lines << i
lines.concat(clause_methods(type, cs).map{|x| "#{x}(sql)"})
end
lines << 'end'
else
lines.concat(clause_methods(type, clauses).map{|x| "#{x}(sql)"})
end
lines << "cache_set(:_#{type}_sql, sql) if cache_sql?" if cacheable
lines << 'sql'
lines << 'end'
mod.class_eval lines.join("\n"), __FILE__, __LINE__
end
def_sql_method(self, :delete, %w'delete from where')
def_sql_method(self, :insert, %w'insert into columns values')
def_sql_method(self, :select, %w'with select distinct columns from join where group having window compounds order limit lock')
def_sql_method(self, :update, %w'update table set where')
WILDCARD = LiteralString.new('*').freeze
COUNT_OF_ALL_AS_COUNT = SQL::Function.new(:count, WILDCARD).as(:count)
DEFAULT = LiteralString.new('DEFAULT').freeze
EXISTS = ['EXISTS '.freeze].freeze
BITWISE_METHOD_MAP = {:& =>:BITAND, :| => :BITOR, :^ => :BITXOR}.freeze
COUNT_FROM_SELF_OPTS = [:distinct, :group, :sql, :limit, :offset, :compounds].freeze
IS_LITERALS = {nil=>'NULL'.freeze, true=>'TRUE'.freeze, false=>'FALSE'.freeze}.freeze
QUALIFY_KEYS = [:select, :where, :having, :order, :group].freeze
IS_OPERATORS = ::Sequel::SQL::ComplexExpression::IS_OPERATORS
LIKE_OPERATORS = ::Sequel::SQL::ComplexExpression::LIKE_OPERATORS
N_ARITY_OPERATORS = ::Sequel::SQL::ComplexExpression::N_ARITY_OPERATORS
TWO_ARITY_OPERATORS = ::Sequel::SQL::ComplexExpression::TWO_ARITY_OPERATORS
REGEXP_OPERATORS = ::Sequel::SQL::ComplexExpression::REGEXP_OPERATORS
[:literal, :quote_identifier, :quote_schema_table].each do |meth|
class_eval(<<-END, __FILE__, __LINE__ + 1)
def #{meth}(*args, &block)
s = ''.dup
#{meth}_append(s, *args, &block)
s
end
END
end
# Append literalization of aliased expression to SQL string.
def aliased_expression_sql_append(sql, ae)
literal_append(sql, ae.expression)
as_sql_append(sql, ae.alias, ae.columns)
end
# Append literalization of array to SQL string.
def array_sql_append(sql, a)
if a.empty?
sql << '(NULL)'
else
sql << '('
expression_list_append(sql, a)
sql << ')'
end
end
# Append literalization of boolean constant to SQL string.
def boolean_constant_sql_append(sql, constant)
if (constant == true || constant == false) && !supports_where_true?
sql << (constant == true ? '(1 = 1)' : '(1 = 0)')
else
literal_append(sql, constant)
end
end
# Append literalization of case expression to SQL string.
def case_expression_sql_append(sql, ce)
sql << '(CASE'
if ce.expression?
sql << ' '
literal_append(sql, ce.expression)
end
w = " WHEN "
t = " THEN "
ce.conditions.each do |c,r|
sql << w
literal_append(sql, c)
sql << t
literal_append(sql, r)
end
sql << " ELSE "
literal_append(sql, ce.default)
sql << " END)"
end
# Append literalization of cast expression to SQL string.
def cast_sql_append(sql, expr, type)
sql << 'CAST('
literal_append(sql, expr)
sql << ' AS ' << db.cast_type_literal(type).to_s
sql << ')'
end
# Append literalization of column all selection to SQL string.
def column_all_sql_append(sql, ca)
qualified_identifier_sql_append(sql, ca.table, WILDCARD)
end
# Append literalization of complex expression to SQL string.
def complex_expression_sql_append(sql, op, args)
case op
when *IS_OPERATORS
r = args[1]
if r.nil? || supports_is_true?
raise(InvalidOperation, 'Invalid argument used for IS operator') unless val = IS_LITERALS[r]
sql << '('
literal_append(sql, args[0])
sql << ' ' << op.to_s << ' '
sql << val << ')'
elsif op == :IS
complex_expression_sql_append(sql, :"=", args)
else
complex_expression_sql_append(sql, :OR, [SQL::BooleanExpression.new(:"!=", *args), SQL::BooleanExpression.new(:IS, args[0], nil)])
end
when :IN, :"NOT IN"
cols = args[0]
vals = args[1]
col_array = true if cols.is_a?(Array)
if vals.is_a?(Array)
val_array = true
empty_val_array = vals == []
end
if empty_val_array
literal_append(sql, empty_array_value(op, cols))
elsif col_array
if !supports_multiple_column_in?
if val_array
expr = SQL::BooleanExpression.new(:OR, *vals.to_a.map{|vs| SQL::BooleanExpression.from_value_pairs(cols.to_a.zip(vs).map{|c, v| [c, v]})})
literal_append(sql, op == :IN ? expr : ~expr)
else
old_vals = vals
vals = vals.naked if vals.is_a?(Sequel::Dataset)
vals = vals.to_a
val_cols = old_vals.columns
complex_expression_sql_append(sql, op, [cols, vals.map!{|x| x.values_at(*val_cols)}])
end
else
# If the columns and values are both arrays, use array_sql instead of
# literal so that if values is an array of two element arrays, it
# will be treated as a value list instead of a condition specifier.
sql << '('
literal_append(sql, cols)
sql << ' ' << op.to_s << ' '
if val_array
array_sql_append(sql, vals)
else
literal_append(sql, vals)
end
sql << ')'
end
else
sql << '('
literal_append(sql, cols)
sql << ' ' << op.to_s << ' '
literal_append(sql, vals)
sql << ')'
end
when :LIKE, :'NOT LIKE'
sql << '('
literal_append(sql, args[0])
sql << ' ' << op.to_s << ' '
literal_append(sql, args[1])
if requires_like_escape?
sql << " ESCAPE "
literal_append(sql, "\\")
end
sql << ')'
when :ILIKE, :'NOT ILIKE'
complex_expression_sql_append(sql, (op == :ILIKE ? :LIKE : :"NOT LIKE"), args.map{|v| Sequel.function(:UPPER, v)})
when :**
function_sql_append(sql, Sequel.function(:power, *args))
when *TWO_ARITY_OPERATORS
if REGEXP_OPERATORS.include?(op) && !supports_regexp?
raise InvalidOperation, "Pattern matching via regular expressions is not supported on #{db.database_type}"
end
sql << '('
literal_append(sql, args[0])
sql << ' ' << op.to_s << ' '
literal_append(sql, args[1])
sql << ')'
when *N_ARITY_OPERATORS
sql << '('
c = false
op_str = " #{op} "
args.each do |a|
sql << op_str if c
literal_append(sql, a)
c ||= true
end
sql << ')'
when :NOT
sql << 'NOT '
literal_append(sql, args[0])
when :NOOP
literal_append(sql, args[0])
when :'B~'
sql << '~'
literal_append(sql, args[0])
when :extract
sql << 'extract(' << args[0].to_s << ' FROM '
literal_append(sql, args[1])
sql << ')'
else
raise(InvalidOperation, "invalid operator #{op}")
end
end
# Append literalization of constant to SQL string.
def constant_sql_append(sql, constant)
sql << constant.to_s
end
# Append literalization of delayed evaluation to SQL string,
# causing the delayed evaluation proc to be evaluated.
def delayed_evaluation_sql_append(sql, delay)
# Delayed evaluations are used specifically so the SQL
# can differ in subsequent calls, so we definitely don't
# want to cache the sql in this case.
disable_sql_caching!
if recorder = @opts[:placeholder_literalizer]
recorder.use(sql, lambda{delay.call(self)}, nil)
else
literal_append(sql, delay.call(self))
end
end
# Append literalization of function call to SQL string.
def function_sql_append(sql, f)
name = f.name
opts = f.opts
if opts[:emulate]
if emulate_function?(name)
emulate_function_sql_append(sql, f)
return
end
name = native_function_name(name)
end
sql << 'LATERAL ' if opts[:lateral]
case name
when SQL::Identifier
if supports_quoted_function_names? && opts[:quoted]
literal_append(sql, name)
else
sql << name.value.to_s
end
when SQL::QualifiedIdentifier
if supports_quoted_function_names? && opts[:quoted] != false
literal_append(sql, name)
else
sql << split_qualifiers(name).join('.')
end
else
if supports_quoted_function_names? && opts[:quoted]
quote_identifier_append(sql, name)
else
sql << name.to_s
end
end
sql << '('
if filter = opts[:filter]
filter = filter_expr(filter, &opts[:filter_block])
end
if opts[:*]
if filter && !supports_filtered_aggregates?
literal_append(sql, Sequel.case({filter=>1}, nil))
filter = nil
else
sql << '*'
end
else
sql << "DISTINCT " if opts[:distinct]
if filter && !supports_filtered_aggregates?
expression_list_append(sql, f.args.map{|arg| Sequel.case({filter=>arg}, nil)})
filter = nil
else
expression_list_append(sql, f.args)
end
if order = opts[:order]
sql << " ORDER BY "
expression_list_append(sql, order)
end
end
sql << ')'
if group = opts[:within_group]
sql << " WITHIN GROUP (ORDER BY "
expression_list_append(sql, group)
sql << ')'
end
if filter
sql << " FILTER (WHERE "
literal_append(sql, filter)
sql << ')'
end
if window = opts[:over]
sql << ' OVER '
window_sql_append(sql, window.opts)
end
if opts[:with_ordinality]
sql << " WITH ORDINALITY"
end
end
# Append literalization of JOIN clause without ON or USING to SQL string.
def join_clause_sql_append(sql, jc)
table = jc.table
table_alias = jc.table_alias
table_alias = nil if table == table_alias && !jc.column_aliases
sql << ' ' << join_type_sql(jc.join_type) << ' '
identifier_append(sql, table)
as_sql_append(sql, table_alias, jc.column_aliases) if table_alias
end
# Append literalization of JOIN ON clause to SQL string.
def join_on_clause_sql_append(sql, jc)
join_clause_sql_append(sql, jc)
sql << ' ON '
literal_append(sql, filter_expr(jc.on))
end
# Append literalization of JOIN USING clause to SQL string.
def join_using_clause_sql_append(sql, jc)
join_clause_sql_append(sql, jc)
join_using_clause_using_sql_append(sql, jc.using)
end
# Append literalization of negative boolean constant to SQL string.
def negative_boolean_constant_sql_append(sql, constant)
sql << 'NOT '
boolean_constant_sql_append(sql, constant)
end
# Append literalization of ordered expression to SQL string.
def ordered_expression_sql_append(sql, oe)
if emulate = requires_emulating_nulls_first?
case oe.nulls
when :first
null_order = 0
when :last
null_order = 2
end
if null_order
literal_append(sql, Sequel.case({{oe.expression=>nil}=>null_order}, 1))
sql << ", "
end
end
literal_append(sql, oe.expression)
sql << (oe.descending ? ' DESC' : ' ASC')
unless emulate
case oe.nulls
when :first
sql << " NULLS FIRST"
when :last
sql << " NULLS LAST"
end
end
end
# Append literalization of placeholder literal string to SQL string.
def placeholder_literal_string_sql_append(sql, pls)
args = pls.args
str = pls.str
sql << '(' if pls.parens
if args.is_a?(Hash)
if args.empty?
sql << str
else
re = /:(#{args.keys.map{|k| Regexp.escape(k.to_s)}.join('|')})\b/
while true
previous, q, str = str.partition(re)
sql << previous
literal_append(sql, args[($1||q[1..-1].to_s).to_sym]) unless q.empty?
break if str.empty?
end
end
elsif str.is_a?(Array)
len = args.length
str.each_with_index do |s, i|
sql << s
literal_append(sql, args[i]) unless i == len
end
unless str.length == args.length || str.length == args.length + 1
raise Error, "Mismatched number of placeholders (#{str.length}) and placeholder arguments (#{args.length}) when using placeholder array"
end
else
i = -1
match_len = args.length - 1
while true
previous, q, str = str.partition('?')
sql << previous
literal_append(sql, args.at(i+=1)) unless q.empty?
if str.empty?
unless i == match_len
raise Error, "Mismatched number of placeholders (#{i+1}) and placeholder arguments (#{args.length}) when using placeholder string"
end
break
end
end
end
sql << ')' if pls.parens
end
# Append literalization of qualified identifier to SQL string.
# If 3 arguments are given, the 2nd should be the table/qualifier and the third should be
# column/qualified. If 2 arguments are given, the 2nd should be an SQL::QualifiedIdentifier.
def qualified_identifier_sql_append(sql, table, column=(c = table.column; table = table.table; c))
identifier_append(sql, table)
sql << '.'
identifier_append(sql, column)
end
# Append literalization of unqualified identifier to SQL string.
# Adds quoting to identifiers (columns and tables). If identifiers are not
# being quoted, returns name as a string. If identifiers are being quoted,
# quote the name with quoted_identifier_append.
def quote_identifier_append(sql, name)
if name.is_a?(LiteralString)
sql << name
else
name = name.value if name.is_a?(SQL::Identifier)
name = input_identifier(name)
if quote_identifiers?
quoted_identifier_append(sql, name)
else
sql << name
end
end
end
# Append literalization of identifier or unqualified identifier to SQL string.
def quote_schema_table_append(sql, table)
schema, table = schema_and_table(table)
if schema
quote_identifier_append(sql, schema)
sql << '.'
end
quote_identifier_append(sql, table)
end
# Append literalization of quoted identifier to SQL string.
# This method quotes the given name with the SQL standard double quote.
# Should be overridden by subclasses to provide quoting not matching the
# SQL standard, such as backtick (used by MySQL and SQLite).
def quoted_identifier_append(sql, name)
sql << '"' << name.to_s.gsub('"', '""') << '"'
end
# Split the schema information from the table, returning two strings,
# one for the schema and one for the table. The returned schema may
# be nil, but the table will always have a string value.
#
# Note that this function does not handle tables with more than one
# level of qualification (e.g. database.schema.table on Microsoft
# SQL Server).
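#
# A hedged example:
#
# ds.schema_and_table(:s) # [nil, 's']
# ds.schema_and_table(Sequel[:t][:s]) # ['t', 's']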
def schema_and_table(table_name, sch=nil)
sch = sch.to_s if sch
case table_name
when Symbol
s, t, _ = split_symbol(table_name)
[s||sch, t]
when SQL::QualifiedIdentifier
[table_name.table.to_s, table_name.column.to_s]
when SQL::Identifier
[sch, table_name.value.to_s]
when String
[sch, table_name]
else
raise Error, 'table_name should be a Symbol, SQL::QualifiedIdentifier, SQL::Identifier, or String'
end
end
# Splits table_name into an array of strings.
#
# ds.split_qualifiers(:s) # ['s']
# ds.split_qualifiers(Sequel[:t][:s]) # ['t', 's']
# ds.split_qualifiers(Sequel[:d][:t][:s]) # ['d', 't', 's']
# ds.split_qualifiers(Sequel.qualify(Sequel[:h][:d], Sequel[:t][:s])) # ['h', 'd', 't', 's']
def split_qualifiers(table_name, *args)
case table_name
when SQL::QualifiedIdentifier
split_qualifiers(table_name.table, nil) + split_qualifiers(table_name.column, nil)
else
sch, table = schema_and_table(table_name, *args)
sch ? [sch, table] : [table]
end
end
# Append literalization of subscripts (SQL array accesses) to SQL string.
def subscript_sql_append(sql, s)
case s.expression
when Symbol, SQL::Subscript, SQL::Identifier, SQL::QualifiedIdentifier
# nothing
else
wrap_expression = true
sql << '('
end
literal_append(sql, s.expression)
if wrap_expression
sql << ')['
else
sql << '['
end
sub = s.sub
if sub.length == 1 && (range = sub.first).is_a?(Range)
literal_append(sql, range.begin)
sql << ':'
e = range.end
e -= 1 if range.exclude_end? && e.is_a?(Integer)
literal_append(sql, e)
else
expression_list_append(sql, s.sub)
end
sql << ']'
end
# Append literalization of windows (for window functions) to SQL string.
def window_sql_append(sql, opts)
raise(Error, 'This dataset does not support window functions') unless supports_window_functions?
space = false
space_s = ' '
sql << '('
if window = opts[:window]
literal_append(sql, window)
space = true
end
if part = opts[:partition]
sql << space_s if space
sql << "PARTITION BY "
expression_list_append(sql, Array(part))
space = true
end
if order = opts[:order]
sql << space_s if space
sql << "ORDER BY "
expression_list_append(sql, Array(order))
space = true
end
if frame = opts[:frame]
sql << space_s if space
if frame.is_a?(String)
sql << frame
else
case frame
when :all
frame_type = :rows
frame_start = :preceding
frame_end = :following
when :rows, :range, :groups
frame_type = frame
frame_start = :preceding
frame_end = :current
when Hash
frame_type = frame[:type]
unless frame_type == :rows || frame_type == :range || frame_type == :groups
raise Error, "invalid window :frame :type option: #{frame_type.inspect}"
end
unless frame_start = frame[:start]
raise Error, "invalid window :frame :start option: #{frame_start.inspect}"
end
frame_end = frame[:end]
frame_exclude = frame[:exclude]
else
raise Error, "invalid window :frame option: #{frame.inspect}"
end
sql << frame_type.to_s.upcase << " "
sql << 'BETWEEN ' if frame_end
window_frame_boundary_sql_append(sql, frame_start, :preceding)
if frame_end
sql << " AND "
window_frame_boundary_sql_append(sql, frame_end, :following)
end
if frame_exclude
sql << " EXCLUDE "
case frame_exclude
when :current
sql << "CURRENT ROW"
when :group
sql << "GROUP"
when :ties
sql << "TIES"
when :no_others
sql << "NO OTHERS"
else
raise Error, "invalid window :frame :exclude option: #{frame_exclude.inspect}"
end
end
end
end
sql << ')'
end
protected
# Return a from_self dataset if an order or limit is specified, so it works as expected
# with UNION, EXCEPT, and INTERSECT clauses.
def compound_from_self
(@opts[:sql] || @opts[:limit] || @opts[:order] || @opts[:offset]) ? from_self : self
end
private
# Append the INSERT sql used in a MERGE
def _merge_insert_sql(sql, data)
sql << " THEN INSERT"
columns, values = _parse_insert_sql_args(data[:values])
_insert_columns_sql(sql, columns)
_insert_values_sql(sql, values)
end
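# Append the UPDATE sql used in a MERGE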
def _merge_update_sql(sql, data)
sql << " THEN UPDATE SET "
update_sql_values_hash(sql, data[:values])
end
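# Append the DELETE sql used in a MERGE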
def _merge_delete_sql(sql, data)
sql << " THEN DELETE"
end
# Mapping of merge types to related SQL
MERGE_TYPE_SQL = {
:insert => ' WHEN NOT MATCHED',
:delete => ' WHEN MATCHED',
:update => ' WHEN MATCHED',
:matched => ' WHEN MATCHED',
:not_matched => ' WHEN NOT MATCHED',
}.freeze
private_constant :MERGE_TYPE_SQL
# Add the WHEN clauses to the MERGE SQL
def _merge_when_sql(sql)
raise Error, "no WHEN [NOT] MATCHED clauses provided for MERGE" unless merge_when = @opts[:merge_when]
merge_when.each do |data|
type = data[:type]
sql << MERGE_TYPE_SQL[type]
_merge_when_conditions_sql(sql, data)
send(:"_merge_#{type}_sql", sql, data)
end
end
# Append MERGE WHEN conditions, if there are conditions provided.
def _merge_when_conditions_sql(sql, data)
if data.has_key?(:conditions)
sql << " AND "
literal_append(sql, data[:conditions])
end
end
# Parse the values passed to insert_sql, returning columns and values
# to use for the INSERT. Returned columns is always an array, but can be empty
# for an INSERT without explicit column references. Returned values can be an
# array, dataset, or literal string.
def _parse_insert_sql_args(values)
columns = []
case values.size
when 0
values = []
when 1
case vals = values[0]
when Hash
values = []
vals.each do |k,v|
columns << k
values << v
end
when Dataset, Array, LiteralString
values = vals
end
when 2
if (v0 = values[0]).is_a?(Array) && ((v1 = values[1]).is_a?(Array) || v1.is_a?(Dataset) || v1.is_a?(LiteralString))
columns, values = v0, v1
raise(Error, "Different number of values and columns given to insert_sql") if values.is_a?(Array) and columns.length != values.length
end
end
[columns, values]
end
# Formats the truncate statement. Assumes the table given has already been
# literalized.
def _truncate_sql(table)
"TRUNCATE TABLE #{table}"
end
# Returns an appropriate symbol for the alias represented by s.
def alias_alias_symbol(s)
case s
when Symbol
s
when String
s.to_sym
when SQL::Identifier
s.value.to_s.to_sym
else
raise Error, "Invalid alias for alias_alias_symbol: #{s.inspect}"
end
end
# Returns an appropriate alias symbol for the given object, which can be
# a Symbol, String, SQL::Identifier, SQL::QualifiedIdentifier, or
# SQL::AliasedExpression.
def alias_symbol(sym)
case sym
when Symbol
s, t, a = split_symbol(sym)
a || s ? (a || t).to_sym : sym
when String
sym.to_sym
when SQL::Identifier
sym.value.to_s.to_sym
when SQL::QualifiedIdentifier
alias_symbol(sym.column)
when SQL::AliasedExpression
alias_alias_symbol(sym.alias)
else
raise Error, "Invalid alias for alias_symbol: #{sym.inspect}"
end
end
# Clone of this dataset usable in aggregate operations. Does
# a from_self if dataset contains any parameters that would
# affect normal aggregation, or just removes an existing
# order if not. Also removes the row_proc, which isn't needed
# for aggregate calculations.
def aggregate_dataset
(options_overlap(COUNT_FROM_SELF_OPTS) ? from_self : unordered).naked
end
# Append aliasing expression to SQL string.
def as_sql_append(sql, aliaz, column_aliases=nil)
sql << ' AS '
quote_identifier_append(sql, aliaz)
if column_aliases
raise Error, "#{db.database_type} does not support derived column lists" unless supports_derived_column_lists?
sql << '('
identifier_list_append(sql, column_aliases)
sql << ')'
end
end
# Don't allow caching SQL if specifically marked not to.
def cache_sql?
!@opts[:no_cache_sql] && !cache_get(:_no_cache_sql)
end
# Raise an InvalidOperation exception if modification is not allowed for this dataset.
# Only for backwards compatibility with older external adapters.
def check_modification_allowed!
# SEQUEL6: Remove
Sequel::Deprecation.deprecate("Dataset#check_modification_allowed!", "Use check_{insert,delete,update,truncation}_allowed! instead")
_check_modification_allowed!(supports_modifying_joins?)
end
# Check whether it is allowed to insert into this dataset.
def check_insert_allowed!
_check_modification_allowed!(false)
end
alias check_truncation_allowed! check_insert_allowed!
# Check whether it is allowed to delete from this dataset.
def check_delete_allowed!
_check_modification_allowed!(supports_deleting_joins?)
end
# Check whether it is allowed to update this dataset.
def check_update_allowed!
_check_modification_allowed!(supports_updating_joins?)
end
# Internals of the check_*_allowed! methods
def _check_modification_allowed!(modifying_joins_supported)
raise(InvalidOperation, "Grouped datasets cannot be modified") if opts[:group]
raise(InvalidOperation, "Joined datasets cannot be modified") if !modifying_joins_supported && joined_dataset?
end
# Raise error if the dataset uses limits or offsets.
def check_not_limited!(type)
return if @opts[:skip_limit_check] && type != :truncate
raise InvalidOperation, "Dataset##{type} not supported on datasets with limits or offsets" if opts[:limit] || opts[:offset]
end
# Append column list to SQL string.
# If the column list is empty, a wildcard (*) is appended.
def column_list_append(sql, columns)
if (columns.nil? || columns.empty?)
sql << '*'
else
expression_list_append(sql, columns)
end
end
# Yield each pair of arguments to the block, which should
# return an object representing the SQL expression for those
# two arguments. For more than two arguments, the first
# argument to the block will be the result of the previous block call.
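#
# A hedged illustration: for args [a, b, c] and a block yielding
# Sequel.function(:MOD, x, y), the result represents MOD(MOD(a, b), c).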
def complex_expression_arg_pairs(args)
case args.length
when 1
args[0]
when 2
yield args[0], args[1]
else
args.inject{|m, a| yield(m, a)}
end
end
# Append the literalization of the args using complex_expression_arg_pairs
# to the given SQL string, used when database operator/function is 2-ary
# where Sequel expression is N-ary.
def complex_expression_arg_pairs_append(sql, args, &block)
literal_append(sql, complex_expression_arg_pairs(args, &block))
end
# Append literalization of complex expression to SQL string, for
# operators unsupported by some databases. Used by adapters for databases
# that don't support the operators natively.
def complex_expression_emulate_append(sql, op, args)
# :nocov:
case op
# :nocov:
when :%
complex_expression_arg_pairs_append(sql, args){|a, b| Sequel.function(:MOD, a, b)}
when :>>
complex_expression_arg_pairs_append(sql, args){|a, b| Sequel./(a, Sequel.function(:power, 2, b))}
when :<<
complex_expression_arg_pairs_append(sql, args){|a, b| Sequel.*(a, Sequel.function(:power, 2, b))}
when :&, :|, :^
f = BITWISE_METHOD_MAP[op]
complex_expression_arg_pairs_append(sql, args){|a, b| Sequel.function(f, a, b)}
when :'B~'
sql << "((0 - "
literal_append(sql, args[0])
sql << ") - 1)"
end
end
# Append literalization of dataset used in UNION/INTERSECT/EXCEPT clause to SQL string.
def compound_dataset_sql_append(sql, ds)
subselect_sql_append(sql, ds)
end
# The alias to use for datasets, takes a number to make sure the name is unique.
def dataset_alias(number)
:"t#{number}"
end
# The strftime format to use when literalizing the time.
def default_timestamp_format
requires_sql_standard_datetimes? ? "TIMESTAMP '%Y-%m-%d %H:%M:%S%N%z'" : "'%Y-%m-%d %H:%M:%S%N%z'"
end
def delete_delete_sql(sql)
sql << 'DELETE'
end
def delete_from_sql(sql)
if f = @opts[:from]
sql << ' FROM '
source_list_append(sql, f)
end
end
# Disable caching of SQL for the current dataset
def disable_sql_caching!
cache_set(:_no_cache_sql, true)
end
# An SQL FROM clause to use in SELECT statements where the dataset has
# no from tables.
def empty_from_sql
nil
end
# Whether to emulate the function with the given name. This should only be true
# if the emulation goes beyond choosing a function with a different name.
def emulate_function?(name)
false
end
# Append literalization of array of expressions to SQL string, separating them
# with commas.
def expression_list_append(sql, columns)
c = false
co = ', '
columns.each do |col|
sql << co if c
literal_append(sql, col)
c ||= true
end
end
# Append literalization of array of grouping elements to SQL string, separating them with commas.
def grouping_element_list_append(sql, columns)
c = false
co = ', '
columns.each do |col|
sql << co if c
if col.is_a?(Array) && col.empty?
sql << '()'
else
literal_append(sql, Array(col))
end
c ||= true
end
end
# An expression for how to handle an empty array lookup.
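# By default this yields a false expression (1 = 0) for IN and a true expression (1 = 1) for NOT IN.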
def empty_array_value(op, cols)
{1 => ((op == :IN) ? 0 : 1)}
end
# Format the timestamp based on the default_timestamp_format, with a couple
# of modifiers. First, allow %N to be used for fractions seconds (if the
# database supports them), and override %z to always use a numeric offset
# of hours and minutes.
def format_timestamp(v)
v2 = db.from_application_timestamp(v)
fmt = default_timestamp_format.gsub(/%[Nz]/) do |m|
if m == '%N'
# Ruby 1.9 supports %N in timestamp formats, but Sequel has supported %N
# for longer in a different way, where the . is already appended and only 6
# decimal places are used by default.
format_timestamp_usec(v.is_a?(DateTime) ? v.sec_fraction*(1000000) : v.usec) if supports_timestamp_usecs?
else
if supports_timestamp_timezones?
# Would like to just use %z format, but it doesn't appear to work on Windows
# Instead, the offset fragment is constructed manually
minutes = (v2.is_a?(DateTime) ? v2.offset * 1440 : v2.utc_offset/60).to_i
format_timestamp_offset(*minutes.divmod(60))
end
end
end
v2.strftime(fmt)
end
# Return the SQL timestamp fragment to use for the timezone offset.
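# For example, format_timestamp_offset(-8, 0) returns "-0800".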
def format_timestamp_offset(hour, minute)
sprintf("%+03i%02i", hour, minute)
end
# Return the SQL timestamp fragment to use for the fractional time part.
# Should start with the decimal point. Uses 6 decimal places by default.
def format_timestamp_usec(usec, ts=timestamp_precision)
unless ts == 6
usec = usec/(10 ** (6 - ts))
end
sprintf(".%0#{ts}d", usec)
end
# Append literalization of identifier to SQL string, considering regular strings
# as SQL identifiers instead of SQL strings.
def identifier_append(sql, v)
if v.is_a?(String)
case v
when LiteralString
sql << v
when SQL::Blob
literal_append(sql, v)
else
quote_identifier_append(sql, v)
end
else
literal_append(sql, v)
end
end
# Append literalization of array of identifiers to SQL string.
def identifier_list_append(sql, args)
c = false
comma = ', '
args.each do |a|
sql << comma if c
identifier_append(sql, a)
c ||= true
end
end
# Upcase identifiers by default when inputting them into the database.
def input_identifier(v)
v.to_s.upcase
end
def insert_into_sql(sql)
sql << " INTO "
if (f = @opts[:from]) && f.length == 1
identifier_append(sql, unaliased_identifier(f.first))
else
source_list_append(sql, f)
end
end
def insert_columns_sql(sql)
_insert_columns_sql(sql, opts[:columns])
end
def _insert_columns_sql(sql, columns)
if columns && !columns.empty?
sql << ' ('
identifier_list_append(sql, columns)
sql << ')'
end
end
# The columns and values to use for an empty insert if the database doesn't support
# INSERT with DEFAULT VALUES.
def insert_empty_columns_values
[[columns.last], [DEFAULT]]
end
def insert_insert_sql(sql)
sql << "INSERT"
end
def insert_values_sql(sql)
_insert_values_sql(sql, opts[:values])
end
def _insert_values_sql(sql, values)
case values
when Array
if values.empty?
sql << " DEFAULT VALUES"
else
sql << " VALUES "
literal_append(sql, values)
end
when Dataset
sql << ' '
subselect_sql_append(sql, values)
when LiteralString
sql << ' ' << values
else
raise Error, "Unsupported INSERT values type, should be an Array or Dataset: #{values.inspect}"
end
end
def insert_returning_sql(sql)
if opts.has_key?(:returning)
sql << " RETURNING "
column_list_append(sql, Array(opts[:returning]))
end
end
alias delete_returning_sql insert_returning_sql
alias update_returning_sql insert_returning_sql
# SQL fragment specifying a JOIN type, converts underscores to
# spaces and upcases.
def join_type_sql(join_type)
"#{join_type.to_s.gsub('_', ' ').upcase} JOIN"
end
# Append USING clause for JOIN USING
def join_using_clause_using_sql_append(sql, using_columns)
sql << ' USING ('
column_list_append(sql, using_columns)
sql << ')'
end
# Append a literalization of the array to SQL string.
# Treats it as a boolean expression if it is an array of all two-element arrays (a condition specifier), or as an SQL array otherwise.
def literal_array_append(sql, v)
if Sequel.condition_specifier?(v)
literal_expression_append(sql, SQL::BooleanExpression.from_value_pairs(v))
else
array_sql_append(sql, v)
end
end
# SQL fragment for BigDecimal
def literal_big_decimal(v)
d = v.to_s("F")
v.nan? || v.infinite? ? "'#{d}'" : d
end
# Append literalization of SQL::Blob to SQL string.
def literal_blob_append(sql, v)
literal_string_append(sql, v)
end
# Append literalization of dataset to SQL string. Does a subselect inside parentheses.
def literal_dataset_append(sql, v)
sql << 'LATERAL ' if v.opts[:lateral]
sql << '('
subselect_sql_append(sql, v)
sql << ')'
end
# SQL fragment for Date, using the ISO8601 format.
def literal_date(v)
if requires_sql_standard_datetimes?
v.strftime("DATE '%Y-%m-%d'")
else
v.strftime("'%Y-%m-%d'")
end
end
# SQL fragment for DateTime
def literal_datetime(v)
format_timestamp(v)
end
# Append literalization of DateTime to SQL string.
def literal_datetime_append(sql, v)
sql << literal_datetime(v)
end
# Append literalization of SQL::Expression to SQL string.
def literal_expression_append(sql, v)
v.to_s_append(self, sql)
end
# SQL fragment for false
def literal_false
"'f'"
end
# SQL fragment for Float
def literal_float(v)
v.to_s
end
# Append literalization of Hash to SQL string, treating hash as a boolean expression.
def literal_hash_append(sql, v)
literal_expression_append(sql, SQL::BooleanExpression.from_value_pairs(v))
end
# SQL fragment for Integer
def literal_integer(v)
v.to_s
end
# SQL fragment for nil
def literal_nil
"NULL"
end
# Append a literalization of the object to the given SQL string.
# Calls +sql_literal_append+ if object responds to it, otherwise
# calls +sql_literal+ if object responds to it, otherwise raises an error.
# If a database specific type is allowed, this should be overridden in a subclass.
def literal_other_append(sql, v)
# We can't be sure if v will always literalize to the same SQL, so
# don't cache SQL for a dataset that uses this.
disable_sql_caching!
if v.respond_to?(:sql_literal_append)
v.sql_literal_append(self, sql)
elsif v.respond_to?(:sql_literal)
sql << v.sql_literal(self)
else
raise Error, "can't express #{v.inspect} as a SQL literal"
end
end
# SQL fragment for Sequel::SQLTime, containing just the time part
def literal_sqltime(v)
v.strftime("'%H:%M:%S#{format_timestamp_usec(v.usec, sqltime_precision) if supports_timestamp_usecs?}'")
end
# Append literalization of Sequel::SQLTime to SQL string.
def literal_sqltime_append(sql, v)
sql << literal_sqltime(v)
end
# Append literalization of string to SQL string.
def literal_string_append(sql, v)
sql << "'" << v.gsub("'", "''") << "'"
end
# Append literalization of symbol to SQL string.
def literal_symbol_append(sql, v)
c_table, column, c_alias = split_symbol(v)
if c_table
quote_identifier_append(sql, c_table)
sql << '.'
end
quote_identifier_append(sql, column)
as_sql_append(sql, c_alias) if c_alias
end
# SQL fragment for Time
def literal_time(v)
format_timestamp(v)
end
# Append literalization of Time to SQL string.
def literal_time_append(sql, v)
sql << literal_time(v)
end
# SQL fragment for true
def literal_true
"'t'"
end
# What strategy to use for import/multi_insert. While SQL-92 defaults
# to allowing multiple rows in a VALUES clause, there are enough databases
# that don't allow that that it can't be the default. Use separate queries
# by default, which works everywhere.
def multi_insert_sql_strategy
:separate
end
# Get the native function name given the emulated function name.
def native_function_name(emulated_function)
emulated_function
end
# Returns a qualified column name (including a table name) if the column
# name isn't already qualified.
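# For example, qualified_column_name(:id, :items) returns a
# QualifiedIdentifier representing items.id, while a column that
# is not a Symbol is returned unchanged.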
def qualified_column_name(column, table)
if column.is_a?(Symbol)
c_table, column, _ = split_symbol(column)
unless c_table
case table
when Symbol
schema, table, t_alias = split_symbol(table)
t_alias ||= Sequel::SQL::QualifiedIdentifier.new(schema, table) if schema
when Sequel::SQL::AliasedExpression
t_alias = table.alias
end
c_table = t_alias || table
end
::Sequel::SQL::QualifiedIdentifier.new(c_table, column)
else
column
end
end
# Qualify the given expression to the given table.
def qualified_expression(e, table)
Qualifier.new(table).transform(e)
end
def select_columns_sql(sql)
sql << ' '
column_list_append(sql, @opts[:select])
end
def select_distinct_sql(sql)
if distinct = @opts[:distinct]
sql << " DISTINCT"
unless distinct.empty?
sql << " ON ("
expression_list_append(sql, distinct)
sql << ')'
end
end
end
# Modify the SQL to add a dataset via an EXCEPT, INTERSECT, or UNION clause.
# This uses a subselect for the compound datasets used, because using parentheses doesn't
# work on all databases.
def select_compounds_sql(sql)
return unless c = @opts[:compounds]
c.each do |type, dataset, all|
sql << ' ' << type.to_s.upcase
sql << ' ALL' if all
sql << ' '
compound_dataset_sql_append(sql, dataset)
end
end
def select_from_sql(sql)
if f = @opts[:from]
sql << ' FROM '
source_list_append(sql, f)
elsif f = empty_from_sql
sql << f
end
end
def select_group_sql(sql)
if group = @opts[:group]
sql << " GROUP BY "
if go = @opts[:group_options]
if go == :"grouping sets"
sql << go.to_s.upcase << '('
grouping_element_list_append(sql, group)
sql << ')'
elsif uses_with_rollup?
expression_list_append(sql, group)
sql << " WITH " << go.to_s.upcase
else
sql << go.to_s.upcase << '('
expression_list_append(sql, group)
sql << ')'
end
else
expression_list_append(sql, group)
end
end
end
def select_having_sql(sql)
if having = @opts[:having]
sql << " HAVING "
literal_append(sql, having)
end
end
def select_join_sql(sql)
if js = @opts[:join]
js.each{|j| literal_append(sql, j)}
end
end
def select_limit_sql(sql)
if l = @opts[:limit]
sql << " LIMIT "
literal_append(sql, l)
if o = @opts[:offset]
sql << " OFFSET "
literal_append(sql, o)
end
elsif @opts[:offset]
select_only_offset_sql(sql)
end
end
def select_lock_sql(sql)
case l = @opts[:lock]
when :update
sql << ' FOR UPDATE'
when String
sql << ' ' << l
end
end
# Used only if there is an offset and no limit, making it easier to override
# in the adapter, as many databases do not support just a plain offset with
# no limit.
def select_only_offset_sql(sql)
sql << " OFFSET "
literal_append(sql, @opts[:offset])
end
def select_order_sql(sql)
if o = @opts[:order]
sql << " ORDER BY "
expression_list_append(sql, o)
end
end
alias delete_order_sql select_order_sql
alias update_order_sql select_order_sql
def select_select_sql(sql)
sql << 'SELECT'
end
def select_where_sql(sql)
if w = @opts[:where]
sql << " WHERE "
literal_append(sql, w)
end
end
alias delete_where_sql select_where_sql
alias update_where_sql select_where_sql
def select_window_sql(sql)
if ws = @opts[:window]
sql << " WINDOW "
c = false
co = ', '
as = ' AS '
ws.each do |name, window|
sql << co if c
literal_append(sql, name)
sql << as
literal_append(sql, window)
c ||= true
end
end
end
def select_with_sql(sql)
return unless supports_cte?
ctes = opts[:with]
return if !ctes || ctes.empty?
sql << select_with_sql_base
c = false
comma = ', '
ctes.each do |cte|
sql << comma if c
select_with_sql_cte(sql, cte)
c ||= true
end
sql << ' '
end
alias delete_with_sql select_with_sql
alias insert_with_sql select_with_sql
alias update_with_sql select_with_sql
def select_with_sql_base
"WITH "
end
def select_with_sql_cte(sql, cte)
select_with_sql_prefix(sql, cte)
literal_dataset_append(sql, cte[:dataset])
end
def select_with_sql_prefix(sql, w)
quote_identifier_append(sql, w[:name])
if args = w[:args]
sql << '('
identifier_list_append(sql, args)
sql << ')'
end
sql << ' AS '
case w[:materialized]
when true
sql << "MATERIALIZED "
when false
sql << "NOT MATERIALIZED "
end
end
# Whether the symbol cache should be skipped when literalizing the dataset
def skip_symbol_cache?
@opts[:skip_symbol_cache]
end
# Append literalization of array of sources/tables to SQL string, raising an Error if there
# are no sources.
def source_list_append(sql, sources)
raise(Error, 'No source specified for query') if sources.nil? || sources == []
identifier_list_append(sql, sources)
end
# Delegate to Sequel.split_symbol.
def split_symbol(sym)
Sequel.split_symbol(sym)
end
# The string that is appended to in order to create the SQL query, the empty
# string by default.
def sql_string_origin
String.new
end
# The precision to use for SQLTime instances (time column values without dates).
# Defaults to timestamp_precision.
def sqltime_precision
timestamp_precision
end
# SQL to use if this dataset uses static SQL. Since static SQL
# can be a PlaceholderLiteralString in addition to a String,
# we literalize nonstrings. If there is an append_sql for this
# dataset, append to that SQL instead of returning the value.
def static_sql(sql)
if append_sql = @opts[:append_sql]
if sql.is_a?(String)
append_sql << sql
else
literal_append(append_sql, sql)
end
else
if sql.is_a?(String)
sql
else
literal(sql)
end
end
end
# Append literalization of the subselect to SQL string.
def subselect_sql_append(sql, ds)
sds = subselect_sql_dataset(sql, ds)
subselect_sql_append_sql(sql, sds)
unless sds.send(:cache_sql?)
# If subquery dataset does not allow caching SQL,
# then this dataset should not allow caching SQL.
disable_sql_caching!
end
end
def subselect_sql_dataset(sql, ds)
ds.clone(:append_sql=>sql)
end
def subselect_sql_append_sql(sql, ds)
ds.sql
end
# The number of decimal digits of precision to use in timestamps.
def timestamp_precision
supports_timestamp_usecs? ? 6 : 0
end
def update_table_sql(sql)
sql << ' '
source_list_append(sql, @opts[:from])
select_join_sql(sql) if supports_modifying_joins?
end
def update_set_sql(sql)
sql << ' SET '
values = @opts[:values]
if values.is_a?(Hash)
update_sql_values_hash(sql, values)
else
sql << values
end
end
def update_sql_values_hash(sql, values)
c = false
eq = ' = '
values.each do |k, v|
sql << ', ' if c
if k.is_a?(String) && !k.is_a?(LiteralString)
quote_identifier_append(sql, k)
else
literal_append(sql, k)
end
sql << eq
literal_append(sql, v)
c ||= true
end
end
def update_update_sql(sql)
sql << 'UPDATE'
end
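# Append the SQL for a window frame boundary to the SQL string.
# boundary can be :current, :preceding, or :following, or an offset
# (optionally an [offset, direction] array) for a bounded frame edge
# such as "5 PRECEDING".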
def window_frame_boundary_sql_append(sql, boundary, direction)
case boundary
when :current
sql << "CURRENT ROW"
when :preceding
sql << "UNBOUNDED PRECEDING"
when :following
sql << "UNBOUNDED FOLLOWING"
else
if boundary.is_a?(Array)
offset, direction = boundary
unless boundary.length == 2 && (direction == :preceding || direction == :following)
raise Error, "invalid window :frame boundary (:start or :end) option: #{boundary.inspect}"
end
else
offset = boundary
end
case offset
when Numeric, String, SQL::Cast
# nothing
else
raise Error, "invalid window :frame boundary (:start or :end) option: #{boundary.inspect}"
end
literal_append(sql, offset)
sql << (direction == :preceding ? " PRECEDING" : " FOLLOWING")
end
end
end
end
sequel-5.63.0/lib/sequel/deprecated.rb 0000664 0000000 0000000 00000005720 14342141206 0017575 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
# This module makes it easy to print deprecation warnings with optional backtraces to a given stream.
# There are a few accessors you can use to change how/where the deprecation messages are printed
# and whether/how backtraces should be included:
#
# Sequel::Deprecation.output = $stderr # print deprecation messages to standard error (default)
# Sequel::Deprecation.output = File.open('deprecated_calls.txt', 'wb') # use a file instead
# Sequel::Deprecation.output = false # do not output deprecation messages
#
# Sequel::Deprecation.prefix = "SEQUEL DEPRECATION WARNING: " # prefix deprecation messages with a given string (default)
# Sequel::Deprecation.prefix = false # do not prefix deprecation messages
#
# Sequel::Deprecation.backtrace_filter = false # don't include backtraces
# Sequel::Deprecation.backtrace_filter = true # include full backtraces
# Sequel::Deprecation.backtrace_filter = 10 # include 10 backtrace lines (default)
# Sequel::Deprecation.backtrace_filter = 1 # include 1 backtrace line
# Sequel::Deprecation.backtrace_filter = lambda{|line, line_no| line_no < 3 || line =~ /my_app/} # select backtrace lines to output
module Deprecation
@backtrace_filter = 10
@output = $stderr
@prefix = "SEQUEL DEPRECATION WARNING: ".freeze
class << self
# How to filter backtraces. +false+ does not include backtraces, +true+ includes
# full backtraces, an Integer includes that number of backtrace lines, and
# a proc is called with the backtrace line and line number to select the backtrace
# lines to include. The default is 10 backtrace lines.
attr_accessor :backtrace_filter
# Where deprecation messages should be output, must respond to puts. $stderr by default.
attr_accessor :output
# What deprecation messages should be prefixed with ("SEQUEL DEPRECATION WARNING: " by default).
attr_accessor :prefix
end
# Print the message and possibly backtrace to the output.
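#
# Example (with hypothetical method names):
#
#   Sequel::Deprecation.deprecate("Dataset#foo", "Use Dataset#bar instead")
#   # SEQUEL DEPRECATION WARNING: Dataset#foo is deprecated and will be removed in Sequel 6. Use Dataset#bar instead.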
def self.deprecate(method, instead=nil)
return unless output
message = instead ? "#{method} is deprecated and will be removed in Sequel 6. #{instead}." : method
message = "#{prefix}#{message}" if prefix
output.puts(message)
case b = backtrace_filter
when Integer
caller.each do |c|
b -= 1
output.puts(c)
break if b <= 0
end
when true
caller.each{|c| output.puts(c)}
when Proc
caller.each_with_index{|line, line_no| output.puts(line) if b.call(line, line_no)}
end
nil
end
# If using ruby 2.3+, use Module#deprecate_constant to deprecate the constant,
# otherwise do nothing as the ruby implementation does not support constant deprecation.
def self.deprecate_constant(mod, constant)
# :nocov:
if RUBY_VERSION > '2.3'
# :nocov:
mod.deprecate_constant(constant)
end
end
end
end
sequel-5.63.0/lib/sequel/exceptions.rb 0000664 0000000 0000000 00000007561 14342141206 0017663 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
# The default exception class for exceptions raised by Sequel.
# All exception classes defined by Sequel are descendants of this class.
class Error < ::StandardError
# If this exception wraps an underlying exception, the underlying
# exception is held here.
attr_accessor :wrapped_exception
# :nocov:
if RUBY_VERSION >= '2.1'
# :nocov:
# Return the wrapped exception if one exists, otherwise use
# ruby's default behavior.
def cause
wrapped_exception || super
end
end
end
(
# Error raised when the adapter requested doesn't exist or can't be loaded.
AdapterNotFound = Class.new(Error)
).name
(
# Generic error raised by the database adapters, indicating a
# problem originating from the database server. Usually raised
# because incorrect SQL syntax is used.
DatabaseError = Class.new(Error)
).name
(
# Error raised when Sequel is unable to connect to the database with the
# connection parameters it was given.
DatabaseConnectionError = Class.new(DatabaseError)
).name
(
# Error raised by adapters when they determine that the connection
# to the database has been lost. Instructs the connection pool code to
# remove that connection from the pool so that other connections can be acquired
# automatically.
DatabaseDisconnectError = Class.new(DatabaseError)
).name
(
# Generic error raised when Sequel determines a database constraint has been violated.
ConstraintViolation = Class.new(DatabaseError)
).name
(
# Error raised when Sequel determines a database check constraint has been violated.
CheckConstraintViolation = Class.new(ConstraintViolation)
).name
(
# Error raised when Sequel determines a database foreign key constraint has been violated.
ForeignKeyConstraintViolation = Class.new(ConstraintViolation)
).name
(
# Error raised when Sequel determines a database NOT NULL constraint has been violated.
NotNullConstraintViolation = Class.new(ConstraintViolation)
).name
(
# Error raised when Sequel determines a database unique constraint has been violated.
UniqueConstraintViolation = Class.new(ConstraintViolation)
).name
(
# Error raised when Sequel determines a serialization failure/deadlock in the database.
SerializationFailure = Class.new(DatabaseError)
).name
(
# Error raised when Sequel determines the database could not acquire a necessary lock
# before timing out. Use of Dataset#nowait can often cause this exception when
# retrieving rows.
DatabaseLockTimeout = Class.new(DatabaseError)
).name
(
# Error raised on an invalid operation, such as trying to update or delete
# a joined or grouped dataset when the database does not support that.
InvalidOperation = Class.new(Error)
).name
(
# Error raised when attempting an invalid type conversion.
InvalidValue = Class.new(Error)
).name
# Error raised when the user requests a record via the first! or similar
# method, and the dataset does not yield any rows.
class NoMatchingRow < Error
# The dataset that raised this NoMatchingRow exception.
attr_accessor :dataset
# If the first argument is a Sequel::Dataset, set the dataset related to
# the exception to that argument, instead of assuming it is the exception message.
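# For example:
#
#   raise Sequel::NoMatchingRow.new(DB[:table])
#   # exception.dataset will be DB[:table], and the default message is used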
def initialize(msg=nil)
if msg.is_a?(Sequel::Dataset)
@dataset = msg
msg = nil
end
super
end
end
(
# Error raised when the connection pool cannot acquire a database connection
# before the timeout.
PoolTimeout = Class.new(Error)
).name
(
# Error that you should raise to signal a rollback of the current transaction.
# The transaction block will catch this exception, rollback the current transaction,
# and won't reraise it (unless a reraise is requested).
Rollback = Class.new(Error)
).name
end
sequel-5.63.0/lib/sequel/extensions/ 0000775 0000000 0000000 00000000000 14342141206 0017343 5 ustar 00root root 0000000 0000000 sequel-5.63.0/lib/sequel/extensions/_model_constraint_validations.rb 0000664 0000000 0000000 00000000773 14342141206 0025777 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module Plugins
module ConstraintValidations
module DatabaseMethods
# A hash of validation method call metadata for all tables in the database.
# The hash is keyed by table name string and contains arrays of validation
# method call arrays.
attr_accessor :constraint_validations
end
end
end
Database.register_extension(:_model_constraint_validations, Plugins::ConstraintValidations::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/_model_pg_row.rb 0000664 0000000 0000000 00000001411 14342141206 0022501 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
module Sequel
module Plugins
module PgRow
module DatabaseMethods
# Handle Sequel::Model instances in bound variables.
def bound_variable_arg(arg, conn)
case arg
when Sequel::Model
"(#{arg.values.values_at(*arg.columns).map{|v| bound_variable_array(v)}.join(',')})"
else
super
end
end
# If a Sequel::Model instance is given, return it as-is
# instead of attempting to convert it.
def row_type(db_type, v)
if v.is_a?(Sequel::Model)
v
else
super
end
end
end
end
end
Database.register_extension(:_model_pg_row, Plugins::PgRow::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/_pretty_table.rb 0000664 0000000 0000000 00000004446 14342141206 0022535 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# This _pretty_table extension is only for internal use.
# It adds the Sequel::PrettyTable class without modifying
# Sequel::Dataset.
#
# To load the extension:
#
# Sequel.extension :_pretty_table
#
# Related module: Sequel::PrettyTable
#
module Sequel
module PrettyTable
# Prints nice-looking plain-text tables via puts
#
# +--+-------+
# |id|name |
# |--+-------|
# |1 |fasdfas|
# |2 |test |
# +--+-------+
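#
# A sketch of typical usage:
#
#   Sequel.extension :_pretty_table
#   Sequel::PrettyTable.print(DB[:table].all, [:id, :name])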
def self.print(records, columns=nil)
puts string(records, columns)
end
# Return the string that #print will print via puts.
def self.string(records, columns = nil) # records is an array of hashes
columns ||= records.first.keys.sort
sizes = column_sizes(records, columns)
sep_line = separator_line(columns, sizes)
array = [sep_line, header_line(columns, sizes), sep_line]
records.each {|r| array << data_line(columns, sizes, r)}
array << sep_line
array.join("\n")
end
# Hash of the maximum size of the value for each column
def self.column_sizes(records, columns) # :nodoc:
sizes = Hash.new(0)
columns.each do |c|
sizes[c] = c.to_s.size
end
records.each do |r|
columns.each do |c|
s = r[c].to_s.size
sizes[c] = s if s > sizes[c]
end
end
sizes
end
# String for each data line
def self.data_line(columns, sizes, record) # :nodoc:
String.new << '|' << columns.map {|c| format_cell(sizes[c], record[c])}.join('|') << '|'
end
# Format the value so it takes up exactly size characters
def self.format_cell(size, v) # :nodoc:
case v
when Integer
"%#{size}d" % v
when Float, BigDecimal
"%#{size}g" % v
else
"%-#{size}s" % v.to_s
end
end
# String for header line
def self.header_line(columns, sizes) # :nodoc:
String.new << '|' << columns.map {|c| "%-#{sizes[c]}s" % c.to_s}.join('|') << '|'
end
# String for separator line
def self.separator_line(columns, sizes) # :nodoc:
String.new << '+' << columns.map {|c| '-' * sizes[c]}.join('+') << '+'
end
private_class_method :column_sizes, :data_line, :format_cell, :header_line, :separator_line
end
end
sequel-5.63.0/lib/sequel/extensions/any_not_empty.rb 0000664 0000000 0000000 00000002370 14342141206 0022557 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The any_not_empty extension changes the behavior of Dataset#any?
# if called without a block. By default, this method uses the
# standard Enumerable behavior of enumerating results and seeing
# if any result is not false or nil. With this extension, it
# just checks whether the dataset is empty. This approach can
# be much faster if the dataset is large.
#
# DB[:table].any?
# # SELECT * FROM table
#
# DB[:table].extension(:any_not_empty).any?
# # SELECT 1 as one FROM table LIMIT 1
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:any_not_empty)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:any_not_empty)
#
# Note that this can result in any? returning a different result if
# the dataset has a row_proc that can return false or nil.
#
# Related module: Sequel::AnyNotEmpty
#
module Sequel
module AnyNotEmpty
# If a block is not given, return whether the dataset is not empty.
def any?
if defined?(yield)
super
else
!empty?
end
end
end
Dataset.register_extension(:any_not_empty, AnyNotEmpty)
end
sequel-5.63.0/lib/sequel/extensions/arbitrary_servers.rb 0000664 0000000 0000000 00000006315 14342141206 0023445 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The arbitrary_servers extension allows you to connect to arbitrary
# servers/shards that were not defined when you created the database.
# To use it, you first load the extension into the Database object:
#
# DB.extension :arbitrary_servers
#
# Then you can pass arbitrary connection options for the server/shard
# to use as a hash:
#
# DB[:table].server(host: '...', database: '...').all
#
# Because Sequel can never be sure that the connection will be reused,
# arbitrary connections are disconnected as soon as the outermost block
# that uses them exits. So this example uses the same connection:
#
# DB.transaction(server: {host: '...', database: '...'}) do |c|
# DB.transaction(server: {host: '...', database: '...'}) do |c2|
# # c == c2
# end
# end
#
# But this example does not:
#
# DB.transaction(server: {host: '...', database: '...'}) do |c|
# end
# DB.transaction(server: {host: '...', database: '...'}) do |c2|
# # c != c2
# end
#
# You can use this extension in conjunction with the server_block
# extension:
#
# DB.with_server(host: '...', database: '...') do
# DB.synchronize do
# # All of these use the host/database given to with_server
# DB[:table].insert(c: 1)
# DB[:table].update(c: 2)
# DB.tables
# DB[:table].all
# end
# end
#
# Anyone using this extension in conjunction with the server_block
# extension may want to do the following so that you don't need
# to call synchronize separately:
#
# def DB.with_server(*a)
# super(*a){synchronize{yield}}
# end
#
# Note that this extension only works with the sharded threaded connection
# pool. If you are using the sharded single connection pool, you need
# to switch to the sharded threaded connection pool before using this
# extension.
#
# Related module: Sequel::ArbitraryServers
#
module Sequel
module ArbitraryServers
private
# If server is a hash, create a new connection for
# it, and cache it first by thread and then server.
def acquire(thread, server)
if server.is_a?(Hash)
sync{@allocated[thread] ||= {}}[server] = make_new(server)
else
super
end
end
# If server is a hash, the entry for it probably doesn't
# exist in the @allocated hash, so check for existence to
# avoid calling nil.[]
def owned_connection(thread, server)
if server.is_a?(Hash)
if a = sync{@allocated[thread]}
a[server]
end
else
super
end
end
# If server is a hash, return it directly.
def pick_server(server)
if server.is_a?(Hash)
server
else
super
end
end
# If server is a hash, delete the thread from the allocated
# connections for that server. Additionally, if this was the last thread
# using that server, delete the server from the @allocated hash.
def release(thread, conn, server)
if server.is_a?(Hash)
a = @allocated[thread]
a.delete(server)
@allocated.delete(thread) if a.empty?
disconnect_connection(conn)
else
super
end
end
end
Database.register_extension(:arbitrary_servers){|db| db.pool.extend(ArbitraryServers)}
end
sequel-5.63.0/lib/sequel/extensions/async_thread_pool.rb 0000664 0000000 0000000 00000037442 14342141206 0023377 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The async_thread_pool extension adds support for running database
# queries in a separate threads using a thread pool. With the following
# code
#
# DB.extension :async_thread_pool
# foos = DB[:foos].async.where(name: 'A'..'M').all
# bar_names = DB[:bar].async.select_order_map(:name)
# baz_1 = DB[:bazes].async.first(id: 1)
#
# All 3 queries will be run in separate threads. +foos+, +bar_names+
# and +baz_1+ will be proxy objects. Calling a method on the proxy
# object will wait for the query to be run, and will return the result
# of calling that method on the result of the query method. For example,
# if you run:
#
# foos = DB[:foos].async.where(name: 'A'..'M').all
# bar_names = DB[:bars].async.select_order_map(:name)
# baz_1 = DB[:bazes].async.first(id: 1)
# sleep(1)
# foos.size
# bar_names.first
# baz_1.name
#
# These three queries will generally be run concurrently in separate
# threads. If you instead run:
#
# DB[:foos].async.where(name: 'A'..'M').all.size
# DB[:bars].async.select_order_map(:name).first
# DB[:bazes].async.first(id: 1).name
#
# Then will run each query sequentially, since you need the result of
# one query before running the next query. The queries will still be
# run in separate threads (by default).
#
# What is run in the separate thread is the entire method call that
# returns results. So with the original example:
#
# foos = DB[:foos].async.where(name: 'A'..'M').all
# bar_names = DB[:bars].async.select_order_map(:name)
# baz_1 = DB[:bazes].async.first(id: 1)
#
# The +all+, select_order_map(:name), and first(id: 1)
# calls are run in separate threads. If a block is passed to a method
# such as +all+ or +each+, the block is also run in that thread. If you
# have code such as:
#
# h = {}
# DB[:foos].async.each{|row| h[row[:id]] = row}
# bar_names = DB[:bars].async.select_order_map(:name)
# p h
#
# You may end up with it printing an empty hash or partial hash, because the
# async +each+ call may not have run or finished running. Since the
# p h code relies on a side-effect of the +each+ block and not the
# return value of the +each+ call, it will not wait for the loading.
#
# You should avoid using +async+ for any queries where you are ignoring the
# return value, as otherwise you have no way to wait for the query to be run.
#
# Datasets that use async will use async threads to load data for the majority
# of methods that can return data. However, dataset methods that return
# enumerators will not use an async thread (e.g. calling Dataset#map
# without a block or arguments does not use an async thread or return a
# proxy object).
#
# Because async methods (including their blocks) run in a separate thread, you
# should not use control flow modifiers such as +return+ or +break+ in async
# queries. Doing so will result in an error.
#
# Because async results are returned as proxy objects, it's a bad idea
# to use them in a boolean setting:
#
# result = DB[:foo].async.get(:boolean_column)
# # or:
# result = DB[:foo].async.first
#
# # ...
# if result
# # will always execute this branch, since result is a proxy object
# end
#
# In this case, you can call the +__value+ method to return the actual
# result:
#
# if result.__value
# # will not execute this branch if the dataset method returned nil or false
# end
#
# Similarly, because a proxy object is used, you should be careful using the
# result in a case statement or an argument to Class#===:
#
# # ...
# case result
# when Hash, true, false
# # will never take this branch, since result is a proxy object
# end
#
# Similar to usage in an +if+ statement, you should use +__value+:
#
# case result.__value
# when Hash, true, false
# # will take this branch if the dataset method returned a Hash, true, or false
# end
#
# On Ruby 2.2+, you can use +itself+ instead of +__value+. It's preferable to
# use +itself+ if you can, as that will allow code to work with both proxy
# objects and regular objects.
#
# Because separate threads and connections are used for async queries,
# they do not use any state on the current connection/thread. So if
# you do:
#
# DB.transaction{DB[:table].async.all}
#
# Be aware that the transaction runs on one connection, and the SELECT
# query on a different connection. If you are currently using
# transactional testing (running each test inside a transaction/savepoint),
# and want to start using this extension, you should first switch to
# non-transactional testing of the code that will use the async thread
# pool before using this extension, as otherwise the use of
# Dataset#async will likely break your tests.
#
# If you are using Database#synchronize to checkout a connection, the
# same issue applies, where the async query runs on a different
# connection:
#
# DB.synchronize{DB[:table].async.all}
#
# Similarly, if you are using the server_block extension, any async
# queries inside with_server blocks will not use the server specified:
#
# DB.with_server(:shard1) do
# DB[:a].all # Uses shard1
# DB[:a].async.all # Uses default shard
# end
#
# You need to manually specify the shard for any dataset using an async
# query:
#
# DB.with_server(:shard1) do
# DB[:a].all # Uses shard1
# DB[:a].async.server(:shard1).all # Uses shard1
# end
#
# When using the async_thread_pool extension, the size of the async thread pool
# can be set by using the +:num_async_threads+ Database option, which must
# be set before loading the async_thread_pool extension. This defaults
# to the size of the Database object's connection pool.
#
# By default, for consistent behavior, the async_thread_pool extension
# will always run the query in a separate thread. However, in some cases,
# such as when the async thread pool is busy and the results of a query
# are needed right away, it can improve performance to allow preemption,
# so that the query will run in the current thread instead of waiting
# for an async thread to become available. With the following code:
#
# foos = DB[:foos].async.where(name: 'A'..'M').all
# bar_names = DB[:bar].async.select_order_map(:name)
# if foos.length > 4
# baz_1 = DB[:bazes].async.first(id: 1)
# end
#
# Whether you need the +baz_1+ variable depends on the value of foos.
# If the async thread pool is busy, and by the time the +foos.length+
# call is made, the async thread pool has not started the processing
# to get the +foos+ value, it can improve performance to start that
# processing in the current thread, since it is needed immediately to
# determine whether to schedule the query to get the +baz_1+ variable.
# The default is to not allow preemption, because if the current
# thread is used, it may have already checked out a connection that
# could be used, and that connection could be inside a transaction or
# have some other manner of connection-specific state applied to it.
# If you want to allow preemption, you can set the
# +:preempt_async_thread+ Database option before loading the
# async_thread_pool extension.
#
# Related module: Sequel::Database::AsyncThreadPool::DatasetMethods
#
module Sequel
module Database::AsyncThreadPool
# JobProcessor is a wrapper around a single thread, that will
# process a queue of jobs until it is shut down.
class JobProcessor # :nodoc:
def self.create_finalizer(queue, pool)
proc{run_finalizer(queue, pool)}
end
def self.run_finalizer(queue, pool)
# Push a nil for each thread using the queue, signalling
# that thread to close.
pool.each{queue.push(nil)}
# Join each of the closed threads.
pool.each(&:join)
# Clear the thread pool. Probably not necessary, but this allows
# for a simple way to check whether this finalizer has been run.
pool.clear
nil
end
private_class_method :run_finalizer
def initialize(queue)
@thread = ::Thread.new do
while proxy = queue.pop
proxy.__send__(:__run)
end
end
end
# Join the thread, should only be called by the related finalizer.
def join
@thread.join
end
end
# Wrapper for exception instances raised by async jobs. The
# wrapped exception will be raised by the code getting the value
# of the job.
WrappedException = Struct.new(:exception)
# Base proxy object class for jobs processed by async threads and
# the returned result.
class BaseProxy < BasicObject
# Store a block that returns the result when called.
def initialize(&block)
::Kernel.raise Error, "must provide block for an async job" unless block
@block = block
end
# Pass all method calls to the returned result.
def method_missing(*args, &block)
__value.public_send(*args, &block)
end
# :nocov:
ruby2_keywords(:method_missing) if respond_to?(:ruby2_keywords, true)
# :nocov:
# Delegate respond_to? calls to the returned result.
def respond_to_missing?(*args)
__value.respond_to?(*args)
end
# Override some methods defined by default so they apply to the
# returned result and not the current object.
[:!, :==, :!=, :instance_eval, :instance_exec].each do |method|
define_method(method) do |*args, &block|
__value.public_send(method, *args, &block)
end
end
# Wait for the value to be loaded if it hasn't already been loaded.
# If the code to load the return value raised an exception that was
# wrapped, reraise the exception.
def __value
unless defined?(@value)
__get_value
end
if @value.is_a?(WrappedException)
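# Kernel#raise calls #exception on its argument, and the
# WrappedException struct's exception accessor returns the
# original exception, so this reraises the wrapped exception.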
::Kernel.raise @value
end
@value
end
private
# Run the block and return the block value. If the block call raises
# an exception, wrap the exception.
def __run_block
# This may not catch concurrent calls (unless surrounded by a mutex), but
# it's not worth trying to protect against that. It's enough to just check for
# multiple non-concurrent calls.
::Kernel.raise Error, "Cannot run async block multiple times" unless block = @block
@block = nil
begin
block.call
rescue ::Exception => e
WrappedException.new(e)
end
end
end
# Default object class for async job/proxy result. This uses a queue for
# synchronization. The JobProcessor will push a result onto the queue,
# and the code to get the value will pop the result from that queue (and
# repush the result to handle thread safety).
class Proxy < BaseProxy
def initialize
super
@queue = ::Queue.new
end
private
def __run
@queue.push(__run_block)
end
def __get_value
@value = @queue.pop
# Handle thread-safety by repushing the popped value, so that
# concurrent calls will receive the same value
@queue.push(@value)
end
end
# Object class for async job/proxy result when the :preempt_async_thread
# Database option is used. Uses a mutex for synchronization, and either
# the JobProcessor or the calling thread can run code to get the value.
class PreemptableProxy < BaseProxy
def initialize
super
@mutex = ::Mutex.new
end
private
def __get_value
@mutex.synchronize do
unless defined?(@value)
@value = __run_block
end
end
end
alias __run __get_value
end
module DatabaseMethods
def self.extended(db)
db.instance_exec do
unless pool.pool_type == :threaded || pool.pool_type == :sharded_threaded
raise Error, "can only load async_thread_pool extension if using threaded or sharded_threaded connection pool"
end
num_async_threads = opts[:num_async_threads] ? typecast_value_integer(opts[:num_async_threads]) : (Integer(opts[:max_connections] || 4))
raise Error, "must have positive number for num_async_threads" if num_async_threads <= 0
proxy_klass = typecast_value_boolean(opts[:preempt_async_thread]) ? PreemptableProxy : Proxy
define_singleton_method(:async_job_class){proxy_klass}
queue = @async_thread_queue = Queue.new
pool = @async_thread_pool = num_async_threads.times.map{JobProcessor.new(queue)}
ObjectSpace.define_finalizer(db, JobProcessor.create_finalizer(queue, pool))
extend_datasets(DatasetMethods)
end
end
private
# Wrap the block in a job/proxy object and schedule it to run using the async thread pool.
def async_run(&block)
proxy = async_job_class.new(&block)
@async_thread_queue.push(proxy)
proxy
end
end
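# The dataset methods that run async unconditionally, only when a block
# is given, and only when arguments or a block are given, respectively.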
ASYNC_METHODS = ([:all?, :any?, :drop, :entries, :grep_v, :include?, :inject, :member?, :minmax, :none?, :one?, :reduce, :sort, :take, :tally, :to_a, :to_h, :uniq, :zip] & Enumerable.instance_methods) + (Dataset::ACTION_METHODS - [:map, :paged_each])
ASYNC_BLOCK_METHODS = ([:collect, :collect_concat, :detect, :drop_while, :each_cons, :each_entry, :each_slice, :each_with_index, :each_with_object, :filter_map, :find, :find_all, :find_index, :flat_map, :max_by, :min_by, :minmax_by, :partition, :reject, :reverse_each, :sort_by, :take_while] & Enumerable.instance_methods) + [:paged_each]
ASYNC_ARGS_OR_BLOCK_METHODS = [:map]
module DatasetMethods
# Define a method in the given module that will run the given method using an async thread
# if the current dataset is async.
def self.define_async_method(mod, method)
mod.send(:define_method, method) do |*args, &block|
if @opts[:async]
ds = sync
db.send(:async_run){ds.send(method, *args, &block)}
else
super(*args, &block)
end
end
end
# Define a method in the given module that will run the given method using an async thread
# if the current dataset is async and a block is provided.
def self.define_async_block_method(mod, method)
mod.send(:define_method, method) do |*args, &block|
if block && @opts[:async]
ds = sync
db.send(:async_run){ds.send(method, *args, &block)}
else
super(*args, &block)
end
end
end
# Define a method in the given module that will run the given method using an async thread
# if the current dataset is async and arguments or a block is provided.
def self.define_async_args_or_block_method(mod, method)
mod.send(:define_method, method) do |*args, &block|
if (block || !args.empty?) && @opts[:async]
ds = sync
db.send(:async_run){ds.send(method, *args, &block)}
else
super(*args, &block)
end
end
end
# Override all of the methods that return results to do the processing in an async thread
# if they have been marked to run async and should run async (i.e. they don't return an
# Enumerator).
ASYNC_METHODS.each{|m| define_async_method(self, m)}
ASYNC_BLOCK_METHODS.each{|m| define_async_block_method(self, m)}
ASYNC_ARGS_OR_BLOCK_METHODS.each{|m| define_async_args_or_block_method(self, m)}
# Return a cloned dataset that will load results using the async thread pool.
def async
cached_dataset(:_async) do
clone(:async=>true)
end
end
# Return a cloned dataset that will not load results using the async thread pool.
# Only used if the current dataset has been marked as using the async thread pool.
def sync
cached_dataset(:_sync) do
clone(:async=>false)
end
end
end
end
Database.register_extension(:async_thread_pool, Database::AsyncThreadPool::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/auto_literal_strings.rb 0000664 0000000 0000000 00000004214 14342141206 0024126 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The auto_literal_strings extension treats string values passed as filter
# arguments as SQL query fragments. This is the behavior of previous
# versions of Sequel. Using this extension makes using raw SQL fragments
# easier, since you don't need to wrap them with Sequel.lit, but also makes
# it easier to introduce SQL injection vulnerabilities into the application.
# It is only recommended to use this extension for
# backwards compatibility with previous versions of Sequel.
#
# With this extension, if a single string is given, it is used as an SQL
# query fragment:
#
# ds = DB[:table].extension(:auto_literal_strings)
# ds.where("name > 'A'")
# # SELECT * FROM table WHERE (name > 'A')
#
# If additional arguments are given, they are used as placeholders:
#
# ds.where("name > ?", "A")
# # SELECT * FROM table WHERE (name > 'A')
#
# Named placeholders can also be used with a hash:
#
# ds.where("name > :a", a: "A")
# # SELECT * FROM table WHERE (name > 'A')
#
# This extension also allows the use of a plain string passed to Dataset#update:
#
# ds.update("column = column + 1")
# # UPDATE table SET column = column + 1
#
# Related module: Sequel::Dataset::AutoLiteralStrings
#
module Sequel
class Dataset
module AutoLiteralStrings
# Treat plain strings as literal strings, and arrays where the first element
# is a string as a literal string with placeholders.
def filter_expr(expr = nil)
case expr
when LiteralString
super
when String
super(LiteralString.new(expr))
when Array
if (sexpr = expr.first).is_a?(String)
super(SQL::PlaceholderLiteralString.new(sexpr, expr[1..-1], true))
else
super
end
else
super
end
end
# Treat plain strings as literal strings.
def update_sql(values=OPTS)
case values
when LiteralString
super
when String
super(LiteralString.new(values))
else
super
end
end
end
register_extension(:auto_literal_strings, AutoLiteralStrings)
end
end
sequel-5.63.0/lib/sequel/extensions/blank.rb 0000664 0000000 0000000 00000001633 14342141206 0020762 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The blank extension adds the blank? method to all objects (e.g. Object#blank?).
#
# To load the extension:
#
# Sequel.extension :blank
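#
# Once loaded, blank? is defined on all objects:
#
#   nil.blank?     # => true
#   "   ".blank?   # => true
#   0.blank?       # => false
#   [].blank?      # => true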
[FalseClass, Object, NilClass, Numeric, String, TrueClass].each do |klass|
# :nocov:
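# Re-aliasing an existing blank? method to itself avoids method
# redefinition warnings (e.g. if ActiveSupport already defined blank?).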
if klass.method_defined?(:blank?)
klass.send(:alias_method, :blank?, :blank?)
end
# :nocov:
end
class FalseClass
# false is always blank
def blank?
true
end
end
class Object
# Objects are blank if they respond true to empty?
def blank?
respond_to?(:empty?) && empty?
end
end
class NilClass
# nil is always blank
def blank?
true
end
end
class Numeric
# Numerics are never blank (not even 0)
def blank?
false
end
end
class String
# Strings are blank if they are empty or include only whitespace
def blank?
strip.empty?
end
end
class TrueClass
# true is never blank
def blank?
false
end
end
sequel-5.63.0/lib/sequel/extensions/caller_logging.rb 0000664 0000000 0000000 00000004512 14342141206 0022642 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The caller_logging extension includes caller information before
# query logging, showing which code caused the query. It skips
# internal Sequel code, showing the first non-Sequel caller line.
#
# DB.extension :caller_logging
# DB[:table].first
# # Logger:
# # (0.000041s) (source: /path/to/app/foo/t.rb:12 in `get_first`) SELECT * FROM table LIMIT 1
#
# You can further filter the caller lines by setting
# Database#caller_logging_ignore to a regexp of additional
# caller lines to ignore. This is useful if you have specific
# methods or internal extensions/plugins that you would also
# like to ignore as they obscure the code actually making the
# request.
#
# DB.caller_logging_ignore = %r{/path/to/app/lib/plugins}
#
# You can also format the caller before it is placed in the logger,
# using +caller_logging_formatter+:
#
# DB.caller_logging_formatter = lambda do |caller|
# "(#{caller.sub(/\A\/path\/to\/app\//, '')})"
# end
# DB[:table].first
# # Logger:
# # (0.000041s) (foo/t.rb:12 in `get_first`) SELECT * FROM table LIMIT 1
#
# Related module: Sequel::CallerLogging
require 'rbconfig'
#
module Sequel
module CallerLogging
SEQUEL_LIB_PATH = (File.expand_path('../../..', __FILE__) + '/').freeze
# A regexp of caller lines to ignore, in addition to internal Sequel and Ruby code.
attr_accessor :caller_logging_ignore
# A callable to format the external caller
attr_accessor :caller_logging_formatter
# Include caller information when logging query.
def log_connection_yield(sql, conn, args=nil)
if !@loggers.empty? && (external_caller = external_caller_for_log)
sql = "#{external_caller} #{sql}"
end
super
end
private
# The caller to log, ignoring internal Sequel and Ruby code, and user specified
# lines to ignore.
def external_caller_for_log
ignore = caller_logging_ignore
c = caller.find do |line|
!(line.start_with?(SEQUEL_LIB_PATH) ||
line.start_with?(RbConfig::CONFIG["rubylibdir"]) ||
(ignore && line =~ ignore))
end
if c
c = if formatter = caller_logging_formatter
formatter.call(c)
else
"(source: #{c})"
end
end
c
end
end
Database.register_extension(:caller_logging, CallerLogging)
end
sequel-5.63.0/lib/sequel/extensions/columns_introspection.rb 0000664 0000000 0000000 00000005137 14342141206 0024336 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The columns_introspection extension attempts to introspect the
# selected columns for a dataset before issuing a query. If it
# thinks it can guess correctly at the columns the query will use,
# it will return the columns without issuing a database query.
#
# This method is not fool-proof, it's possible that some databases
# will use column names that Sequel does not expect. Also, it
# may not correctly handle all cases.
#
# To attempt to introspect columns for a single dataset:
#
# ds = ds.extension(:columns_introspection)
#
# To attempt to introspect columns for all datasets on a single database:
#
# DB.extension(:columns_introspection)
#
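# For example, with a simple explicit selection (a sketch, assuming an
# id and a name column):
#
#   ds = DB[:table].extension(:columns_introspection)
#   ds.select(:id, :name).columns
#   # => [:id, :name] (without a database query)
#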
# Related module: Sequel::ColumnsIntrospection
#
module Sequel
module ColumnsIntrospection
# Attempt to guess the columns that will be returned
# if there are columns selected, in order to skip a database
# query to retrieve the columns. This should work with
# Symbols, SQL::Identifiers, SQL::QualifiedIdentifiers, and
# SQL::AliasedExpressions.
def columns
if cols = _columns
return cols
end
if (pcs = probable_columns) && pcs.all?
self.columns = pcs
else
super
end
end
protected
# Return an array of probable column names for the dataset, or
# nil if it is not possible to determine that through
# introspection.
def probable_columns
if (cols = opts[:select]) && !cols.empty?
cols.map{|c| probable_column_name(c)}
elsif !opts[:join] && !opts[:with] && (from = opts[:from]) && from.length == 1 && (from = from.first)
if from.is_a?(SQL::AliasedExpression)
from = from.expression
end
case from
when Dataset
from.probable_columns
when Symbol, SQL::Identifier, SQL::QualifiedIdentifier
schemas = db.instance_variable_get(:@schemas)
if schemas && (table = literal(from)) && (sch = Sequel.synchronize{schemas[table]})
sch.map{|c,_| c}
end
end
end
end
private
# Return the probable name of the column, or nil if one
# cannot be determined.
def probable_column_name(c)
case c
when Symbol
_, c, a = split_symbol(c)
(a || c).to_sym
when SQL::Identifier
c.value.to_sym
when SQL::QualifiedIdentifier
c.column.to_sym
when SQL::AliasedExpression
a = c.alias
a.is_a?(SQL::Identifier) ? a.value.to_sym : a.to_sym
end
end
end
Dataset.register_extension(:columns_introspection, Sequel::ColumnsIntrospection)
end
sequel-5.63.0/lib/sequel/extensions/connection_expiration.rb 0000664 0000000 0000000 00000006350 14342141206 0024275 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The connection_expiration extension modifies a database's
# connection pool to validate that connections checked out
# from the pool are not expired, before yielding them for
# use. If it detects an expired connection, it removes it
# from the pool and tries the next available connection,
# creating a new connection if no available connection is
# unexpired. Example of use:
#
# DB.extension(:connection_expiration)
#
# The default connection timeout is 14400 seconds (4 hours).
# To override it:
#
# DB.pool.connection_expiration_timeout = 3600 # 1 hour
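#
# A random delay (in seconds) can also be added to the timeout, so that
# connections created at the same time do not all expire at once:
#
# DB.pool.connection_expiration_random_delay = 20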
#
# Note that this extension only affects the default threaded
# and the sharded threaded connection pool. The single
# threaded and sharded single threaded connection pools are
# not affected. As the only reason to use the single threaded
# pools is for speed, and this extension makes the connection
# pool slower, there's not much point in modifying this
# extension to work with the single threaded pools. The
# threaded pools work fine even in single threaded code, so if
# you are currently using a single threaded pool and want to
# use this extension, switch to using a threaded pool.
#
# Related module: Sequel::ConnectionExpiration
#
module Sequel
module ConnectionExpiration
class Retry < Error; end
Sequel::Deprecation.deprecate_constant(self, :Retry)
# The number of seconds that need to pass since
# connection creation before expiring a connection.
# Defaults to 14400 seconds (4 hours).
attr_accessor :connection_expiration_timeout
# The maximum number of seconds that will be added as a random delay to the expiration timeout
# Defaults to 0 seconds (no random delay).
attr_accessor :connection_expiration_random_delay
# Initialize the data structures used by this extension.
def self.extended(pool)
pool.instance_exec do
sync do
@connection_expiration_timestamps ||= {}
@connection_expiration_timeout ||= 14400
@connection_expiration_random_delay ||= 0
end
end
end
private
# Clean up expiration timestamps during disconnect.
def disconnect_connection(conn)
sync{@connection_expiration_timestamps.delete(conn)}
super
end
# Record the time the connection was created.
def make_new(*)
conn = super
@connection_expiration_timestamps[conn] = [Sequel.start_timer, @connection_expiration_timeout + (rand * @connection_expiration_random_delay)].freeze
conn
end
# When acquiring a connection, check if the connection is expired.
# If it is expired, disconnect the connection, and retry with a new
# connection.
def acquire(*a)
conn = nil
1.times do
if (conn = super) &&
(cet = sync{@connection_expiration_timestamps[conn]}) &&
Sequel.elapsed_seconds_since(cet[0]) > cet[1]
if pool_type == :sharded_threaded
sync{allocated(a.last).delete(Sequel.current)}
else
sync{@allocated.delete(Sequel.current)}
end
disconnect_connection(conn)
redo
end
end
conn
end
end
Database.register_extension(:connection_expiration){|db| db.pool.extend(ConnectionExpiration)}
end
sequel-5.63.0/lib/sequel/extensions/connection_validator.rb 0000664 0000000 0000000 00000010473 14342141206 0024101 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The connection_validator extension modifies a database's
# connection pool to validate that connections checked out
# from the pool are still valid, before yielding them for
# use. If it detects an invalid connection, it removes it
# from the pool and tries the next available connection,
# creating a new connection if no available connection is
# valid. Example of use:
#
# DB.extension(:connection_validator)
#
# As checking connections for validity involves issuing a
# query, which is potentially an expensive operation,
# the validation checks are only run if the connection has
# been idle for longer than a certain threshold. By default,
# that threshold is 3600 seconds (1 hour), but it can be
# modified by the user, set to -1 to always validate
# connections on checkout:
#
# DB.pool.connection_validation_timeout = -1
#
# Note that if you set the timeout to validate connections
# on every checkout, you should probably manually control
# connection checkouts on a coarse basis, using
# Database#synchronize. In a web application, the optimal
# place for that would be a rack middleware. Validating
# connections on every checkout without setting up coarse
# connection checkouts will hurt performance, in some cases
# significantly. Note that setting up coarse connection
# checkouts reduces the concurrency level acheivable. For
# example, in a web application, using Database#synchronize
# in a rack middleware will limit the number of concurrent
# web requests to the number of connections in the database
# connection pool.
#
# Note that this extension only affects the default threaded
# and the sharded threaded connection pool. The single
# threaded and sharded single threaded connection pools are
# not affected. As the only reason to use the single threaded
# pools is for speed, and this extension makes the connection
# pool slower, there's not much point in modifying this
# extension to work with the single threaded pools. The
# threaded pools work fine even in single threaded code, so if
# you are currently using a single threaded pool and want to
# use this extension, switch to using a threaded pool.
#
# Related module: Sequel::ConnectionValidator
#
module Sequel
module ConnectionValidator
class Retry < Error; end
Sequel::Deprecation.deprecate_constant(self, :Retry)
# The number of seconds that need to pass since
# connection checkin before attempting to validate
# the connection when checking it out from the pool.
# Defaults to 3600 seconds (1 hour).
attr_accessor :connection_validation_timeout
# Initialize the data structures used by this extension.
def self.extended(pool)
pool.instance_exec do
sync do
@connection_timestamps ||= {}
@connection_validation_timeout ||= 3600
end
end
# Make sure the valid connection SQL query is precached,
# otherwise it's possible it will happen at runtime. While
# it should work correctly at runtime, it's better to avoid
# the possibility of failure altogether.
pool.db.send(:valid_connection_sql)
end
private
# Record the time the connection was checked back into the pool.
def checkin_connection(*)
conn = super
@connection_timestamps[conn] = Sequel.start_timer
conn
end
# Clean up timestamps during disconnect.
def disconnect_connection(conn)
sync{@connection_timestamps.delete(conn)}
super
end
# When acquiring a connection, if it has been
# idle for longer than the connection validation timeout,
# test the connection for validity. If it is not valid,
# disconnect the connection, and retry with a new connection.
def acquire(*a)
conn = nil
1.times do
if (conn = super) &&
(timer = sync{@connection_timestamps.delete(conn)}) &&
Sequel.elapsed_seconds_since(timer) > @connection_validation_timeout &&
!db.valid_connection?(conn)
if pool_type == :sharded_threaded
sync{allocated(a.last).delete(Sequel.current)}
else
sync{@allocated.delete(Sequel.current)}
end
disconnect_connection(conn)
redo
end
end
conn
end
end
Database.register_extension(:connection_validator){|db| db.pool.extend(ConnectionValidator)}
end
sequel-5.63.0/lib/sequel/extensions/constant_sql_override.rb 0000664 0000000 0000000 00000003672 14342141206 0024307 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The constant_sql_override extension allows you to change the SQL
# generated for Sequel constants.
#
# One possible use-case for this is to have Sequel::CURRENT_TIMESTAMP use UTC time when
# you have Sequel.database_timezone = :utc, but the database uses localtime when
# generating CURRENT_TIMESTAMP.
#
# You can set SQL overrides with Database#set_constant_sql:
#
# DB.set_constant_sql(Sequel::CURRENT_TIMESTAMP, "CURRENT_TIMESTAMP AT TIME ZONE 'UTC'")
#
# Now, using Sequel::CURRENT_TIMESTAMP will use your override instead:
#
# Album.where(released_at: Sequel::CURRENT_TIMESTAMP).sql
# # => SELECT "albums".* FROM "albums" WHERE ("released_at" = CURRENT_TIMESTAMP AT TIME ZONE 'UTC')
#
# To use this extension, first load it into your Sequel::Database instance:
#
# DB.extension :constant_sql_override
#
# Related module: Sequel::ConstantSqlOverride
#
module Sequel
module ConstantSqlOverride
module DatabaseMethods
# Create the initial empty hash of constant sql overrides.
def self.extended(db)
db.instance_exec do
@constant_sqls ||= {}
extend_datasets(DatasetMethods)
end
end
# Hash mapping constant symbols to SQL. For internal use only.
attr_reader :constant_sqls # :nodoc:
# Set the SQL to use for the given Sequel::SQL::Constant
def set_constant_sql(constant, override)
@constant_sqls[constant.constant] = override
end
# Freeze the constant_sqls hash to prevent adding new overrides.
def freeze
@constant_sqls.freeze
super
end
end
module DatasetMethods
# Use overridden constant SQL
def constant_sql_append(sql, constant)
if constant_sql = db.constant_sqls[constant]
sql << constant_sql
else
super
end
end
end
end
Database.register_extension(:constant_sql_override, ConstantSqlOverride::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/constraint_validations.rb 0000664 0000000 0000000 00000050475 14342141206 0024464 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The constraint_validations extension is designed to easily create database
# constraints inside create_table and alter_table blocks. It also adds
# relevant metadata about the constraints to a separate table, which the
# constraint_validations model plugin uses to setup automatic validations.
#
# To use this extension, you first need to load it into the database:
#
# DB.extension(:constraint_validations)
#
# Note that you should only need to do this when modifying the constraint
# validations (i.e. when migrating). You should probably not load this
# extension in general application code.
#
# You also need to make sure to add the metadata table for the automatic
# validations. By default, this table is called sequel_constraint_validations.
#
# DB.create_constraint_validations_table
#
# This table should only be created once. For new applications, you
# generally want to create it first, before creating any other application
# tables.
#
# Because migrations instance_exec the up and down blocks on a database,
# using this extension in a migration can be done via:
#
# Sequel.migration do
# up do
# extension(:constraint_validations)
# # ...
# end
# down do
# extension(:constraint_validations)
# # ...
# end
# end
#
# However, note that you cannot use change migrations with this extension,
# you need to use separate up/down migrations.
#
# The API for creating the constraints with automatic validations is
# similar to the validation_helpers model plugin API. However,
# instead of having separate validates_* methods, it just adds a validate
# method that accepts a block to the schema generators. Like the
# create_table and alter_table blocks, this block is instance_execed and
# offers its own DSL. Example:
#
# DB.create_table(:table) do
# Integer :id
# String :name
#
# validate do
# presence :id
# min_length 5, :name
# end
# end
#
# instance_exec is used in this case because create_table and alter_table
# already use instance_exec, so losing access to the surrounding receiver
# is not an issue.
#
# Here's a breakdown of the constraints created for each constraint validation
# method:
#
# All constraints except unique unless :allow_nil is true :: CHECK column IS NOT NULL
# presence (String column) :: CHECK trim(column) != ''
# exact_length 5 :: CHECK char_length(column) = 5
# min_length 5 :: CHECK char_length(column) >= 5
# max_length 5 :: CHECK char_length(column) <= 5
# length_range 3..5 :: CHECK char_length(column) >= 3 AND char_length(column) <= 5
# length_range 3...5 :: CHECK char_length(column) >= 3 AND char_length(column) < 5
# format /foo\\d+/ :: CHECK column ~ 'foo\\d+'
# format /foo\\d+/i :: CHECK column ~* 'foo\\d+'
# like 'foo%' :: CHECK column LIKE 'foo%' ESCAPE '\'
# ilike 'foo%' :: CHECK column ILIKE 'foo%' ESCAPE '\'
# includes ['a', 'b'] :: CHECK column IN ('a', 'b')
# includes [1, 2] :: CHECK column IN (1, 2)
# includes 3..5 :: CHECK column >= 3 AND column <= 5
# includes 3...5 :: CHECK column >= 3 AND column < 5
# operator :>, 1 :: CHECK column > 1
# operator :>=, 2 :: CHECK column >= 2
# operator :<, "M" :: CHECK column < 'M'
# operator :<=, 'K' :: CHECK column <= 'K'
# unique :: UNIQUE (column)
#
# There are some additional API differences:
#
# * Only the :message and :allow_nil options are respected. The :allow_blank
# and :allow_missing options are not respected.
# * A new option, :name, is respected, for providing the name of the constraint. It is highly
# recommended that you provide a name for all constraint validations, as
# otherwise, it is difficult to drop the constraints later.
# * The includes validation only supports an array of strings, an array of
# integers, or a range of integers.
# * There are like and ilike validations, which are similar to the format
# validation but use a case sensitive or case insensitive LIKE pattern. LIKE
# patterns are very simple, so many regexp patterns cannot be expressed by
# them, but only a couple of databases (PostgreSQL and MySQL) support regexp
# patterns.
# * The operator validation only supports >, >=, <, and <= operators, and the
# argument must be a string or an integer.
# * When using the unique validation, column names cannot have embedded commas.
# For similar reasons, when using an includes validation with an array of
# strings, none of the strings in the array can have embedded commas.
# * The unique validation does not support an arbitrary number of columns.
# For a single column, just the symbol should be used, and for an array
# of columns, an array of symbols should be used. There is no support
# for creating two separate unique validations for separate columns in
# a single call.
# * A drop method can be called with a constraint name in an alter_table
# validate block to drop an existing constraint and the related
# validation metadata.
# * While it is allowed to create a presence constraint with :allow_nil
# set to true, doing so does not create a constraint unless the column
# has String type.
#
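# For example, a hypothetical alter_table migration block that drops an
# existing constraint validation and adds a named replacement (the table
# and constraint names are illustrative):
#
# DB.alter_table(:items) do
#   validate do
#     drop :items_name_min_length
#     min_length 6, :name, name: :items_name_min_length_6
#   end
# end
#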
# Note that this extension has the following issues on certain databases:
#
# * MySQL does not support check constraints (they are parsed but ignored),
# so using this extension does not actually set up constraints on MySQL,
# except for the unique constraint. It can still be used on MySQL to
# add the validation metadata so that the plugin can setup automatic
# validations.
# * On SQLite, adding constraints to a table is not supported, so it must
# be emulated by dropping the table and recreating it with the constraints.
# If you want to use this plugin on SQLite with an alter_table block,
# you should drop all constraint validation metadata using
# drop_constraint_validations_for(table: 'table'), and then
# re-add all constraints you want to use inside the alter_table block,
# making no other changes inside the alter_table block.
#
# Dropping a table will automatically delete all constraint validations for
# that table. However, altering a table (e.g. to drop a column) will not
# currently make any changes to the constraint validations metadata.
#
# Related module: Sequel::ConstraintValidations
#
module Sequel
module ConstraintValidations
# The default table name used for the validation metadata.
DEFAULT_CONSTRAINT_VALIDATIONS_TABLE = :sequel_constraint_validations
OPERATORS = {:< => :lt, :<= => :lte, :> => :gt, :>= => :gte}.freeze
REVERSE_OPERATOR_MAP = {:str_lt => :<, :str_lte => :<=, :str_gt => :>, :str_gte => :>=,
:int_lt => :<, :int_lte => :<=, :int_gt => :>, :int_gte => :>=}.freeze
# Set the default validation metadata table name if it has not already
# been set.
def self.extended(db)
db.constraint_validations_table ||= DEFAULT_CONSTRAINT_VALIDATIONS_TABLE
end
# This is the DSL class used for the validate block inside create_table and
# alter_table.
class Generator
# Store the schema generator that encloses this validates block.
def initialize(generator)
@generator = generator
end
# Create constraint validation methods that don't take an argument
%w'presence unique'.each do |v|
class_eval(<<-END, __FILE__, __LINE__+1)
def #{v}(columns, opts=OPTS)
@generator.validation({:type=>:#{v}, :columns=>Array(columns)}.merge!(opts))
end
END
end
# Create constraint validation methods that take an argument
%w'exact_length min_length max_length length_range format like ilike includes'.each do |v|
class_eval(<<-END, __FILE__, __LINE__+1)
def #{v}(arg, columns, opts=OPTS)
@generator.validation({:type=>:#{v}, :columns=>Array(columns), :arg=>arg}.merge!(opts))
end
END
end
# Create operator validation. The op should be either +:>+, +:>=+, +:<+, or +:<=+, and
# the arg should be either a string or an integer.
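# For example, inside a validate block (the constraint name is illustrative):
#
# operator :>=, 18, :age, name: :age_gte_18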
def operator(op, arg, columns, opts=OPTS)
raise Error, "invalid operator (#{op}) used when creating operator validation" unless suffix = OPERATORS[op]
prefix = case arg
when String
"str"
when Integer
"int"
else
raise Error, "invalid argument (#{arg.inspect}) used when creating operator validation"
end
@generator.validation({:type=>:"#{prefix}_#{suffix}", :columns=>Array(columns), :arg=>arg}.merge!(opts))
end
# Given the name of a constraint, drop that constraint from the database,
# and remove the related validation metadata.
def drop(constraint)
@generator.validation({:type=>:drop, :name=>constraint})
end
# Alias of instance_exec for a nicer API.
def process(&block)
instance_exec(&block)
end
end
# Additional methods for the create_table generator to support constraint validations.
module CreateTableGeneratorMethods
# An array of stored validation metadata, used later by the database to create
# constraints.
attr_reader :validations
# Add a validation metadata hash to the stored array.
def validation(opts)
@validations << opts
end
# Call into the validate DSL for creating constraint validations.
def validate(&block)
Generator.new(self).process(&block)
end
end
# Additional methods for the alter_table generator to support constraint validations,
# used to give it a more similar API to the create_table generator.
module AlterTableGeneratorMethods
include CreateTableGeneratorMethods
# Alias of add_constraint for similarity to create_table generator.
def constraint(*args)
add_constraint(*args)
end
# Alias of add_unique_constraint for similarity to create_table generator.
def unique(*args)
add_unique_constraint(*args)
end
end
# The name of the table storing the validation metadata. If modifying this
# from the default, this should be changed directly after loading the
# extension into the database.
attr_accessor :constraint_validations_table
# Create the table storing the validation metadata for all of the
# constraints created by this extension.
def create_constraint_validations_table
create_table(constraint_validations_table) do
String :table, :null=>false
String :constraint_name
String :validation_type, :null=>false
String :column, :null=>false
String :argument
String :message
TrueClass :allow_nil
end
end
# Modify the default create_table generator to include
# the constraint validation methods.
def create_table_generator(&block)
super do
extend CreateTableGeneratorMethods
@validations = []
instance_exec(&block) if block
end
end
# Drop all constraint validations for a table if dropping the table.
def drop_table(*names)
names.each do |name|
if !name.is_a?(Hash) && table_exists?(constraint_validations_table)
drop_constraint_validations_for(:table=>name)
end
end
super
end
# Drop the constraint validations table.
def drop_constraint_validations_table
drop_table(constraint_validations_table)
end
# Delete validation metadata for specific constraints. At least
# one of the following options should be specified:
#
# :table :: The table containing the constraint
# :column :: The column affected by the constraint
# :constraint :: The name of the related constraint
#
# The main reason for this method is when dropping tables
# or columns. If you have previously defined a constraint
# validation on the table or column, you should delete the
# related metadata when dropping the table or column.
# For a table, this isn't a big issue, as it will just result
# in some wasted space, but for columns, if you don't drop
# the related metadata, it could make it impossible to save
# rows, since a validation for a nonexistent column will be
# created.
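# For example, when dropping a column, you might first remove the
# related metadata (table and column names are illustrative):
#
# DB.drop_constraint_validations_for(table: :items, column: :legacy_code)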
def drop_constraint_validations_for(opts=OPTS)
ds = from(constraint_validations_table)
if table = opts[:table]
ds = ds.where(:table=>constraint_validations_literal_table(table))
end
if column = opts[:column]
ds = ds.where(:column=>column.to_s)
end
if constraint = opts[:constraint]
ds = ds.where(:constraint_name=>constraint.to_s)
end
unless table || column || constraint
raise Error, "must specify :table, :column, or :constraint when dropping constraint validations"
end
ds.delete
end
# Modify the default alter_table generator to include
# the constraint validation methods.
def alter_table_generator(&block)
super do
extend AlterTableGeneratorMethods
@validations = []
instance_exec(&block) if block
end
end
private
# After running all of the table alteration statements,
# if there were any constraint validations, run table alteration
# statements to create related constraints. This is purposely
# run after the other statements, as the presence validation
# in alter table requires introspecting the modified model
# schema.
def apply_alter_table_generator(name, generator)
super
unless generator.validations.empty?
gen = alter_table_generator
process_generator_validations(name, gen, generator.validations)
apply_alter_table(name, gen.operations)
end
end
# The value of a blank string. An empty string by default, but nil
# on Oracle as Oracle treats the empty string as NULL.
def blank_string_value
if database_type == :oracle
nil
else
''
end
end
# Return an unquoted literal form of the table name.
# This allows the code to handle schema qualified tables,
# without quoting all table names.
def constraint_validations_literal_table(table)
dataset.with_quote_identifiers(false).literal(table)
end
# Before creating the table, add constraints for all of the
# generators validations to the generator.
def create_table_from_generator(name, generator, options)
unless generator.validations.empty?
process_generator_validations(name, generator, generator.validations)
end
super
end
def constraint_validation_expression(cols, allow_nil)
exprs = cols.map do |c|
expr = yield c
if allow_nil
Sequel.|({c=>nil}, expr)
else
Sequel.&(Sequel.~(c=>nil), expr)
end
end
Sequel.&(*exprs)
end
# For the given table, generator, and validations, add constraints
# to the generator for each of the validations, as well as adding
# validation metadata to the constraint validations table.
def process_generator_validations(table, generator, validations)
drop_rows = []
rows = validations.map do |val|
columns, arg, constraint, validation_type, message, allow_nil = val.values_at(:columns, :arg, :name, :type, :message, :allow_nil)
case validation_type
when :presence
strings, non_strings = columns.partition{|c| generator_string_column?(generator, table, c)}
if !non_strings.empty? && !allow_nil
non_strings_expr = Sequel.&(*non_strings.map{|c| Sequel.~(c=>nil)})
end
unless strings.empty?
strings_expr = constraint_validation_expression(strings, allow_nil){|c| Sequel.~(Sequel.trim(c) => blank_string_value)}
end
expr = if non_strings_expr && strings_expr
Sequel.&(strings_expr, non_strings_expr)
else
strings_expr || non_strings_expr
end
if expr
generator.constraint(constraint, expr)
end
when :exact_length
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| {Sequel.char_length(c) => arg}})
when :min_length
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| Sequel.char_length(c) >= arg})
when :max_length
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| Sequel.char_length(c) <= arg})
when *REVERSE_OPERATOR_MAP.keys
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| Sequel.identifier(c).public_send(REVERSE_OPERATOR_MAP[validation_type], arg)})
when :length_range
op = arg.exclude_end? ? :< : :<=
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| (Sequel.char_length(c) >= arg.begin) & Sequel.char_length(c).public_send(op, arg.end)})
arg = "#{arg.begin}..#{'.' if arg.exclude_end?}#{arg.end}"
when :format
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| {c => arg}})
if arg.casefold?
validation_type = :iformat
end
arg = arg.source
when :includes
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| {c => arg}})
if arg.is_a?(Range)
if arg.begin.is_a?(Integer) && arg.end.is_a?(Integer)
validation_type = :includes_int_range
arg = "#{arg.begin}..#{'.' if arg.exclude_end?}#{arg.end}"
else
raise Error, "validates includes with a range only supports integers currently, cannot handle: #{arg.inspect}"
end
elsif arg.is_a?(Array)
if arg.all?{|x| x.is_a?(Integer)}
validation_type = :includes_int_array
elsif arg.all?{|x| x.is_a?(String)}
validation_type = :includes_str_array
else
raise Error, "validates includes with an array only supports strings and integers currently, cannot handle: #{arg.inspect}"
end
arg = arg.join(',')
else
raise Error, "validates includes only supports arrays and ranges currently, cannot handle: #{arg.inspect}"
end
when :like, :ilike
generator.constraint(constraint, constraint_validation_expression(columns, allow_nil){|c| Sequel.public_send(validation_type, c, arg)})
when :unique
generator.unique(columns, :name=>constraint)
columns = [columns.join(',')]
when :drop
if generator.is_a?(Sequel::Schema::AlterTableGenerator)
unless constraint
raise Error, 'cannot drop a constraint validation without a constraint name'
end
generator.drop_constraint(constraint)
drop_rows << [constraint_validations_literal_table(table), constraint.to_s]
columns = []
else
raise Error, 'cannot drop a constraint validation in a create_table generator'
end
else
raise Error, "invalid or missing validation type: #{val.inspect}"
end
columns.map do |column|
{:table=>constraint_validations_literal_table(table), :constraint_name=>(constraint.to_s if constraint), :validation_type=>validation_type.to_s, :column=>column.to_s, :argument=>(arg.to_s if arg), :message=>(message.to_s if message), :allow_nil=>allow_nil}
end
end
ds = from(constraint_validations_table)
unless drop_rows.empty?
ds.where([:table, :constraint_name]=>drop_rows).delete
end
ds.multi_insert(rows.flatten)
end
# Introspect the generator to determine if column
# created is a string or not.
def generator_string_column?(generator, table, c)
if generator.is_a?(Sequel::Schema::AlterTableGenerator)
# This is the alter table case, which runs after the
# table has been altered, so just check the database
# schema for the column.
schema(table).each do |col, sch|
if col == c
return sch[:type] == :string
end
end
false
else
# This is the create table case, check the metadata
# for the column to be created to see if it is a string.
generator.columns.each do |col|
if col[:name] == c
return [String, :text, :varchar].include?(col[:type])
end
end
false
end
end
end
Database.register_extension(:constraint_validations, ConstraintValidations)
end
sequel-5.63.0/lib/sequel/extensions/core_extensions.rb 0000664 0000000 0000000 00000020031 14342141206 0023073 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# These are extensions to core classes that Sequel enables by default.
# They make using Sequel's DSL easier by adding methods to Array,
# Hash, String, and Symbol to add methods that return Sequel
# expression objects. To load the extension:
#
# Sequel.extension :core_extensions
# This extension loads the core extensions.
def Sequel.core_extensions?
true
end
Sequel.extension :symbol_as
# Sequel extends +Array+ to add methods to implement the SQL DSL.
# Most of these methods require that the array not be empty and that it
# must consist solely of other arrays that have exactly two elements.
class Array
# Return a Sequel::SQL::BooleanExpression created from this array, not matching all of the
# conditions.
#
# ~[[:a, true]] # SQL: (a IS NOT TRUE)
# ~[[:a, 1], [:b, [2, 3]]] # SQL: ((a != 1) OR (b NOT IN (2, 3)))
def ~
Sequel.~(self)
end
# Return a Sequel::SQL::CaseExpression with this array as the conditions and the given
# default value and expression.
#
# [[{a: [2,3]}, 1]].case(0) # SQL: CASE WHEN (a IN (2, 3)) THEN 1 ELSE 0 END
# [[:a, 1], [:b, 2]].case(:d, :c) # SQL: CASE c WHEN a THEN 1 WHEN b THEN 2 ELSE d END
def case(*args)
::Sequel::SQL::CaseExpression.new(self, *args)
end
# Return a Sequel::SQL::ValueList created from this array. Used if this array contains
# all two element arrays and you want it treated as an SQL value list (IN predicate)
# instead of as a conditions specifier (similar to a hash). This is not necessary if you are using
# this array as a value in a filter, but may be necessary if you are using it as a
# value with placeholder SQL:
#
# DB[:a].where([:a, :b]=>[[1, 2], [3, 4]]) # SQL: ((a, b) IN ((1, 2), (3, 4)))
# DB[:a].where('(a, b) IN ?', [[1, 2], [3, 4]]) # SQL: ((a, b) IN ((1 = 2) AND (3 = 4)))
# DB[:a].where('(a, b) IN ?', [[1, 2], [3, 4]].sql_value_list) # SQL: ((a, b) IN ((1, 2), (3, 4)))
def sql_value_list
::Sequel::SQL::ValueList.new(self)
end
# Return a Sequel::SQL::BooleanExpression created from this array, matching all of the
# conditions. Rarely do you need to call this explicitly, as Sequel generally
# assumes that arrays of two element arrays specify this type of condition. One case where
# it can be necessary to use this is if you are using the object as a value in a filter hash
# and want to use the = operator instead of the IN operator (which is used by default for
# arrays of two element arrays).
#
# [[:a, true]].sql_expr # SQL: (a IS TRUE)
# [[:a, 1], [:b, [2, 3]]].sql_expr # SQL: ((a = 1) AND (b IN (2, 3)))
def sql_expr
Sequel[self]
end
# Return a Sequel::SQL::BooleanExpression created from this array, matching none
# of the conditions.
#
# [[:a, true]].sql_negate # SQL: (a IS NOT TRUE)
# [[:a, 1], [:b, [2, 3]]].sql_negate # SQL: ((a != 1) AND (b NOT IN (2, 3)))
def sql_negate
Sequel.negate(self)
end
# Return a Sequel::SQL::BooleanExpression created from this array, matching any of the
# conditions.
#
# [[:a, true]].sql_or # SQL: (a IS TRUE)
# [[:a, 1], [:b, [2, 3]]].sql_or # SQL: ((a = 1) OR (b IN (2, 3)))
def sql_or
Sequel.or(self)
end
# Return a Sequel::SQL::StringExpression representing an SQL string made up of the
# concatenation of this array's elements. If an argument is passed
# it is used in between each element of the array in the SQL
# concatenation.
#
# [:a].sql_string_join # SQL: a
# [:a, :b].sql_string_join # SQL: (a || b)
# [:a, 'b'].sql_string_join # SQL: (a || 'b')
# ['a', :b].sql_string_join(' ') # SQL: ('a' || ' ' || b)
def sql_string_join(joiner=nil)
Sequel.join(self, joiner)
end
end
# Sequel extends +Hash+ to add methods to implement the SQL DSL.
class Hash
# Return a Sequel::SQL::BooleanExpression created from this hash, matching
# all of the conditions in this hash and the condition specified by
# the given argument.
#
# {a: 1} & :b # SQL: ((a = 1) AND b)
# {a: true} & ~:b # SQL: ((a IS TRUE) AND NOT b)
def &(ce)
::Sequel::SQL::BooleanExpression.new(:AND, self, ce)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching
# all of the conditions in this hash or the condition specified by
# the given argument.
#
# {a: 1} | :b # SQL: ((a = 1) OR b)
# {a: true} | ~:b # SQL: ((a IS TRUE) OR NOT b)
def |(ce)
::Sequel::SQL::BooleanExpression.new(:OR, self, ce)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, not matching all of the
# conditions.
#
# ~{a: true} # SQL: (a IS NOT TRUE)
# ~{a: 1, b: [2, 3]} # SQL: ((a != 1) OR (b NOT IN (2, 3)))
def ~
::Sequel::SQL::BooleanExpression.from_value_pairs(self, :OR, true)
end
# Return a Sequel::SQL::CaseExpression with this hash as the conditions and the given
# default value.
#
# {{a: [2,3]}=>1}.case(0) # SQL: CASE WHEN (a IN (2, 3)) THEN 1 ELSE 0 END
# {a: 1, b: 2}.case(:d, :c) # SQL: CASE c WHEN a THEN 1 WHEN b THEN 2 ELSE d END
def case(*args)
::Sequel::SQL::CaseExpression.new(to_a, *args)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching all of the
# conditions. Rarely do you need to call this explicitly, as Sequel generally
# assumes that hashes specify this type of condition.
#
# {a: true}.sql_expr # SQL: (a IS TRUE)
# {a: 1, b: [2, 3]}.sql_expr # SQL: ((a = 1) AND (b IN (2, 3)))
def sql_expr
::Sequel::SQL::BooleanExpression.from_value_pairs(self)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching none
# of the conditions.
#
# {a: true}.sql_negate # SQL: (a IS NOT TRUE)
# {a: 1, b: [2, 3]}.sql_negate # SQL: ((a != 1) AND (b NOT IN (2, 3)))
def sql_negate
::Sequel::SQL::BooleanExpression.from_value_pairs(self, :AND, true)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching any of the
# conditions.
#
# {a: true}.sql_or # SQL: (a IS TRUE)
# {a: 1, b: [2, 3]}.sql_or # SQL: ((a = 1) OR (b IN (2, 3)))
def sql_or
::Sequel::SQL::BooleanExpression.from_value_pairs(self, :OR)
end
end
# Sequel extends +String+ to add methods to implement the SQL DSL.
class String
include Sequel::SQL::AliasMethods
include Sequel::SQL::CastMethods
# Converts a string into a Sequel::LiteralString, in order to override string
# literalization, e.g.:
#
# DB[:items].where(abc: 'def')
# # "SELECT * FROM items WHERE (abc = 'def')"
#
# DB[:items].where(abc: 'def'.lit)
# # "SELECT * FROM items WHERE (abc = def)"
#
# You can also provide arguments, to create a Sequel::SQL::PlaceholderLiteralString:
#
# DB[:items].select{|o| o.count('DISTINCT ?'.lit(:a))}
# # "SELECT count(DISTINCT a) FROM items"
def lit(*args)
args.empty? ? Sequel::LiteralString.new(self) : Sequel::SQL::PlaceholderLiteralString.new(self, args)
end
# Returns a Sequel::SQL::Blob that holds the same data as this string. Blobs provide proper
# escaping of binary data.
def to_sequel_blob
::Sequel::SQL::Blob.new(self)
end
end
# Sequel extends +Symbol+ to add methods to implement the SQL DSL.
class Symbol
include Sequel::SQL::CastMethods
include Sequel::SQL::OrderMethods
include Sequel::SQL::BooleanMethods
include Sequel::SQL::NumericMethods
include Sequel::SQL::QualifyingMethods
include Sequel::SQL::StringMethods
include Sequel::SQL::SubscriptMethods
include Sequel::SQL::ComplexExpressionMethods
# Returns receiver wrapped in a Sequel::SQL::Identifier.
#
# :a.identifier # SQL: "a"
def identifier
Sequel::SQL::Identifier.new(self)
end
# Returns a Sequel::SQL::Function with this as the function name,
# and the given arguments.
#
# :now.sql_function # SQL: now()
# :sum.sql_function(:a) # SQL: sum(a)
# :concat.sql_function(:a, :b) # SQL: concat(a, b)
def sql_function(*args)
Sequel::SQL::Function.new(self, *args)
end
end
sequel-5.63.0/lib/sequel/extensions/core_refinements.rb 0000664 0000000 0000000 00000021326 14342141206 0023223 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# These are refinements to core classes that allow the Sequel
# DSL to be used without modifying the core classes directly.
# After loading the extension via:
#
# Sequel.extension :core_refinements
#
# you can enable the refinements for particular files:
#
# using Sequel::CoreRefinements
# :nocov:
raise(Sequel::Error, "Refinements require ruby 2.0.0 or greater") unless RUBY_VERSION >= '2.0.0'
# :nocov:
module Sequel::CoreRefinements
# :nocov:
include_meth = RUBY_VERSION >= '3.1' ? :import_methods : :include
# :nocov:
INCLUDE_METH = include_meth
private_constant :INCLUDE_METH
refine Array do
# Return a Sequel::SQL::BooleanExpression created from this array, not matching all of the
# conditions.
#
# ~[[:a, true]] # SQL: (a IS NOT TRUE)
# ~[[:a, 1], [:b, [2, 3]]] # SQL: ((a != 1) OR (b NOT IN (2, 3)))
def ~
Sequel.~(self)
end
# Return a Sequel::SQL::CaseExpression with this array as the conditions and the given
# default value and expression.
#
# [[{a: [2,3]}, 1]].case(0) # SQL: CASE WHEN (a IN (2, 3)) THEN 1 ELSE 0 END
# [[:a, 1], [:b, 2]].case(:d, :c) # SQL: CASE c WHEN a THEN 1 WHEN b THEN 2 ELSE d END
def case(*args)
::Sequel::SQL::CaseExpression.new(self, *args)
end
# Return a Sequel::SQL::ValueList created from this array. Used if this array contains
# all two element arrays and you want it treated as an SQL value list (IN predicate)
# instead of as a conditions specifier (similar to a hash). This is not necessary if you are using
# this array as a value in a filter, but may be necessary if you are using it as a
# value with placeholder SQL:
#
# DB[:a].where([:a, :b]=>[[1, 2], [3, 4]]) # SQL: ((a, b) IN ((1, 2), (3, 4)))
# DB[:a].where('(a, b) IN ?', [[1, 2], [3, 4]]) # SQL: ((a, b) IN ((1 = 2) AND (3 = 4)))
# DB[:a].where('(a, b) IN ?', [[1, 2], [3, 4]].sql_value_list) # SQL: ((a, b) IN ((1, 2), (3, 4)))
def sql_value_list
::Sequel::SQL::ValueList.new(self)
end
# Return a Sequel::SQL::BooleanExpression created from this array, matching all of the
# conditions. Rarely do you need to call this explicitly, as Sequel generally
# assumes that arrays of two element arrays specify this type of condition. One case where
# it can be necessary to use this is if you are using the object as a value in a filter hash
# and want to use the = operator instead of the IN operator (which is used by default for
# arrays of two element arrays).
#
# [[:a, true]].sql_expr # SQL: (a IS TRUE)
# [[:a, 1], [:b, [2, 3]]].sql_expr # SQL: ((a = 1) AND (b IN (2, 3)))
def sql_expr
Sequel[self]
end
# Return a Sequel::SQL::BooleanExpression created from this array, matching none
# of the conditions.
#
# [[:a, true]].sql_negate # SQL: (a IS NOT TRUE)
# [[:a, 1], [:b, [2, 3]]].sql_negate # SQL: ((a != 1) AND (b NOT IN (2, 3)))
def sql_negate
Sequel.negate(self)
end
# Return a Sequel::SQL::BooleanExpression created from this array, matching any of the
# conditions.
#
# [[:a, true]].sql_or # SQL: (a IS TRUE)
# [[:a, 1], [:b, [2, 3]]].sql_or # SQL: ((a = 1) OR (b IN (2, 3)))
def sql_or
Sequel.or(self)
end
# Return a Sequel::SQL::StringExpression representing an SQL string made up of the
# concatenation of this array's elements. If an argument is passed
# it is used in between each element of the array in the SQL
# concatenation.
#
# [:a].sql_string_join # SQL: a
# [:a, :b].sql_string_join # SQL: (a || b)
# [:a, 'b'].sql_string_join # SQL: (a || 'b')
# ['a', :b].sql_string_join(' ') # SQL: ('a' || ' ' || b)
def sql_string_join(joiner=nil)
Sequel.join(self, joiner)
end
end
refine Hash do
# Return a Sequel::SQL::BooleanExpression created from this hash, matching
# all of the conditions in this hash and the condition specified by
# the given argument.
#
# {a: 1} & :b # SQL: ((a = 1) AND b)
# {a: true} & ~:b # SQL: ((a IS TRUE) AND NOT b)
def &(ce)
::Sequel::SQL::BooleanExpression.new(:AND, self, ce)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching
# all of the conditions in this hash or the condition specified by
# the given argument.
#
# {a: 1} | :b # SQL: ((a = 1) OR b)
# {a: true} | ~:b # SQL: ((a IS TRUE) OR NOT b)
def |(ce)
::Sequel::SQL::BooleanExpression.new(:OR, self, ce)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, not matching all of the
# conditions.
#
# ~{a: true} # SQL: (a IS NOT TRUE)
# ~{a: 1, b: [2, 3]} # SQL: ((a != 1) OR (b NOT IN (2, 3)))
def ~
::Sequel::SQL::BooleanExpression.from_value_pairs(self, :OR, true)
end
# Return a Sequel::SQL::CaseExpression with this hash as the conditions and the given
# default value.
#
# {{a: [2,3]}=>1}.case(0) # SQL: CASE WHEN (a IN (2, 3)) THEN 1 ELSE 0 END
# {a: 1, b: 2}.case(:d, :c) # SQL: CASE c WHEN a THEN 1 WHEN b THEN 2 ELSE d END
def case(*args)
::Sequel::SQL::CaseExpression.new(to_a, *args)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching all of the
# conditions. Rarely do you need to call this explicitly, as Sequel generally
# assumes that hashes specify this type of condition.
#
# {a: true}.sql_expr # SQL: (a IS TRUE)
# {a: 1, b: [2, 3]}.sql_expr # SQL: ((a = 1) AND (b IN (2, 3)))
def sql_expr
::Sequel::SQL::BooleanExpression.from_value_pairs(self)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching none
# of the conditions.
#
# {a: true}.sql_negate # SQL: (a IS NOT TRUE)
# {a: 1, b: [2, 3]}.sql_negate # SQL: ((a != 1) AND (b NOT IN (2, 3)))
def sql_negate
::Sequel::SQL::BooleanExpression.from_value_pairs(self, :AND, true)
end
# Return a Sequel::SQL::BooleanExpression created from this hash, matching any of the
# conditions.
#
# {a: true}.sql_or # SQL: (a IS TRUE)
# {a: 1, b: [2, 3]}.sql_or # SQL: ((a = 1) OR (b IN (2, 3)))
def sql_or
::Sequel::SQL::BooleanExpression.from_value_pairs(self, :OR)
end
end
refine String do
send include_meth, Sequel::SQL::AliasMethods
send include_meth, Sequel::SQL::CastMethods
# Converts a string into a Sequel::LiteralString, in order to override string
# literalization, e.g.:
#
# DB[:items].where(abc: 'def')
# # "SELECT * FROM items WHERE (abc = 'def')"
#
# DB[:items].where(abc: 'def'.lit)
# # "SELECT * FROM items WHERE (abc = def)"
#
# You can also provide arguments, to create a Sequel::SQL::PlaceholderLiteralString:
#
# DB[:items].select{|o| o.count('DISTINCT ?'.lit(:a))}
# # "SELECT count(DISTINCT a) FROM items"
def lit(*args)
args.empty? ? Sequel::LiteralString.new(self) : Sequel::SQL::PlaceholderLiteralString.new(self, args)
end
# Returns a Sequel::SQL::Blob that holds the same data as this string. Blobs provide proper
# escaping of binary data.
def to_sequel_blob
::Sequel::SQL::Blob.new(self)
end
end
refine Symbol do
send include_meth, Sequel::SQL::AliasMethods
send include_meth, Sequel::SQL::CastMethods
send include_meth, Sequel::SQL::OrderMethods
send include_meth, Sequel::SQL::BooleanMethods
send include_meth, Sequel::SQL::NumericMethods
# :nocov:
remove_method :* if RUBY_VERSION >= '3.1'
# :nocov:
send include_meth, Sequel::SQL::QualifyingMethods
send include_meth, Sequel::SQL::StringMethods
send include_meth, Sequel::SQL::SubscriptMethods
send include_meth, Sequel::SQL::ComplexExpressionMethods
# :nocov:
if RUBY_VERSION >= '3.1'
remove_method :*
def *(ce=(arg=false;nil))
if arg == false
Sequel::SQL::ColumnAll.new(self)
else
Sequel::SQL::NumericExpression.new(:*, self, ce)
end
end
end
# :nocov:
# Returns receiver wrapped in a Sequel::SQL::Identifier.
#
# :a.identifier # SQL: "a"
def identifier
Sequel::SQL::Identifier.new(self)
end
# Returns a Sequel::SQL::Function with this as the function name,
# and the given arguments.
#
# :now.sql_function # SQL: now()
# :sum.sql_function(:a) # SQL: sum(a)
# :concat.sql_function(:a, :b) # SQL: concat(a, b)
def sql_function(*args)
Sequel::SQL::Function.new(self, *args)
end
end
end
sequel-5.63.0/lib/sequel/extensions/current_datetime_timestamp.rb 0000664 0000000 0000000 00000003726 14342141206 0025321 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The current_datetime_timestamp extension makes Dataset#current_datetime
# return an object that operates like Sequel.datetime_class.now, but will
# be literalized as CURRENT_TIMESTAMP.
#
# This allows you to use the defaults_setter, timestamps, and touch
# model plugins and make sure that CURRENT_TIMESTAMP is used instead of
# a literalized timestamp value.
#
# The reason that CURRENT_TIMESTAMP is better than a literalized version
# of the timestamp is that it obeys correct transactional semantics
# (all calls to CURRENT_TIMESTAMP in the same transaction return the
# same timestamp, at least on some databases).
#
# To have current_datetime be literalized as CURRENT_TIMESTAMP for
# a single dataset:
#
# ds = ds.extension(:current_datetime_timestamp)
#
# To have current_datetime be literalized as CURRENT_TIMESTAMP for all
# datasets of a given database:
#
# DB.extension(:current_datetime_timestamp)
#
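# For example, combined with the timestamps model plugin (the model name
# is illustrative), created_at/updated_at will be set using CURRENT_TIMESTAMP:
#
# DB.extension(:current_datetime_timestamp)
# Album.plugin :timestamps, update_on_create: true
#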
# Related module: Sequel::CurrentDateTimeTimestamp
#
module Sequel
module CurrentDateTimeTimestamp
module DatasetMethods
# Return an instance of Sequel.datetime_class that will be literalized
# as CURRENT_TIMESTAMP.
def current_datetime
(Sequel.datetime_class == ::Time ? Time : DateTime).now
end
private
# Literalize custom DateTime subclass objects as CURRENT_TIMESTAMP.
def literal_datetime_append(sql, v)
v.is_a?(DateTime) ? literal_append(sql, Sequel::CURRENT_TIMESTAMP) : super
end
# Literalize custom Time subclass objects as CURRENT_TIMESTAMP.
def literal_time_append(sql, v)
v.is_a?(Time) ? literal_append(sql, Sequel::CURRENT_TIMESTAMP) : super
end
end
# Time subclass literalized as CURRENT_TIMESTAMP
class Time < ::Time; end
# DateTime subclass literalized as CURRENT_TIMESTAMP
class DateTime < ::DateTime; end
end
Dataset.register_extension(:current_datetime_timestamp, CurrentDateTimeTimestamp::DatasetMethods)
end
sequel-5.63.0/lib/sequel/extensions/dataset_source_alias.rb 0000664 0000000 0000000 00000006103 14342141206 0024046 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The dataset_source_alias extension changes Sequel's
# default behavior of automatically aliasing datasets
# from using t1, t2, etc. to using an alias based on
# the source of the dataset. Example:
#
# DB.from(DB.from(:a))
# # default: SELECT * FROM (SELECT * FROM a) AS t1
# # with extension: SELECT * FROM (SELECT * FROM a) AS a
#
# This also works when joining:
#
# DB[:a].join(DB[:b], [:id])
# # SELECT * FROM a INNER JOIN (SELECT * FROM b) AS b USING (id)
#
# To avoid conflicting aliases, this attempts to alias tables
# uniquely if it detects a conflict:
#
# DB.from(:a, DB.from(:a))
# # SELECT * FROM a, (SELECT * FROM a) AS a_0
#
# Note that not all conflicts are correctly detected and handled.
# You are encouraged to alias your datasets manually instead of
# relying on the auto-aliasing if there would be a conflict.
#
# In the places where Sequel cannot determine the
# appropriate alias to use for the dataset, it will fallback to
# the standard t1, t2, etc. aliasing.
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:dataset_source_alias)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:dataset_source_alias)
#
# Related module: Sequel::Dataset::DatasetSourceAlias
#
module Sequel
class Dataset
module DatasetSourceAlias
# Preprocess the list of sources and attempt to alias any
# datasets in the sources to the first source of the respective
# dataset.
def from(*source, &block)
virtual_row_columns(source, block)
table_aliases = []
source = source.map do |s|
case s
when Dataset
s = dataset_source_alias_expression(s, table_aliases)
when Symbol, String, SQL::AliasedExpression, SQL::Identifier, SQL::QualifiedIdentifier
table_aliases << alias_symbol(s)
end
s
end
super(*source, &nil)
end
# If a Dataset is given as the table argument, attempt to alias
# it to its source.
def join_table(type, table, expr=nil, options=OPTS)
if table.is_a?(Dataset) && !options[:table_alias]
table = dataset_source_alias_expression(table)
end
super
end
private
# Attempt to automatically alias the given dataset to its source.
# If the dataset cannot be automatically aliased to its source,
# return it unchanged. The table_aliases argument is a list of
# already used alias symbols, which will not be used as the alias.
def dataset_source_alias_expression(ds, table_aliases=[])
base = ds.first_source if ds.opts[:from]
case base
when Symbol, String, SQL::AliasedExpression, SQL::Identifier, SQL::QualifiedIdentifier
aliaz = unused_table_alias(base, table_aliases)
table_aliases << aliaz
ds.as(aliaz)
else
ds
end
end
end
register_extension(:dataset_source_alias, DatasetSourceAlias)
end
end
sequel-5.63.0/lib/sequel/extensions/date_arithmetic.rb 0000664 0000000 0000000 00000023337 14342141206 0023026 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The date_arithmetic extension adds the ability to perform database-independent
# addition/subtraction of intervals to/from dates and timestamps.
#
# First, you need to load the extension into the database:
#
# DB.extension :date_arithmetic
#
# Then you can use the Sequel.date_add and Sequel.date_sub methods
# to return Sequel expressions (this example shows the only supported
# keys for the second argument):
#
# add = Sequel.date_add(:date_column, years: 1, months: 2, weeks: 2, days: 1)
# sub = Sequel.date_sub(:date_column, hours: 1, minutes: 2, seconds: 3)
#
# In addition to specifying the interval as a hash, there is also
# support for specifying the interval as an ActiveSupport::Duration
# object:
#
# require 'active_support/all'
# add = Sequel.date_add(:date_column, 1.years + 2.months + 3.days)
# sub = Sequel.date_sub(:date_column, 1.hours + 2.minutes + 3.seconds)
#
# By default, values are cast to the generic timestamp type for the
# database. You can override the cast type using the :cast option:
#
# add = Sequel.date_add(:date_column, {years: 1, months: 2, days: 3}, cast: :timestamptz)
#
# These expressions can be used in your datasets, or anywhere else that
# Sequel expressions are allowed:
#
# DB[:table].select(add.as(:d)).where(sub > Sequel::CURRENT_TIMESTAMP)
#
# On most databases, the values you provide for years/months/days/etc. must
# be numeric values and not arbitrary SQL expressions. However, on PostgreSQL
# 9.4+, use of arbitrary SQL expressions is supported.
#
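# For example, on PostgreSQL 9.4+ you could add a number of days stored in
# another column (the column name is illustrative):
#
# Sequel.date_add(:date_column, days: Sequel[:days_to_add])
#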
# Related module: Sequel::SQL::DateAdd
#
module Sequel
module SQL
module Builders
# Return a DateAdd expression, adding an interval to the date/timestamp expr.
# Options:
# :cast :: Cast to the specified type instead of the default if casting
def date_add(expr, interval, opts=OPTS)
DateAdd.new(expr, interval, opts)
end
# Return a DateAdd expression, adding the negative of the interval to
# the date/timestamp expr.
# Options:
# :cast :: Cast to the specified type instead of the default if casting
def date_sub(expr, interval, opts=OPTS)
if defined?(ActiveSupport::Duration) && interval.is_a?(ActiveSupport::Duration)
interval = interval.parts
end
parts = {}
interval.each do |k,v|
case v
when nil
# ignore
when Numeric
parts[k] = -v
else
parts[k] = Sequel::SQL::NumericExpression.new(:*, v, -1)
end
end
DateAdd.new(expr, parts, opts)
end
end
# The DateAdd class represents the addition of an interval to a
# date/timestamp expression.
class DateAdd < GenericExpression
# These methods are added to datasets using the date_arithmetic
# extension, for the purposes of correctly literalizing DateAdd
# expressions for the appropriate database type.
module DatasetMethods
DURATION_UNITS = [:years, :months, :days, :hours, :minutes, :seconds].freeze
DEF_DURATION_UNITS = DURATION_UNITS.zip(DURATION_UNITS.map{|s| s.to_s.freeze}).freeze
POSTGRES_DURATION_UNITS = DURATION_UNITS.zip([:years, :months, :days, :hours, :mins, :secs].map{|s| s.to_s.freeze}).freeze
MYSQL_DURATION_UNITS = DURATION_UNITS.zip(DURATION_UNITS.map{|s| Sequel.lit(s.to_s.upcase[0...-1]).freeze}).freeze
MSSQL_DURATION_UNITS = DURATION_UNITS.zip(DURATION_UNITS.map{|s| Sequel.lit(s.to_s[0...-1]).freeze}).freeze
H2_DURATION_UNITS = DURATION_UNITS.zip(DURATION_UNITS.map{|s| s.to_s[0...-1].freeze}).freeze
DERBY_DURATION_UNITS = DURATION_UNITS.zip(DURATION_UNITS.map{|s| Sequel.lit("SQL_TSI_#{s.to_s.upcase[0...-1]}").freeze}).freeze
ACCESS_DURATION_UNITS = DURATION_UNITS.zip(%w'yyyy m d h n s'.map(&:freeze)).freeze
DB2_DURATION_UNITS = DURATION_UNITS.zip(DURATION_UNITS.map{|s| Sequel.lit(s.to_s).freeze}).freeze
# Append the SQL fragment for the DateAdd expression to the SQL query.
def date_add_sql_append(sql, da)
if defined?(super)
return super
end
h = da.interval
expr = da.expr
cast_type = da.cast_type || Time
cast = case db_type = db.database_type
when :postgres
casted = Sequel.cast(expr, cast_type)
if db.server_version >= 90400
placeholder = []
vals = []
each_valid_interval_unit(h, POSTGRES_DURATION_UNITS) do |value, sql_unit|
placeholder << "#{', ' unless placeholder.empty?}#{sql_unit} := "
vals << value
end
interval = Sequel.function(:make_interval, Sequel.lit(placeholder, *vals)) unless vals.empty?
else
parts = String.new
each_valid_interval_unit(h, DEF_DURATION_UNITS) do |value, sql_unit|
parts << "#{value} #{sql_unit} "
end
interval = Sequel.cast(parts, :interval) unless parts.empty?
end
if interval
return complex_expression_sql_append(sql, :+, [casted, interval])
else
return literal_append(sql, casted)
end
when :sqlite
args = [expr]
each_valid_interval_unit(h, DEF_DURATION_UNITS) do |value, sql_unit|
args << "#{value} #{sql_unit}"
end
return function_sql_append(sql, Sequel.function(:datetime, *args))
when :mysql, :hsqldb
if db_type == :hsqldb
# HSQLDB requires 2.2.9+ for the DATE_ADD function
expr = Sequel.cast(expr, cast_type)
end
each_valid_interval_unit(h, MYSQL_DURATION_UNITS) do |value, sql_unit|
expr = Sequel.function(:DATE_ADD, expr, Sequel.lit(["INTERVAL ", " "], value, sql_unit))
end
when :mssql, :h2, :access, :sqlanywhere
units = case db_type
when :h2
H2_DURATION_UNITS
when :access
ACCESS_DURATION_UNITS
else
MSSQL_DURATION_UNITS
end
each_valid_interval_unit(h, units) do |value, sql_unit|
expr = Sequel.function(:DATEADD, sql_unit, value, expr)
end
when :derby
if expr.is_a?(Date) && !expr.is_a?(DateTime)
# Work around for https://issues.apache.org/jira/browse/DERBY-896
expr = Sequel.cast_string(expr) + ' 00:00:00'
end
each_valid_interval_unit(h, DERBY_DURATION_UNITS) do |value, sql_unit|
expr = Sequel.lit(["{fn timestampadd(#{sql_unit}, ", ", timestamp(", "))}"], value, expr)
end
when :oracle
each_valid_interval_unit(h, MYSQL_DURATION_UNITS) do |value, sql_unit|
expr = Sequel.+(expr, Sequel.lit(["INTERVAL ", " "], value.to_s, sql_unit))
end
when :db2
expr = Sequel.cast(expr, cast_type)
each_valid_interval_unit(h, DB2_DURATION_UNITS) do |value, sql_unit|
expr = Sequel.+(expr, Sequel.lit(["", " "], value, sql_unit))
end
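# expr was already cast to cast_type above, so return false to skip the final cast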
false
else
raise Error, "date arithmetic is not implemented on #{db.database_type}"
end
if cast
expr = Sequel.cast(expr, cast_type)
end
literal_append(sql, expr)
end
private
# Yield the value in the interval for each of the units
# present in the interval, along with the SQL fragment
# representing the unit name. Returns false if any
# values were yielded, true otherwise
def each_valid_interval_unit(interval, units)
cast = true
units.each do |unit, sql_unit|
if (value = interval[unit]) && value != 0
cast = false
yield value, sql_unit
end
end
cast
end
end
# The expression that the interval is being added to.
attr_reader :expr
# The interval added to the expression, as a hash with
# symbol keys.
attr_reader :interval
# The type to cast the expression to. nil if not overridden, in which case
# the generic timestamp type for the database will be used.
attr_reader :cast_type
# Supports two types of intervals:
# Hash :: Used directly, but values cannot be plain strings.
# ActiveSupport::Duration :: Converted to a hash using the interval's parts.
def initialize(expr, interval, opts=OPTS)
@expr = expr
h = Hash.new(0)
interval = interval.parts unless interval.is_a?(Hash)
interval.each do |unit, value|
# skip nil values
next unless value
# Convert weeks to days, as ActiveSupport::Duration can use weeks,
# but the database-specific literalizers only support days.
if unit == :weeks
unit = :days
value *= 7
end
unless DatasetMethods::DURATION_UNITS.include?(unit)
raise Sequel::Error, "Invalid key used in DateAdd interval hash: #{unit.inspect}"
end
# Attempt to prevent SQL injection by users who pass untrusted strings
# as interval values. It doesn't make sense to support literal strings,
# due to the numeric adding below.
if value.is_a?(String)
raise Sequel::InvalidValue, "cannot provide String value as interval part: #{value.inspect}"
end
h[unit] += value
end
@interval = Hash[h].freeze
@cast_type = opts[:cast] if opts[:cast]
freeze
end
to_s_method :date_add_sql
end
end
Dataset.register_extension(:date_arithmetic, SQL::DateAdd::DatasetMethods)
end
sequel-5.63.0/lib/sequel/extensions/date_parse_input_handler.rb 0000664 0000000 0000000 00000004110 14342141206 0024707 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The date_parse_input_handler extension allows for configuring how input
# to date parsing methods should be handled. By default, the
# extension does not change behavior. However, you can use the
# +Sequel.date_parse_input_handler+ method to support custom handling
# of input strings to the date parsing methods. For example, if you want
# to implement a length check to prevent denial of service vulnerabilities
# in older versions of Ruby, you can do:
#
# Sequel.extension :date_parse_input_handler
# Sequel.date_parse_input_handler do |string|
# raise Sequel::InvalidValue, "string length (200) exceeds the limit 128" if string.bytesize > 128
# string
# end
#
# You can also use +Sequel.date_parse_input_handler+ to modify the string
# that will be passed to the parsing methods. For example, you could
# truncate it:
#
# Sequel.date_parse_input_handler do |string|
# string.b[0, 128]
# end
#
# Be aware that modern versions of Ruby will raise an exception if
# date parsing input exceeds 128 bytes.
module Sequel
module DateParseInputHandler
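# Set a block to be called with the input string given to the date parsing
# methods. The block's return value is used as the string to actually parse.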
def date_parse_input_handler(&block)
singleton_class.class_eval do
define_method(:handle_date_parse_input, &block)
private :handle_date_parse_input
alias handle_date_parse_input handle_date_parse_input
end
end
# Call date parse input handler with input string.
def string_to_date(string)
super(handle_date_parse_input(string))
end
# Call date parse input handler with input string.
def string_to_datetime(string)
super(handle_date_parse_input(string))
end
# Call date parse input handler with input string.
def string_to_time(string)
super(handle_date_parse_input(string))
end
private
# Call date parse input handler with input string.
def _date_parse(string)
super(handle_date_parse_input(string))
end
# Return string as-is by default, so by default behavior does not change.
def handle_date_parse_input(string)
string
end
end
extend DateParseInputHandler
end
sequel-5.63.0/lib/sequel/extensions/datetime_parse_to_time.rb 0000664 0000000 0000000 00000002534 14342141206 0024402 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# This switches the default parsing of strings into Time values
# from using Time.parse to using DateTime.parse.to_time. This
# fixes issues when the times being parsed have no timezone
# information, the implicit timezone for the Database instance
# is set to +:utc+, and the timestamps being used include values
# not valid in the local timezone, such as during a daylight
# savings time switch.
#
# To load the extension:
#
# Sequel.extension :datetime_parse_to_time
#
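# For example, with the following illustrative settings, timestamp strings
# without offset information are parsed as UTC and converted to local time:
#
# Sequel.extension :datetime_parse_to_time
# Sequel.database_timezone = :utc
# Sequel.application_timezone = :local
#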
module Sequel::DateTimeParseToTime
private
# Use DateTime.parse.to_time to do the conversion if the input is a string and is assumed to
# be in UTC and there is no offset information in the string.
def convert_input_timestamp(v, input_timezone)
if v.is_a?(String) && datetime_class == Time && input_timezone == :utc && !_date_parse(v).has_key?(:offset)
# :nocov:
# Whether this is fully branch covered depends on the order in which the specs are run.
v = handle_date_parse_input(v) if respond_to?(:handle_date_parse_input, true)
# :nocov:
t = DateTime.parse(v).to_time
case application_timezone
when nil, :local
t = t.localtime
end
t
else
super
end
rescue => e
raise convert_exception_class(e, Sequel::InvalidValue)
end
end
Sequel.extend(Sequel::DateTimeParseToTime)
sequel-5.63.0/lib/sequel/extensions/duplicate_columns_handler.rb 0000664 0000000 0000000 00000006453 14342141206 0025107 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The duplicate_columns_handler extension allows you to customize handling of
# duplicate column names in your queries on a per-database or per-dataset level.
#
# For example, you may want to raise an exception if you join 2 tables together
# which contains a column that will override another columns.
#
# To use the extension, you need to load the extension into the database:
#
# DB.extension :duplicate_columns_handler
#
# or into individual datasets:
#
# ds = DB[:items].extension(:duplicate_columns_handler)
#
# A database option is introduced: :on_duplicate_columns. It accepts a Symbol
# or any object that responds to :call.
#
# on_duplicate_columns: :raise
# on_duplicate_columns: :warn
# on_duplicate_columns: :ignore
# on_duplicate_columns: lambda{|columns| arbitrary_condition? ? :raise : :warn}
#
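# For example, passing the option when connecting (the connection URL is
# illustrative):
#
# DB = Sequel.connect('postgres://localhost/mydb', on_duplicate_columns: :raise)
# DB.extension :duplicate_columns_handler
#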
# You may also configure duplicate columns handling for a specific dataset:
#
# ds.on_duplicate_columns(:warn)
# ds.on_duplicate_columns(:raise)
# ds.on_duplicate_columns(:ignore)
# ds.on_duplicate_columns{|columns| arbitrary_condition? ? :raise : :warn}
# ds.on_duplicate_columns(lambda{|columns| arbitrary_condition? ? :raise : :warn})
#
# If :raise is specified, a Sequel::DuplicateColumnError is raised.
# If :warn is specified, you will receive a warning via +warn+.
# If a callable is specified, it will be called.
# If no on_duplicate_columns is specified, the default is :warn.
#
# Related module: Sequel::DuplicateColumnsHandler
module Sequel
module DuplicateColumnsHandler
# :nocov:
CALLER_ARGS = (RUBY_VERSION >= '2.0' ? [0,1] : [0]).freeze
# :nocov:
# Customize handling of duplicate columns for this dataset.
def on_duplicate_columns(handler = (raise Error, "Must provide either an argument or a block to on_duplicate_columns" unless defined?(yield); nil), &block)
raise Error, "Cannot provide both an argument and a block to on_duplicate_columns" if handler && block
clone(:on_duplicate_columns=>handler||block)
end
private
# Call handle_duplicate_columns if there are duplicate columns.
def columns=(cols)
if cols && cols.uniq.size != cols.size
handle_duplicate_columns(cols)
end
super
end
# Invoke the appropriate behavior when duplicate columns are present.
def handle_duplicate_columns(cols)
message = "#{caller(*CALLER_ARGS).first}: One or more duplicate columns present in #{cols.inspect}"
case duplicate_columns_handler_type(cols)
when :raise
raise DuplicateColumnError, message
when :warn
warn message
end
end
# Try to find dataset option for on_duplicate_columns. If not present on the dataset,
# use the on_duplicate_columns option on the database. If not present on the database,
# default to :warn.
def duplicate_columns_handler_type(cols)
handler = opts.fetch(:on_duplicate_columns){db.opts.fetch(:on_duplicate_columns, :warn)}
if handler.respond_to?(:call)
handler.call(cols)
else
handler
end
end
end
# Error which is raised when duplicate columns are present in a dataset which is configured
# to :raise on_duplicate_columns.
class DuplicateColumnError < Error
end
Dataset.register_extension(:duplicate_columns_handler, Sequel::DuplicateColumnsHandler)
end
sequel-5.63.0/lib/sequel/extensions/empty_array_consider_nulls.rb 0000664 0000000 0000000 00000002226 14342141206 0025331 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# This changes Sequel's literalization of IN/NOT IN with an empty
# array value to consider NULL values if one of the referenced
# columns is NULL:
#
# DB[:test].where(name: [])
# # SELECT * FROM test WHERE (name != name)
# DB[:test].exclude(name: [])
# # SELECT * FROM test WHERE (name = name)
#
# The default Sequel behavior is to ignore NULLs, as the above
# query is not generally optimized well by databases.
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:empty_array_consider_nulls)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:empty_array_consider_nulls)
#
# Related module: Sequel::EmptyArrayConsiderNulls
#
module Sequel
module EmptyArrayConsiderNulls
# Use an expression that returns NULL if the column value is NULL.
def empty_array_value(op, cols)
c = Array(cols)
SQL::BooleanExpression.from_value_pairs(c.zip(c), :AND, op == :IN)
end
end
Dataset.register_extension(:empty_array_consider_nulls, EmptyArrayConsiderNulls)
end
sequel-5.63.0/lib/sequel/extensions/error_sql.rb 0000664 0000000 0000000 00000004746 14342141206 0021713 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The error_sql extension adds a DatabaseError#sql method
# that you can use to get the sql that caused the error
# to be raised.
#
# begin
# DB.run "Invalid SQL"
# rescue => e
# puts e.sql # "Invalid SQL"
# end
#
# On some databases, the error message contains part or all
# of the SQL used, but on other databases, none of the SQL
# used is displayed in the error message, so it can be
# difficult to track down what is causing the error without
# using a logger. This extension should hopefully make
# debugging easier on databases that have bad error
# messages.
#
# This extension may not work correctly in the following cases:
#
# * log_connection_yield is not used when executing the query.
# * The underlying exception is frozen or reused.
# * The underlying exception doesn't correctly record instance
# variables set on it (seems to happen on JRuby when underlying
# exception objects are Java exceptions).
#
# To load the extension into the database:
#
# DB.extension :error_sql
#
# Related module: Sequel::ErrorSQL
#
module Sequel
class DatabaseError
# Get the SQL code that caused this error to be raised.
def sql
# We store the error SQL in the wrapped exception instead of the
# current exception, since when the error SQL is originally associated
# with the wrapped exception, the current exception doesn't exist. It's
# possible to copy the error SQL into the current exception, but there
# doesn't seem to be a reason to do that.
wrapped_exception.instance_variable_get(:@sequel_error_sql) if wrapped_exception
end
end
module ErrorSQL
# Store the SQL related to the exception with the exception, so it
# is available for DatabaseError#sql later.
def log_exception(exception, message)
exception.instance_variable_set(:@sequel_error_sql, message)
super
end
# If there are no loggers for this database and an exception is raised
# store the SQL related to the exception with the exception, so it
# is available for DatabaseError#sql later.
def log_connection_yield(sql, conn, args=nil)
if @loggers.empty?
begin
yield
rescue => e
sql = "#{connection_info(conn) if conn && log_connection_info}#{sql}#{"; #{args.inspect}" if args}"
e.instance_variable_set(:@sequel_error_sql, sql)
raise
end
else
super
end
end
end
Database.register_extension(:error_sql, ErrorSQL)
end
sequel-5.63.0/lib/sequel/extensions/escaped_like.rb 0000664 0000000 0000000 00000010120 14342141206 0022272 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The escaped_like extension adds +escaped_like+ and +escaped_ilike+
# methods to Sequel::SQL::StringMethods, which allow them to be easily
# used with most of Sequel's expression objects. Example:
#
# DB[:table].where{string_column.escaped_like('?%', user_input)}
# # user_input is 'foo':
# # SELECT * FROM table WHERE string_column LIKE 'foo%' ESCAPE '\'
# # user_input is '%foo':
# # SELECT * FROM table WHERE string_column LIKE '\%foo%' ESCAPE '\'
#
# To load the extension:
#
# Sequel.extension :escaped_like
#
# Related modules: Sequel::SQL::StringMethods, Sequel::SQL::EscapedLikeExpression
#
module Sequel
module SQL
# Represents a pattern match SQL expression, where the pattern can depend
# upon interpolated values in a database-dependent manner.
class EscapedLikeExpression < Expression
include AliasMethods
include BooleanMethods
include CastMethods
include OrderMethods
# Initialize the expression. Arguments:
# expr :: Left hand side of the LIKE/ILIKE operator, the value being matched against the pattern
# case_sensitive :: Whether the match is case sensitive
# placeholder_pattern :: The pattern to match against, with +?+ for the placeholders
# placeholder_values :: The string values for each +?+ in the placeholder pattern. Should be an
# array of strings, though it can be a single string if there is only
# a single placeholder.
def initialize(expr, case_sensitive, placeholder_pattern, placeholder_values)
@expr = expr
@method = case_sensitive ? :like : :ilike
@pattern = placeholder_pattern
unless placeholder_values.is_a?(Array)
placeholder_values = [placeholder_values].freeze
end
@values = placeholder_values
freeze
end
# Interpolate the pattern values into the placeholder pattern to get the final pattern,
# now that we have access to the dataset. Use the expression and final pattern and
# add an appropriate LIKE/ILIKE expression to the SQL being built.
def to_s_append(ds, sql)
i = -1
match_len = @values.length - 1
like_pattern = String.new
pattern = @pattern
while true
previous, q, pattern = pattern.partition('?')
like_pattern << previous
unless q.empty?
if i == match_len
raise Error, "Mismatched number of placeholders (#{i+1}) and placeholder arguments (#{@values.length}) for escaped like expression: #{@pattern.inspect}"
end
like_pattern << ds.escape_like(@values.at(i+=1))
end
if pattern.empty?
unless i == match_len
raise Error, "Mismatched number of placeholders (#{i+1}) and placeholder arguments (#{@values.length}) for escaped like expression: #{@pattern.inspect}"
end
break
end
end
ds.literal_append(sql, Sequel.send(@method, @expr, like_pattern))
end
end
module StringMethods
# Create an +EscapedLikeExpression+ doing a case insensitive pattern match of the
# receiver against the pattern, interpolating escaped values for each +?+
# placeholder in the pattern.
#
# Sequel[:a].escaped_ilike('?%', 'A') # "a" ILIKE 'A%' ESCAPE '\'
# Sequel[:a].escaped_ilike('?%', '%A') # "a" ILIKE '\%A%' ESCAPE '\'
def escaped_ilike(placeholder_pattern, placeholder_values)
EscapedLikeExpression.new(self, false, placeholder_pattern, placeholder_values)
end
# Create an +EscapedLikeExpression+ doing a case sensitive pattern match of the
# receiver against the pattern, interpolating escaped values for each +?+
# placeholder in the pattern.
#
# Sequel[:a].escaped_like('?%', 'A') # "a" LIKE 'A%' ESCAPE '\'
# Sequel[:a].escaped_like('?%', '%A') # "a" LIKE '\%A%' ESCAPE '\'
def escaped_like(placeholder_pattern, placeholder_values)
EscapedLikeExpression.new(self, true, placeholder_pattern, placeholder_values)
end
end
end
end
sequel-5.63.0/lib/sequel/extensions/eval_inspect.rb 0000664 0000000 0000000 00000012626 14342141206 0022353 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The eval_inspect extension changes #inspect for Sequel::SQL::Expression
# subclasses to return a string suitable for ruby's eval, such that
#
# eval(obj.inspect) == obj
#
# is true. The above code is true for most of ruby's simple classes such
# as String, Integer, Float, and Symbol, but it's not true for classes such
# as Time, Date, and BigDecimal. Sequel attempts to handle situations where
# instances of these classes are a component of a Sequel expression.
#
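# For example, with the extension loaded (a minimal sketch):
#
#   Sequel.extension :eval_inspect
#   expr = Sequel.expr(:a) + 1
#   eval(expr.inspect) == expr # => true
#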
# To load the extension:
#
# Sequel.extension :eval_inspect
#
# Related module: Sequel::EvalInspect
#
module Sequel
module EvalInspect
# Special case objects where inspect does not generally produce input
# suitable for eval. Used by Sequel::SQL::Expression#inspect so that
# it can produce a string suitable for eval even if components of the
# expression have inspect methods that do not produce strings suitable
# for eval.
def eval_inspect(obj)
case obj
when BigDecimal
"Kernel::BigDecimal(#{obj.to_s.inspect})"
when Sequel::SQL::Blob, Sequel::LiteralString
"#{obj.class}.new(#{obj.to_s.inspect})"
when Sequel::SQL::ValueList
"#{obj.class}.new(#{obj.to_a.inspect})"
when Array
"[#{obj.map{|o| eval_inspect(o)}.join(', ')}]"
when Hash
"{#{obj.map{|k, v| "#{eval_inspect(k)} => #{eval_inspect(v)}"}.join(', ')}}"
when Time
datepart = "%Y-%m-%dT" unless obj.is_a?(Sequel::SQLTime)
"#{obj.class}.parse(#{obj.strftime("#{datepart}%T.%N%z").inspect})#{'.utc' if obj.utc?}"
when DateTime
# Ignore date of calendar reform
"DateTime.parse(#{obj.strftime('%FT%T.%N%z').inspect})"
when Date
# Ignore offset and date of calendar reform
"Date.new(#{obj.year}, #{obj.month}, #{obj.day})"
else
obj.inspect
end
end
end
extend EvalInspect
module SQL
class Expression
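# Alias inspect to itself before redefining it, so the redefinition
# below does not trigger a method redefinition warning in verbose mode.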
alias inspect inspect
# Attempt to produce a string suitable for eval, such that:
#
# eval(obj.inspect) == obj
def inspect
# Assume by default that the object can be recreated by calling
# self.class.new with any attr_reader values defined on the class,
# in the order they were defined.
klass = self.class
args = inspect_args.map do |arg|
if arg.is_a?(String) && arg =~ /\A\*/
# Special case string arguments starting with *, indicating that
# they should return an array to be splatted as the remaining arguments.
# Allow calling private methods to get inspect output.
send(arg.sub('*', '')).map{|a| Sequel.eval_inspect(a)}.join(', ')
else
# Allow calling private methods to get inspect output.
Sequel.eval_inspect(send(arg))
end
end
"#{klass}.#{inspect_new_method}(#{args.join(', ')})"
end
private
# Which attribute values to use in the inspect string.
def inspect_args
self.class.comparison_attrs
end
# Use the new method by default for creating new objects.
def inspect_new_method
:new
end
end
class ComplexExpression
private
# ComplexExpression's initializer uses a splat for the operator arguments.
def inspect_args
[:op, "*args"]
end
end
class Constant
# Constants to lookup in the Sequel module.
INSPECT_LOOKUPS = [:CURRENT_DATE, :CURRENT_TIMESTAMP, :CURRENT_TIME, :SQLTRUE, :SQLFALSE, :NULL, :NOTNULL]
# Reference the constant in the Sequel module if there is
# one that matches.
def inspect
INSPECT_LOOKUPS.each do |c|
return "Sequel::#{c}" if Sequel.const_get(c) == self
end
super
end
end
class CaseExpression
private
# CaseExpression's initializer checks whether an argument was
# provided, to differentiate CASE WHEN from CASE NULL WHEN, so
# check if an expression was provided, and only include the
# expression in the inspect output if so.
def inspect_args
if expression?
[:conditions, :default, :expression]
else
[:conditions, :default]
end
end
end
class Function
private
# Function uses a new! method for creating functions with options,
# since Function.new does not allow for an options hash.
def inspect_new_method
:new!
end
end
class JoinOnClause
private
# JoinOnClause's initializer takes the on argument as the first argument
# instead of the last.
def inspect_args
[:on, :join_type, :table_expr]
end
end
class JoinUsingClause
private
# JoinUsingClause's initializer takes the using argument as the first argument
# instead of the last.
def inspect_args
[:using, :join_type, :table_expr]
end
end
class OrderedExpression
private
# OrderedExpression's initializer takes the :nulls information inside a hash,
# so if a NULL order was given, include a hash with that information.
def inspect_args
if nulls
[:expression, :descending, :opts_hash]
else
[:expression, :descending]
end
end
# A hash of null information suitable for passing to the initializer.
def opts_hash
{:nulls=>nulls}
end
end
end
end
sequel-5.63.0/lib/sequel/extensions/exclude_or_null.rb 0000664 0000000 0000000 00000004475 14342141206 0023065 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The exclude_or_null extension adds Dataset#exclude_or_null and
# Dataset#exclude_or_null_having. These methods are similar to
# Dataset#exclude and Dataset#exclude_having, except that they
# will also exclude rows where the condition IS NULL.
#
# DB[:table].exclude_or_null(foo: 1)
# # SELECT * FROM table WHERE NOT coalesce((foo = 1), false)
#
# DB[:table].exclude_or_null{foo(bar) =~ 1}
# # SELECT * FROM table WHERE NOT coalesce((foo(bar) = 1), false)
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:exclude_or_null)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:exclude_or_null)
#
# Note, this extension works correctly on PostgreSQL, SQLite, MySQL,
# H2, and HSQLDB. However, it does not work correctly on Microsoft SQL Server,
# Oracle, DB2, SQLAnywhere, or Derby.
#
# Related module: Sequel::ExcludeOrNull
#
module Sequel
module ExcludeOrNull
# Performs the inverse of Dataset#where, but also excludes rows where the given
# condition IS NULL.
#
# DB[:items].exclude_or_null(category: 'software')
# # SELECT * FROM items WHERE NOT coalesce((category = 'software'), false)
#
# DB[:items].exclude_or_null(category: 'software', id: 3)
# # SELECT * FROM items WHERE NOT coalesce(((category = 'software') AND (id = 3)), false)
def exclude_or_null(*cond, &block)
add_filter(:where, cond, :or_null, &block)
end
# The same as exclude_or_null, but affecting the HAVING clause instead of the
# WHERE clause.
#
# DB[:items].select_group(:name).exclude_or_null_having{count(name) < 2}
# # SELECT name FROM items GROUP BY name HAVING NOT coalesce((count(name) < 2), false)
def exclude_or_null_having(*cond, &block)
add_filter(:having, cond, :or_null, &block)
end
private
# Recognize :or_null value for invert, returning an expression for
# the invert of the condition or the condition being null.
def _invert_filter(cond, invert)
if invert == :or_null
~SQL::Function.new(:coalesce, cond, SQL::Constants::SQLFALSE)
else
super
end
end
end
Dataset.register_extension(:exclude_or_null, ExcludeOrNull)
end
sequel-5.63.0/lib/sequel/extensions/fiber_concurrency.rb 0000664 0000000 0000000 00000001204 14342141206 0023366 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The fiber_concurrency extension changes the default concurrency
# primitive in Sequel to be Fiber.current instead of Thread.current.
# This is the value used in various hash keys to implement safe
# concurrency (thread-safe concurrency by default, fiber-safe
# concurrency with this extension). It can be enabled via:
#
# Sequel.extension :fiber_concurrency
#
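# A usage sketch (assumes the fiber is resumed manually or driven by a
# fiber scheduler; DB is a hypothetical Database object):
#
#   Sequel.extension :fiber_concurrency
#   # Each fiber now checks out its own connection from the pool,
#   # rather than sharing the connection of the enclosing thread:
#   Fiber.new{DB.transaction{DB[:table].all}}.resume
#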
# Related module: Sequel::FiberConcurrency
require 'fiber'
module Sequel
module FiberConcurrency
# Make the current concurrency primitive be Fiber.current.
def current
Fiber.current
end
end
extend FiberConcurrency
end
sequel-5.63.0/lib/sequel/extensions/freeze_datasets.rb 0000664 0000000 0000000 00000000127 14342141206 0023040 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
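# The freeze_datasets extension is a no-op, as datasets are always frozen
# starting in Sequel 5. It remains registered for backwards compatibility.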
Sequel::Database.register_extension(:freeze_datasets){}
sequel-5.63.0/lib/sequel/extensions/from_block.rb 0000664 0000000 0000000 00000000122 14342141206 0022000 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
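# The from_block extension is a no-op, as its behavior became the default
# in Sequel 5. It remains registered for backwards compatibility.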
Sequel::Database.register_extension(:from_block){}
sequel-5.63.0/lib/sequel/extensions/graph_each.rb 0000664 0000000 0000000 00000005324 14342141206 0021755 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The graph_each extension adds Dataset#graph_each and
# makes Dataset#each call #graph_each if the dataset has been graphed.
# Dataset#graph_each splits result hashes into subhashes per table:
#
# DB[:a].graph(:b, id: :b_id).all
# # => [{:a=>{:id=>1, :b_id=>2}, :b=>{:id=>2}}]
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:graph_each)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:graph_each)
#
# Related module: Sequel::GraphEach
#
module Sequel
module GraphEach
# Call graph_each for graphed datasets that are not being eager graphed.
def each
if @opts[:graph] && !@opts[:eager_graph]
graph_each{|r| yield r}
else
super
end
end
# Call graph_each for graphed datasets that are not being eager graphed.
def with_sql_each(sql)
if @opts[:graph] && !@opts[:eager_graph]
graph_each(sql){|r| yield r}
else
super
end
end
private
# Fetch the rows, split them into component table parts,
# transform and run the row_proc on each part (if applicable),
# and yield a hash of the parts.
def graph_each(sql=select_sql)
# Reject tables with nil datasets, as they are excluded from
# the result set
datasets = @opts[:graph][:table_aliases].to_a.reject{|ta,ds| ds.nil?}
# Get just the list of table aliases into a local variable, for speed
table_aliases = datasets.map{|ta,ds| ta}
# Get an array of arrays, one for each dataset, with
# the necessary information about each dataset, for speed
datasets = datasets.map{|ta, ds| [ta, ds, ds.row_proc]}
# Use the manually set graph aliases, if any, otherwise
# use the ones automatically created by .graph
column_aliases = @opts[:graph][:column_aliases]
fetch_rows(sql) do |r|
graph = {}
# Create the sub hashes, one per table
table_aliases.each{|ta| graph[ta]={}}
# Split the result set based on the column aliases
# If there are columns in the result set that are
# not in column_aliases, they are ignored
column_aliases.each do |col_alias, tc|
ta, column = tc
graph[ta][column] = r[col_alias]
end
# For each dataset run the row_proc if applicable
datasets.each do |ta,ds,rp|
g = graph[ta]
graph[ta] = if g.values.any?{|x| !x.nil?}
rp ? rp.call(g) : g
else
nil
end
end
yield graph
end
self
end
end
Dataset.register_extension(:graph_each, GraphEach)
end
sequel-5.63.0/lib/sequel/extensions/identifier_mangling.rb 0000664 0000000 0000000 00000014246 14342141206 0023675 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The identifier_mangling extension adds support for changing
# the default identifier mangling for datasets, as well as all
# datasets for a given database.
#
# # Use uppercase identifiers in database, and lowercase in ruby.
# # Default behavior of Sequel, as the SQL standard behavior
# # folds unquoted identifiers to uppercase.
# DB.identifier_input_method = :upcase
# DB.identifier_output_method = :downcase
#
# # Don't modify identifiers.
# # Default behavior of Sequel on PostgreSQL, MySQL, SQLite,
# # as they fold unquoted identifiers to lowercase.
# DB.identifier_input_method = nil
# DB.identifier_output_method = nil
#
# You can also choose to turn on or off identifier quoting:
#
# # Quote identifiers. Sequel's default behavior.
# DB.quote_identifiers = true
#
# # Don't quote identifiers. Sequel's default behavior on DB2.
# DB.quote_identifiers = false
#
# To modify the identifiers on a per-dataset basis:
#
# ds = DB[:a].with_identifier_input_method(:upcase).
# with_identifier_output_method(:downcase).
# with_quote_identifiers(true)
#
# To load the extension into the database:
#
# DB.extension :identifier_mangling
#
# Related modules: Sequel::IdentifierMangling::DatabaseMethods,
# Sequel::IdentifierMangling::DatasetMethods
#
module Sequel
module IdentifierMangling
module DatabaseMethods
def self.extended(db)
db.instance_exec do
@identifier_input_method = nil
@identifier_output_method = nil
@quote_identifiers = nil
reset_identifier_mangling
extend_datasets(DatasetMethods)
end
end
# The identifier input method to use by default for this database (default: adapter default)
attr_reader :identifier_input_method
# The identifier output method to use by default for this database (default: adapter default)
attr_reader :identifier_output_method
# Set the method to call on identifiers going into the database:
#
# DB[:items] # SELECT * FROM items
# DB.identifier_input_method = :upcase
# DB[:items] # SELECT * FROM ITEMS
def identifier_input_method=(v)
reset_default_dataset
@identifier_input_method = v
end
# Set the method to call on identifiers coming from the database:
#
# DB[:items].first # {:id=>1, :name=>'foo'}
# DB.identifier_output_method = :upcase
# DB[:items].first # {:ID=>1, :NAME=>'foo'}
def identifier_output_method=(v)
reset_default_dataset
@identifier_output_method = v
end
# Set whether to quote identifiers (columns and tables) for this database:
#
# DB[:items] # SELECT * FROM items
# DB.quote_identifiers = true
# DB[:items] # SELECT * FROM "items"
def quote_identifiers=(v)
reset_default_dataset
@quote_identifiers = v
end
# Returns true if the database quotes identifiers.
def quote_identifiers?
@quote_identifiers
end
private
# Return a dataset that uses the default identifier input and output methods
# for this database. Used when parsing metadata so that column symbols are
# returned as expected.
def _metadata_dataset
super.
with_identifier_input_method(identifier_input_method_default).
with_identifier_output_method(identifier_output_method_default)
end
# Upcase identifiers on input if database folds unquoted identifiers to
# uppercase.
def identifier_input_method_default
return super if defined?(super)
:upcase if folds_unquoted_identifiers_to_uppercase?
end
# Downcase identifiers on output if database folds unquoted identifiers to
# uppercase.
def identifier_output_method_default
return super if defined?(super)
:downcase if folds_unquoted_identifiers_to_uppercase?
end
# Reset the identifier mangling options. Overrides any already set on
# the instance. Only for internal use by shared adapters.
def reset_identifier_mangling
@quote_identifiers = @opts.fetch(:quote_identifiers, quote_identifiers_default)
@identifier_input_method = @opts.fetch(:identifier_input_method, identifier_input_method_default)
@identifier_output_method = @opts.fetch(:identifier_output_method, identifier_output_method_default)
reset_default_dataset
end
end
module DatasetMethods
# The String instance method to call on identifiers before sending them to
# the database.
def identifier_input_method
@opts.fetch(:identifier_input_method, db.identifier_input_method)
end
# The String instance method to call on identifiers before sending them to
# the database.
def identifier_output_method
@opts.fetch(:identifier_output_method, db.identifier_output_method)
end
# Check with the database to see if identifier quoting is enabled
def quote_identifiers?
@opts.fetch(:quote_identifiers, db.quote_identifiers?)
end
# Return a modified dataset with identifier_input_method set.
def with_identifier_input_method(meth)
clone(:identifier_input_method=>meth, :skip_symbol_cache=>true)
end
# Return a modified dataset with identifier_output_method set.
def with_identifier_output_method(meth)
clone(:identifier_output_method=>meth)
end
private
# Convert the identifier to the version used in the database via
# identifier_input_method.
def input_identifier(v)
(i = identifier_input_method) ? v.to_s.public_send(i) : v.to_s
end
# Modify the identifier returned from the database based on the
# identifier_output_method.
def output_identifier(v)
v = 'untitled' if v == ''
(i = identifier_output_method) ? v.to_s.public_send(i).to_sym : v.to_sym
end
def non_sql_option?(key)
super || key == :identifier_input_method || key == :identifier_output_method
end
end
end
Database.register_extension(:identifier_mangling, IdentifierMangling::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/implicit_subquery.rb 0000664 0000000 0000000 00000003032 14342141206 0023437 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The implicit_subquery extension changes most dataset methods that
# return modified datasets to implicitly call from_self if the database
# currently uses raw SQL. Sequel by default does not do this:
#
# DB["SELECT * FROM table"].select(:column).sql
# # => "SELECT * FROM table"
#
# With this extension, datasets that use raw SQL are implicitly wrapped
# in a subquery:
#
# DB["SELECT * FROM table"].select(:column).sql
# # => "SELECT column FROM (SELECT * FROM table) AS t1"
#
# To add this extension to an existing dataset:
#
# ds = ds.extension(:implicit_subquery)
#
# To set this as the default behavior for all datasets on a single database:
#
# DB.extension(:implicit_subquery)
#
# Related module: Sequel::Dataset::ImplicitSubquery
#
module Sequel
class Dataset
module ImplicitSubquery
exceptions = [:add_graph_aliases, :filter, :from, :from_self, :naked, :or, :order_more,
:qualify, :reverse, :reverse_order, :select_all, :select_more, :server,
:set_graph_aliases, :unfiltered, :ungraphed, :ungrouped, :unlimited, :unordered,
:with_sql]
additions = [:join_table]
(Dataset::QUERY_METHODS - Dataset::JOIN_METHODS - exceptions + additions).each do |meth|
define_method(meth) do |*a, &b|
if opts[:sql]
from_self.public_send(meth, *a, &b)
else
super(*a, &b)
end
end
end
end
register_extension(:implicit_subquery, ImplicitSubquery)
end
end
sequel-5.63.0/lib/sequel/extensions/index_caching.rb 0000664 0000000 0000000 00000006616 14342141206 0022464 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The index_caching extension adds a few methods to Sequel::Database
# that make it easy to dump information about database indexes to a file,
# and load it from that file. Loading index information from a
# dumped file is faster than parsing it from the database, so this
# can save bootup time for applications with large numbers of indexes.
#
# Basic usage in application code:
#
# DB = Sequel.connect('...')
# DB.extension :index_caching
# DB.load_index_cache('/path/to/index_cache.dump')
#
# # load model files
#
# Then, whenever database indexes are modified, write a new cached
# file. You can do that with bin/sequel's -X option:
#
# bin/sequel -X /path/to/index_cache.dump postgres://...
#
# Alternatively, if you don't want to dump the index information for
# all tables, and you are not worried about race conditions, you can
# choose to use the following in your application code:
#
# DB = Sequel.connect('...')
# DB.extension :index_caching
# DB.load_index_cache?('/path/to/index_cache.dump')
#
# # load model files
#
# DB.dump_index_cache?('/path/to/index_cache.dump')
#
# With this method, you just have to delete the index dump file if
# the schema is modified, and the application will recreate it for you
# using just the tables that your models use.
#
# Note that it is up to the application to ensure that the dumped
# index cache reflects the current state of the database. Sequel
# does no checking to ensure this, as checking would take time and the
# purpose of this code is to take a shortcut.
#
# The index cache is dumped in Marshal format, since it is the fastest
# and it handles all ruby objects used in the indexes hash. Because of this,
# you should not attempt to load from an untrusted file.
#
# Related module: Sequel::IndexCaching
#
module Sequel
module IndexCaching
# Set index cache to the empty hash.
def self.extended(db)
db.instance_variable_set(:@indexes, {})
end
# Dump the index cache to the filename given in Marshal format.
def dump_index_cache(file)
File.open(file, 'wb'){|f| f.write(Marshal.dump(@indexes))}
nil
end
# Dump the index cache to the filename given unless the file
# already exists.
def dump_index_cache?(file)
dump_index_cache(file) unless File.exist?(file)
end
# Replace the index cache with the data from the given file, which
# should be in Marshal format.
def load_index_cache(file)
@indexes = Marshal.load(File.read(file))
nil
end
# Replace the index cache with the data from the given file if the
# file exists.
def load_index_cache?(file)
load_index_cache(file) if File.exist?(file)
end
# If no options are provided and there is cached index information for
# the table, return the cached information instead of querying the
# database.
def indexes(table, opts=OPTS)
return super unless opts.empty?
quoted_name = literal(table)
if v = Sequel.synchronize{@indexes[quoted_name]}
return v
end
result = super
Sequel.synchronize{@indexes[quoted_name] = result}
result
end
private
# Remove the index cache for the given schema name
def remove_cached_schema(table)
k = quote_schema_table(table)
Sequel.synchronize{@indexes.delete(k)}
super
end
end
Database.register_extension(:index_caching, IndexCaching)
end
sequel-5.63.0/lib/sequel/extensions/inflector.rb 0000664 0000000 0000000 00000022037 14342141206 0021661 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The inflector extension adds inflection instance methods to String, which allows the easy transformation of
# words from singular to plural, class names to table names, modularized class
# names to ones without, and class names to foreign keys. It exists for
# backwards compatibility to legacy Sequel code.
#
# To load the extension:
#
# Sequel.extension :inflector
#
# Related module: String::Inflections
class String
# This module acts as a singleton returned/yielded by String.inflections,
# which is used to override or specify additional inflection rules. Examples:
#
# String.inflections do |inflect|
# inflect.plural /^(ox)$/i, '\1en'
# inflect.singular /^(ox)en/i, '\1'
#
# inflect.irregular 'octopus', 'octopi'
#
# inflect.uncountable "equipment"
# end
#
# New rules are added at the top. So in the example above, the irregular rule for octopus will now be the first of the
# pluralization and singularization rules that is run. This guarantees that your rules run before any of the rules that may
# already have been loaded.
module Inflections
@plurals, @singulars, @uncountables = [], [], []
class << self
# Array of 2 element arrays, first containing a regex, and the second containing a substitution pattern, used for pluralization.
attr_reader :plurals
# Array of 2 element arrays, first containing a regex, and the second containing a substitution pattern, used for singularization.
attr_reader :singulars
# Array of strings for words where the singular form is the same as the plural form
attr_reader :uncountables
end
# Clears the loaded inflections within a given scope (default is :all). Give the scope as a symbol of the inflection type,
# the options are: :plurals, :singulars, :uncountables
#
# Examples:
# clear :all
# clear :plurals
def self.clear(scope = :all)
case scope
when :all
@plurals, @singulars, @uncountables = [], [], []
else
instance_variable_set("@#{scope}", [])
end
end
# Specifies a new irregular that applies to both pluralization and singularization at the same time. This can only be used
# for strings, not regular expressions. You simply pass the irregular in singular and plural form.
#
# Examples:
# irregular 'octopus', 'octopi'
# irregular 'person', 'people'
def self.irregular(singular, plural)
plural(Regexp.new("(#{singular[0,1]})#{singular[1..-1]}$", "i"), '\1' + plural[1..-1])
singular(Regexp.new("(#{plural[0,1]})#{plural[1..-1]}$", "i"), '\1' + singular[1..-1])
end
# Specifies a new pluralization rule and its replacement. The rule can either be a string or a regular expression.
# The replacement should always be a string that may include references to the matched data from the rule.
#
# Example:
# plural(/(x|ch|ss|sh)$/i, '\1es')
def self.plural(rule, replacement)
@plurals.insert(0, [rule, replacement])
end
# Specifies a new singularization rule and its replacement. The rule can either be a string or a regular expression.
# The replacement should always be a string that may include references to the matched data from the rule.
#
# Example:
# singular(/([^aeiouy]|qu)ies$/i, '\1y')
def self.singular(rule, replacement)
@singulars.insert(0, [rule, replacement])
end
# Add uncountable words that shouldn't be inflected.
#
# Examples:
# uncountable "money"
# uncountable "money", "information"
# uncountable %w( money information rice )
def self.uncountable(*words)
(@uncountables << words).flatten!
end
require_relative '../model/default_inflections'
instance_exec(&Sequel::DEFAULT_INFLECTIONS_PROC)
end
# Yield the Inflections module if a block is given, and return
# the Inflections module.
def self.inflections
yield Inflections if defined?(yield)
Inflections
end
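# If any of these methods are already defined (e.g. by another inflector
# implementation), alias them to themselves first, so the redefinitions
# below do not trigger method redefinition warnings in verbose mode.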
%w'classify constantize dasherize demodulize foreign_key humanize pluralize singularize tableize underscore'.each do |m|
# :nocov:
if method_defined?(m)
alias_method(m, m)
end
# :nocov:
end
# By default, camelize converts the string to UpperCamelCase. If the argument to camelize
# is set to :lower then camelize produces lowerCamelCase.
#
# camelize will also convert '/' to '::' which is useful for converting paths to namespaces
#
# Examples
# "active_record".camelize #=> "ActiveRecord"
# "active_record".camelize(:lower) #=> "activeRecord"
# "active_record/errors".camelize #=> "ActiveRecord::Errors"
# "active_record/errors".camelize(:lower) #=> "activeRecord::Errors"
def camelize(first_letter_in_uppercase = :upper)
s = gsub(/\/(.?)/){|x| "::#{x[-1..-1].upcase unless x == '/'}"}.gsub(/(^|_)(.)/){|x| x[-1..-1].upcase}
s[0...1] = s[0...1].downcase unless first_letter_in_uppercase == :upper
s
end
alias_method :camelcase, :camelize
# Singularizes and camelizes the string. Also strips out all characters preceding
# and including a period (".").
#
# Examples
# "egg_and_hams".classify #=> "EggAndHam"
# "post".classify #=> "Post"
# "schema.post".classify #=> "Post"
def classify
sub(/.*\./, '').singularize.camelize
end
# Constantize tries to find a declared constant with the name specified
# in the string. It raises a NameError when the name is not in CamelCase
# or is not initialized.
#
# Examples
# "Module".constantize #=> Module
# "Class".constantize #=> Class
def constantize
raise(NameError, "#{inspect} is not a valid constant name!") unless m = /\A(?:::)?([A-Z]\w*(?:::[A-Z]\w*)*)\z/.match(self)
Object.module_eval("::#{m[1]}", __FILE__, __LINE__)
end
# Replaces underscores with dashes in the string.
#
# Example
# "puni_puni".dasherize #=> "puni-puni"
def dasherize
gsub('_', '-')
end
# Removes the module part from the expression in the string
#
# Examples
# "ActiveRecord::CoreExtensions::String::Inflections".demodulize #=> "Inflections"
# "Inflections".demodulize #=> "Inflections"
def demodulize
gsub(/^.*::/, '')
end
# Creates a foreign key name from a class name.
# +use_underscore+ sets whether the method should put '_' between the name and 'id'.
#
# Examples
# "Message".foreign_key #=> "message_id"
# "Message".foreign_key(false) #=> "messageid"
# "Admin::Post".foreign_key #=> "post_id"
def foreign_key(use_underscore = true)
"#{demodulize.underscore}#{'_' if use_underscore}id"
end
# Capitalizes the first word and turns underscores into spaces and strips _id.
# Like titleize, this is meant for creating pretty output.
#
# Examples
# "employee_salary" #=> "Employee salary"
# "author_id" #=> "Author"
def humanize
gsub(/_id$/, "").gsub('_', " ").capitalize
end
# Returns the plural form of the word in the string.
#
# Examples
# "post".pluralize #=> "posts"
# "octopus".pluralize #=> "octopi"
# "sheep".pluralize #=> "sheep"
# "words".pluralize #=> "words"
# "the blue mailman".pluralize #=> "the blue mailmen"
# "CamelOctopus".pluralize #=> "CamelOctopi"
def pluralize
result = dup
Inflections.plurals.each{|(rule, replacement)| break if result.gsub!(rule, replacement)} unless Inflections.uncountables.include?(downcase)
result
end
# The reverse of pluralize, returns the singular form of a word in a string.
#
# Examples
# "posts".singularize #=> "post"
# "octopi".singularize #=> "octopus"
# "sheep".singluarize #=> "sheep"
# "word".singluarize #=> "word"
# "the blue mailmen".singularize #=> "the blue mailman"
# "CamelOctopi".singularize #=> "CamelOctopus"
def singularize
result = dup
Inflections.singulars.each{|(rule, replacement)| break if result.gsub!(rule, replacement)} unless Inflections.uncountables.include?(downcase)
result
end
# Underscores and pluralizes the string.
#
# Examples
# "RawScaledScorer".tableize #=> "raw_scaled_scorers"
# "egg_and_ham".tableize #=> "egg_and_hams"
# "fancyCategory".tableize #=> "fancy_categories"
def tableize
underscore.pluralize
end
# Capitalizes all the words and replaces some characters in the string to create
# a nicer looking title. Titleize is meant for creating pretty output.
#
# titleize is also aliased as titlecase
#
# Examples
# "man from the boondocks".titleize #=> "Man From The Boondocks"
# "x-men: the last stand".titleize #=> "X Men: The Last Stand"
def titleize
underscore.humanize.gsub(/\b([a-z])/){|x| x[-1..-1].upcase}
end
alias_method :titlecase, :titleize
# The reverse of camelize. Makes an underscored form from the expression in the string.
# Also changes '::' to '/' to convert namespaces to paths.
#
# Examples
# "ActiveRecord".underscore #=> "active_record"
# "ActiveRecord::Errors".underscore #=> active_record/errors
def underscore
gsub(/::/, '/').gsub(/([A-Z]+)([A-Z][a-z])/,'\1_\2').
gsub(/([a-z\d])([A-Z])/,'\1_\2').tr("-", "_").downcase
end
end
sequel-5.63.0/lib/sequel/extensions/integer64.rb 0000664 0000000 0000000 00000002012 14342141206 0021472 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The integer64 extension changes the default type used for Integer
# to be the same type as used for :Bignum. In general, this means that
# instead of Integer resulting in a 32-bit database integer type, it will
# result in a 64-bit database integer type. This affects the default
# type used for primary_key and foreign_key when using the schema
# modification methods.
#
# Note that it doesn't make sense to use this extension on SQLite, since
# the integer type will automatically handle 64-bit integers, and it treats
# the integer type specially when the column is also the primary key.
#
# To load the extension into the database:
#
# DB.extension :integer64
#
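# For example (a sketch, assuming a PostgreSQL connection):
#
#   DB.extension :integer64
#   DB.create_table(:events) do
#     primary_key :id     # bigserial instead of serial
#     Integer :account_id # bigint instead of integer
#   end
#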
# Related module: Sequel::Integer64
#
module Sequel
module Integer64
private
# Use same type as used for :Bignum by default for generic integer value.
def type_literal_generic_integer(column)
type_literal_generic_bignum_symbol(column)
end
end
Database.register_extension(:integer64, Integer64)
end
sequel-5.63.0/lib/sequel/extensions/is_distinct_from.rb 0000664 0000000 0000000 00000010234 14342141206 0023227 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The is_distinct_from extension adds the ability to use the
# SQL standard IS DISTINCT FROM operator, which is similar to the
# not equals operator, except that NULL values are considered
# equal. PostgreSQL, SQLite 3.39+, and H2 currently support this operator. On
# other databases, support is emulated.
#
# First, you need to load the extension into the database:
#
# DB.extension :is_distinct_from
#
# Then you can use the Sequel.is_distinct_from to create the expression
# objects:
#
# expr = Sequel.is_distinct_from(:column_a, :column_b)
# # (column_a IS DISTINCT FROM column_b)
#
# You can also use the +is_distinct_from+ method on most Sequel expressions:
#
# expr = Sequel[:column_a].is_distinct_from(:column_b)
# # (column_a IS DISTINCT FROM column_b)
#
# These expressions can be used in your datasets, or anywhere else that
# Sequel expressions are allowed:
#
# DB[:table].where(expr)
#
# Related module: Sequel::SQL::IsDistinctFrom
#
module Sequel
module SQL
module Builders
# Return an IsDistinctFrom expression object, using the IS DISTINCT FROM operator
# with the given left hand side and right hand side.
def is_distinct_from(lhs, rhs)
BooleanExpression.new(:NOOP, IsDistinctFrom.new(lhs, rhs))
end
end
# Represents an SQL expression using the IS DISTINCT FROM operator.
class IsDistinctFrom < GenericExpression
# These methods are added to expressions, allowing them to return IS DISTINCT
# FROM expressions based on the receiving expression.
module Methods
# Return an IsDistinctFrom expression, using the IS DISTINCT FROM operator,
# with the receiver as the left hand side and the argument as the right hand side.
def is_distinct_from(rhs)
BooleanExpression.new(:NOOP, IsDistinctFrom.new(self, rhs))
end
end
# These methods are added to datasets using the is_distinct_from
# extension, for the purposes of correctly literalizing IsDistinctFrom
# expressions for the appropriate database type.
module DatasetMethods
# Append the SQL fragment for the IS DISTINCT FROM expression to the SQL query.
def is_distinct_from_sql_append(sql, idf)
lhs = idf.lhs
rhs = idf.rhs
if supports_is_distinct_from?
sql << "("
literal_append(sql, lhs)
sql << " IS DISTINCT FROM "
literal_append(sql, rhs)
sql << ")"
elsif db.database_type == :derby && (lhs == nil || rhs == nil)
if lhs == nil && rhs == nil
sql << literal_false
elsif lhs == nil
literal_append(sql, ~Sequel.expr(rhs=>nil))
else
literal_append(sql, ~Sequel.expr(lhs=>nil))
end
else
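# Emulate IS DISTINCT FROM as:
# (CASE WHEN ((lhs = rhs) OR ((lhs IS NULL) AND (rhs IS NULL))) THEN 0 ELSE 1 END) = 1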
literal_append(sql, Sequel.case({(Sequel.expr(lhs=>rhs) | [[lhs, nil], [rhs, nil]]) => 0}, 1) => 1)
end
end
private
# Whether the database supports IS DISTINCT FROM.
def supports_is_distinct_from?
if defined?(super)
return super
end
case db.database_type
when :postgres, :h2
true
when :sqlite
db.sqlite_version >= 33900
else
false
end
end
end
# The left hand side of the IS DISTINCT FROM expression.
attr_reader :lhs
# The right hand side of the IS DISTINCT FROM expression.
attr_reader :rhs
def initialize(lhs, rhs)
@lhs = lhs
@rhs = rhs
end
to_s_method :is_distinct_from_sql
end
end
class SQL::GenericExpression
include SQL::IsDistinctFrom::Methods
end
class LiteralString
include SQL::IsDistinctFrom::Methods
end
Dataset.register_extension(:is_distinct_from, SQL::IsDistinctFrom::DatasetMethods)
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::SQL::IsDistinctFrom::Methods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::SQL::IsDistinctFrom::Methods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/looser_typecasting.rb 0000664 0000000 0000000 00000002572 14342141206 0023613 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The LooserTypecasting extension loosens the default database typecasting
# for the following types:
#
# :float :: use to_f instead of Float()
# :integer :: use to_i instead of Integer()
# :decimal :: use 0.0 for unsupported strings
# :string :: silently allow hash and array conversion to string
#
# This also removes bytesize checks for string inputs for float, integer
# and decimal conversions.
#
# To load the extension into the database:
#
# DB.extension :looser_typecasting
#
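# For example (a sketch of the loosened behavior):
#
#   DB.extension :looser_typecasting
#   DB.typecast_value(:integer, 'abc') # => 0 (instead of raising)
#   DB.typecast_value(:float, 'abc')   # => 0.0 (instead of raising)
#   DB.typecast_value(:string, [1, 2]) # => "[1, 2]" (instead of raising)
#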
# Related module: Sequel::LooserTypecasting
#
module Sequel
module LooserTypecasting
private
# Typecast the value to a Float using to_f instead of Kernel.Float
def typecast_value_float(value)
value.to_f
end
# Typecast the value to an Integer using to_i instead of Kernel.Integer
def typecast_value_integer(value)
value.to_i
end
# Typecast the value to a String using to_s instead of Kernel.String
def typecast_value_string(value)
value.to_s
end
if RUBY_VERSION >= '2.4'
def _typecast_value_string_to_decimal(value)
BigDecimal(value)
rescue
BigDecimal('0.0')
end
else
# :nocov:
def _typecast_value_string_to_decimal(value)
BigDecimal(value)
end
# :nocov:
end
end
Database.register_extension(:looser_typecasting, LooserTypecasting)
end
sequel-5.63.0/lib/sequel/extensions/migration.rb 0000664 0000000 0000000 00000064527 14342141206 0021677 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# Adds the Sequel::Migration and Sequel::Migrator classes, which allow
# the user to easily group schema changes and migrate the database
# to a newer version or revert to a previous version.
#
# To load the extension:
#
# Sequel.extension :migration
#
# Related modules: Sequel::Migration, Sequel::SimpleMigration,
# Sequel::MigrationDSL, Sequel::MigrationReverser, Sequel::MigrationAlterTableReverser,
# Sequel::Migrator, Sequel::IntegerMigrator, Sequel::TimestampMigrator
#
module Sequel
# Sequel's older migration class, available for backward compatibility.
# Uses subclasses with up and down instance methods for each migration:
#
# Class.new(Sequel::Migration) do
# def up
# create_table(:artists) do
# primary_key :id
# String :name
# end
# end
#
# def down
# drop_table(:artists)
# end
# end
#
# Part of the +migration+ extension.
class Migration
# Set the database associated with this migration.
def initialize(db)
@db = db
end
# Applies the migration to the supplied database in the specified
# direction.
def self.apply(db, direction)
raise(ArgumentError, "Invalid migration direction specified (#{direction.inspect})") unless [:up, :down].include?(direction)
new(db).public_send(direction)
end
# Returns the list of Migration descendants.
def self.descendants
@descendants ||= []
end
# Adds the new migration class to the list of Migration descendants.
def self.inherited(base)
descendants << base
end
# Don't allow transaction overriding in old migrations.
def self.use_transactions
nil
end
# The default down action does nothing
def down
end
# Intercepts method calls intended for the database and sends them along.
def method_missing(method_sym, *args, &block)
# Allow calling private methods for backwards compatibility
@db.send(method_sym, *args, &block)
end
# :nocov:
ruby2_keywords(:method_missing) if respond_to?(:ruby2_keywords, true)
# :nocov:
# This object responds to all methods the database responds to.
def respond_to_missing?(meth, include_private)
@db.respond_to?(meth, include_private)
end
# The default up action does nothing
def up
end
end
# Migration class used by the Sequel.migration DSL,
# using instances for each migration, unlike the
# +Migration+ class, which uses subclasses for each
# migration. Part of the +migration+ extension.
class SimpleMigration
# Proc used for the down action
attr_accessor :down
# Proc used for the up action
attr_accessor :up
# Whether to use transactions for this migration, default depends on the
# database.
attr_accessor :use_transactions
# Don't set transaction use by default.
def initialize
@use_transactions = nil
end
# Apply the appropriate block on the +Database+
# instance using instance_exec.
def apply(db, direction)
raise(ArgumentError, "Invalid migration direction specified (#{direction.inspect})") unless [:up, :down].include?(direction)
if prok = public_send(direction)
db.instance_exec(&prok)
end
end
end
# Internal class used by the Sequel.migration DSL, part of the +migration+ extension.
class MigrationDSL < BasicObject
# The underlying SimpleMigration instance
attr_reader :migration
def self.create(&block)
new(&block).migration
end
# Create a new migration class, and instance_exec the block.
def initialize(&block)
@migration = SimpleMigration.new
Migration.descendants << migration
instance_exec(&block)
end
# Defines the migration's down action.
def down(&block)
migration.down = block
end
# Disable the use of transactions for the related migration
def no_transaction
migration.use_transactions = false
end
# Enable the use of transactions for the related migration
def transaction
migration.use_transactions = true
end
# Defines the migration's up action.
def up(&block)
migration.up = block
end
# Creates a reversible migration. This is the same as creating
# the same block with +up+, but it also calls the block and attempts
# to create a +down+ block that will reverse the changes made by
# the block.
#
# There are no guarantees that this will work perfectly
# in all cases, but it works for some simple cases.
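# For example (a sketch):
#
#   Sequel.migration do
#     change do
#       create_table(:artists) do
#         primary_key :id
#         String :name, null: false
#       end
#     end
#   end
#
#   # The generated down action is equivalent to: drop_table(:artists)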
def change(&block)
migration.up = block
migration.down = MigrationReverser.new.reverse(&block)
end
end
# Handles the reversing of reversible migrations. Basically records
# supported methods calls, translates them to reversed calls, and
# returns them in reverse order.
class MigrationReverser < Sequel::BasicObject
def initialize
@actions = []
end
# Reverse the actions for the given block. Takes the block given
# and returns a new block that reverses the actions taken by
# the given block.
def reverse(&block)
begin
instance_exec(&block)
rescue
just_raise = true
end
if just_raise
Proc.new{raise Sequel::Error, "irreversible migration method used in #{block.source_location.first}, you may need to write your own down method"}
else
actions = @actions.reverse
Proc.new do
actions.each do |a|
pr = a.last.is_a?(Proc) ? a.pop : nil
# Allow calling private methods as the reversing methods are private
send(*a, &pr)
end
end
end
end
private
def add_column(*args)
@actions << [:drop_column, args[0], args[1]]
end
def add_index(*args)
@actions << [:drop_index, *args]
end
def alter_table(table, &block)
@actions << [:alter_table, table, MigrationAlterTableReverser.new.reverse(&block)]
end
def create_join_table(*args)
@actions << [:drop_join_table, *args]
end
def create_table(name, opts=OPTS)
@actions << [:drop_table, name, opts]
end
def create_view(name, _, opts=OPTS)
@actions << [:drop_view, name, opts]
end
def rename_column(table, name, new_name)
@actions << [:rename_column, table, new_name, name]
end
def rename_table(table, new_name)
@actions << [:rename_table, new_name, table]
end
end
# Handles reversing an alter_table block in a reversible migration.
class MigrationAlterTableReverser < Sequel::BasicObject
def initialize
@actions = []
end
def reverse(&block)
instance_exec(&block)
actions = @actions.reverse
# Allow calling private methods as the reversing methods are private
Proc.new{actions.each{|a| send(*a)}}
end
private
def add_column(*args)
@actions << [:drop_column, args.first]
end
def add_constraint(*args)
name = args.first
name = name.is_a?(Hash) ? name[:name] : name
@actions << [:drop_constraint, name]
end
def add_foreign_key(key, table, *args)
@actions << [:drop_foreign_key, key, *args]
end
def add_primary_key(*args)
raise if args.first.is_a?(Array)
@actions << [:drop_column, args.first]
end
def add_index(*args)
@actions << [:drop_index, *args]
end
alias add_full_text_index add_index
alias add_spatial_index add_index
def rename_column(name, new_name)
@actions << [:rename_column, new_name, name]
end
end
# The preferred method for writing Sequel migrations, using a DSL:
#
# Sequel.migration do
# up do
# create_table(:artists) do
# primary_key :id
# String :name
# end
# end
#
# down do
# drop_table(:artists)
# end
# end
#
# Designed to be used with the +Migrator+ class, part of the +migration+ extension.
def self.migration(&block)
MigrationDSL.create(&block)
end
# The +Migrator+ class performs migrations based on migration files in a
# specified directory. The migration files should be named using the
# following pattern:
#
# <version>_<title>.rb
#
# For example, the following files are considered migration files:
#
# 001_create_sessions.rb
# 002_add_data_column.rb
#
# You can also use timestamps as version numbers:
#
# 1273253850_create_sessions.rb
# 1273257248_add_data_column.rb
#
# If any migration filenames use timestamps as version numbers, Sequel
# uses the +TimestampMigrator+ to migrate, otherwise it uses the +IntegerMigrator+.
# The +TimestampMigrator+ can handle migrations that are run out of order
# as well as migrations with the same timestamp,
# while the +IntegerMigrator+ is more strict and raises exceptions for missing
# or duplicate migration files.
#
# The migration files should contain either one +Migration+
# subclass or one Sequel.migration call.
#
# Migrations are generally run via the sequel command line tool,
# using the -m and -M switches. The -m switch specifies the migration
# directory, and the -M switch specifies the version to which to migrate.
#
# You can apply migrations using the Migrator API, as well (this is necessary
# if you want to specify the version from which to migrate in addition to the version
# to which to migrate).
# To apply a migrator, the +apply+ method must be invoked with the database
# instance, the directory of migration files and the target version. If
# no current version is supplied, it is read from the database. The migrator
# automatically creates a table (schema_info for integer migrations and
# schema_migrations for timestamped migrations) in the database to keep track
# of the current migration version. If no migration version is stored in the
# database, the version is considered to be 0. If no target version is
# specified, or the target version specified is greater than the latest
# version available, the database is migrated to the latest version available in the
# migration directory.
#
# For example, to migrate the database to the latest version:
#
# Sequel::Migrator.run(DB, '.')
#
# For example, to migrate the database all the way down:
#
# Sequel::Migrator.run(DB, '.', target: 0)
#
# For example, to migrate the database to version 4:
#
# Sequel::Migrator.run(DB, '.', target: 4)
#
# To migrate the database from version 1 to version 5:
#
# Sequel::Migrator.run(DB, '.', target: 5, current: 1)
#
# Part of the +migration+ extension.
class Migrator
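# Regexp matching filenames of migration files: an integer version,
# an underscore, a description, and the .rb extension.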
MIGRATION_FILE_PATTERN = /\A(\d+)_.+\.rb\z/i.freeze
# Mutex used around migration file loading
MUTEX = Mutex.new
# Exception class raised when there is an error with the migrator's
# file structure, database, or arguments.
class Error < Sequel::Error
end
# Exception class raised when Migrator.check_current signals that it is
# not current.
class NotCurrentError < Error
end
# Wrapper for +run+, maintaining backwards API compatibility
def self.apply(db, directory, target = nil, current = nil)
run(db, directory, :target => target, :current => current)
end
# Raise a NotCurrentError unless the migrator is current, takes the same
# arguments as #run.
def self.check_current(*args)
raise(NotCurrentError, 'current migration version does not match latest available version') unless is_current?(*args)
end
# Return whether the migrator is current (i.e. it does not need to make
# any changes). Takes the same arguments as #run.
def self.is_current?(db, directory, opts=OPTS)
migrator_class(directory).new(db, directory, opts).is_current?
end
# Migrates the supplied database using the migration files in the specified directory. Options:
# :allow_missing_migration_files :: Don't raise an error if there are missing migration files.
# It is very risky to use this option, since it can result in
# the database schema version number not matching the expected
# database schema.
# :column :: The column in the :table argument storing the migration version (default: :version).
# :current :: The current version of the database. If not given, it is retrieved from the database
# using the :table and :column options.
# :relative :: Run the given number of migrations, with a positive number being migrations to migrate
# up, and a negative number being migrations to migrate down (IntegerMigrator only).
# :table :: The table containing the schema version (default: :schema_info for integer migrations and
# :schema_migrations for timestamped migrations).
# :target :: The target version to which to migrate. If not given, migrates to the maximum version.
#
# Examples:
# Sequel::Migrator.run(DB, "migrations")
# Sequel::Migrator.run(DB, "migrations", target: 15, current: 10)
# Sequel::Migrator.run(DB, "app1/migrations", column: :app2_version)
# Sequel::Migrator.run(DB, "app2/migrations", column: :app2_version, table: :schema_info2)
def self.run(db, directory, opts=OPTS)
migrator_class(directory).new(db, directory, opts).run
end
# Choose the Migrator subclass to use. Uses the TimestampMigrator
# if the version number is greater than 20000101, otherwise uses the IntegerMigrator.
def self.migrator_class(directory)
if self.equal?(Migrator)
raise(Error, "Must supply a valid migration path") unless File.directory?(directory)
Dir.new(directory).each do |file|
next unless MIGRATION_FILE_PATTERN.match(file)
return TimestampMigrator if file.split('_', 2).first.to_i > 20000101
end
IntegerMigrator
else
self
end
end
# The column to use to hold the migration version number for integer migrations or
# filename for timestamp migrations (defaults to :version for integer migrations and
# :filename for timestamp migrations)
attr_reader :column
# The database related to this migrator
attr_reader :db
# The directory for this migrator's files
attr_reader :directory
# The dataset for this migrator, representing the +schema_info+ table for integer
# migrations and the +schema_migrations+ table for timestamp migrations
attr_reader :ds
# All migration files in this migrator's directory
attr_reader :files
# The table to use to hold the applied migration data (defaults to :schema_info for
# integer migrations and :schema_migrations for timestamp migrations)
attr_reader :table
# The target version for this migrator
attr_reader :target
# Setup the state for the migrator
def initialize(db, directory, opts=OPTS)
raise(Error, "Must supply a valid migration path") unless File.directory?(directory)
@db = db
@directory = directory
@allow_missing_migration_files = opts[:allow_missing_migration_files]
@files = get_migration_files
schema, table = @db.send(:schema_and_table, opts[:table] || default_schema_table)
@table = schema ? Sequel::SQL::QualifiedIdentifier.new(schema, table) : table
@column = opts[:column] || default_schema_column
@ds = schema_dataset
@use_transactions = opts[:use_transactions]
end
private
# If transactions should be used for the migration, yield to the block
# inside a transaction. Otherwise, just yield to the block.
def checked_transaction(migration, &block)
use_trans = if @use_transactions.nil?
if migration.use_transactions.nil?
@db.supports_transactional_ddl?
else
migration.use_transactions
end
else
@use_transactions
end
if use_trans
db.transaction(&block)
else
yield
end
end
# Load the migration file, raising an exception if the file does not define
# a single migration.
def load_migration_file(file)
MUTEX.synchronize do
n = Migration.descendants.length
load(file)
raise Error, "Migration file #{file.inspect} not containing a single migration detected" unless n + 1 == Migration.descendants.length
c = Migration.descendants.pop
if c.is_a?(Class) && !c.name.to_s.empty? && Object.const_defined?(c.name)
Object.send(:remove_const, c.name)
end
c
end
end
# Return the integer migration version based on the filename.
def migration_version_from_file(filename)
filename.split('_', 2).first.to_i
end
end
# The default migrator, recommended in most cases. Uses a simple incrementing
# version number starting with 1, where missing or duplicate migration file
# versions are not allowed. Part of the +migration+ extension.
class IntegerMigrator < Migrator
Error = Migrator::Error
# The current version for this migrator
attr_reader :current
# The direction of the migrator, either :up or :down
attr_reader :direction
# The migrations used by this migrator
attr_reader :migrations
# Set up all state for the migrator instance
def initialize(db, directory, opts=OPTS)
super
@current = opts[:current] || current_migration_version
latest_version = latest_migration_version
@target = if opts[:target]
opts[:target]
elsif opts[:relative]
@current + opts[:relative]
else
latest_version
end
raise(Error, "No target and/or latest version available, probably because no migration files found or filenames don't follow the migration filename convention") unless target && latest_version
if @target > latest_version
@target = latest_version
elsif @target < 0
@target = 0
end
@direction = current < target ? :up : :down
if @direction == :down && @current >= @files.length && !@allow_missing_migration_files
raise Migrator::Error, "Missing migration version(s) needed to migrate down to target version (current: #{current}, target: #{target})"
end
@migrations = get_migrations
end
# The integer migrator is current if the current version is the same as the target version.
def is_current?
current_migration_version == target
end
# Apply all migrations on the database
def run
migrations.zip(version_numbers).each do |m, v|
timer = Sequel.start_timer
db.log_info("Begin applying migration version #{v}, direction: #{direction}")
checked_transaction(m) do
m.apply(db, direction)
set_migration_version(up? ? v : v-1)
end
db.log_info("Finished applying migration version #{v}, direction: #{direction}, took #{sprintf('%0.6f', Sequel.elapsed_seconds_since(timer))} seconds")
end
target
end
private
# Gets the current migration version stored in the database. If no version
# number is stored, 0 is returned.
def current_migration_version
ds.get(column) || 0
end
# The default column storing schema version.
def default_schema_column
:version
end
# The default table storing schema version.
def default_schema_table
:schema_info
end
# Returns any found migration files in the supplied directory.
def get_migration_files
files = []
Dir.new(directory).each do |file|
next unless MIGRATION_FILE_PATTERN.match(file)
version = migration_version_from_file(file)
if version >= 20000101
raise Migrator::Error, "Migration number too large, must use TimestampMigrator: #{file}"
end
raise(Error, "Duplicate migration version: #{version}") if files[version]
files[version] = File.join(directory, file)
end
1.upto(files.length - 1){|i| raise(Error, "Missing migration version: #{i}") unless files[i]} unless @allow_missing_migration_files
files
end
# Returns a list of migration classes filtered for the migration range and
# ordered according to the migration direction.
def get_migrations
version_numbers.map{|n| load_migration_file(files[n])}
end
# Returns the latest version available in the specified directory.
def latest_migration_version
l = files.last
l ? migration_version_from_file(File.basename(l)) : nil
end
# Returns the dataset for the schema_info table. If no such table
# exists, it is automatically created.
def schema_dataset
c = column
ds = db.from(table)
db.create_table?(table){Integer c, :default=>0, :null=>false}
unless ds.columns.include?(c)
db.alter_table(table){add_column c, Integer, :default=>0, :null=>false}
end
ds.insert(c=>0) if ds.empty?
raise(Error, "More than 1 row in migrator table") if ds.count > 1
ds
end
# Sets the current migration version stored in the database.
def set_migration_version(version)
ds.update(column=>version)
end
# Whether or not this is an up migration
def up?
direction == :up
end
# An array of numbers corresponding to the migrations,
# so that each number in the array is the migration version
# that will be in effect after the migration is run.
def version_numbers
@version_numbers ||= begin
versions = files.
compact.
map{|f| migration_version_from_file(File.basename(f))}.
select{|v| up? ? (v > current && v <= target) : (v <= current && v > target)}.
sort
versions.reverse! unless up?
versions
end
end
end
# The migrator used if any migration file version is greater than 20000101.
# Stores filenames of migration files, and can figure out which migrations
# have not been applied and apply them, even if earlier migrations are added
# after later migrations. If you plan to do that, the responsibility is on
# you to make sure the migrations don't conflict. Part of the +migration+ extension.
class TimestampMigrator < Migrator
Error = Migrator::Error
# Array of strings of applied migration filenames
attr_reader :applied_migrations
# Get tuples of migrations, filenames, and actions for each migration
attr_reader :migration_tuples
# Set up all state for the migrator instance
def initialize(db, directory, opts=OPTS)
super
@target = opts[:target]
@applied_migrations = get_applied_migrations
@migration_tuples = get_migration_tuples
end
# The timestamp migrator is current if there are no migrations to apply
# in either direction.
def is_current?
migration_tuples.empty?
end
# Apply all migration tuples on the database
def run
migration_tuples.each do |m, f, direction|
t = Time.now
db.log_info("Begin applying migration #{f}, direction: #{direction}")
checked_transaction(m) do
m.apply(db, direction)
fi = f.downcase
direction == :up ? ds.insert(column=>fi) : ds.where(column=>fi).delete
end
db.log_info("Finished applying migration #{f}, direction: #{direction}, took #{sprintf('%0.6f', Time.now - t)} seconds")
end
nil
end
private
# Convert the schema_info table to the new schema_migrations table format,
# using the version of the schema_info table and the current migration files.
def convert_from_schema_info
v = db[:schema_info].get(:version)
ds = db.from(table)
files.each do |path|
f = File.basename(path)
if migration_version_from_file(f) <= v
ds.insert(column=>f)
end
end
end
# The default column storing migration filenames.
def default_schema_column
:filename
end
# The default table storing migration filenames.
def default_schema_table
:schema_migrations
end
# Returns filenames of all applied migrations
def get_applied_migrations
am = ds.select_order_map(column)
missing_migration_files = am - files.map{|f| File.basename(f).downcase}
raise(Error, "Applied migration files not in file system: #{missing_migration_files.join(', ')}") if missing_migration_files.length > 0 && !@allow_missing_migration_files
am
end
# Returns any migration files found in the migrator's directory.
def get_migration_files
files = []
Dir.new(directory).each do |file|
next unless MIGRATION_FILE_PATTERN.match(file)
files << File.join(directory, file)
end
files.sort_by{|f| MIGRATION_FILE_PATTERN.match(File.basename(f))[1].to_i}
end
# Returns tuples of migration, filename, and direction
def get_migration_tuples
up_mts = []
down_mts = []
files.each do |path|
f = File.basename(path)
fi = f.downcase
if target
if migration_version_from_file(f) > target
if applied_migrations.include?(fi)
down_mts << [load_migration_file(path), f, :down]
end
elsif !applied_migrations.include?(fi)
up_mts << [load_migration_file(path), f, :up]
end
elsif !applied_migrations.include?(fi)
up_mts << [load_migration_file(path), f, :up]
end
end
up_mts + down_mts.reverse
end
# Returns the dataset for the schema_migrations table. If no such table
# exists, it is automatically created.
def schema_dataset
c = column
ds = db.from(table)
if !db.table_exists?(table)
begin
db.create_table(table){String c, :primary_key=>true}
rescue Sequel::DatabaseError => e
if db.database_type == :mysql && e.message =~ /max key length/
# Handle case where MySQL is used with utf8mb4 charset default, which
# only allows a maximum length of about 190 characters for string
# primary keys due to InnoDB limitations.
db.create_table(table){String c, :primary_key=>true, :size=>190}
else
raise e
end
end
if db.table_exists?(:schema_info) and vha = db[:schema_info].all and vha.length == 1 and
vha.first.keys == [:version] and vha.first.values.first.is_a?(Integer)
convert_from_schema_info
end
elsif !ds.columns.include?(c)
raise(Error, "Migrator table #{table} does not contain column #{c}")
end
ds
end
end
end
sequel-5.63.0/lib/sequel/extensions/mssql_emulate_lateral_with_apply.rb 0000664 0000000 0000000 00000005367 14342141206 0026522 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The mssql_emulate_lateral_with_apply extension converts
# queries that use LATERAL into queries that use CROSS/OUTER
# APPLY, allowing code that works on databases that support
# LATERAL via Dataset#lateral to run on Microsoft SQL Server
# and Sybase SQLAnywhere.
#
# This is available as a separate extension instead of
# integrated into the Microsoft SQL Server and Sybase
# SQLAnywhere support because few people need it and there
# is a performance hit to code that doesn't use it.
#
# It is possible there are cases where this emulation does
# not work. Users should probably verify that correct
# results are returned when using this extension.
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:mssql_emulate_lateral_with_apply)
#
# Or you can load it into all of a database's datasets:
#
# DB.extension(:mssql_emulate_lateral_with_apply)
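#
# A sketch of the rewrite this performs (table and column names are
# assumptions):
#
#   sub = DB[:prices].where(:item_id=>Sequel[:items][:id]).lateral
#   DB[:items].cross_join(sub)
#   # Without this extension: ... CROSS JOIN LATERAL (SELECT ...) ...
#   # With this extension: ... CROSS APPLY (SELECT ...) ...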
#
# Related module: Sequel::MSSQL::EmulateLateralWithApply
#
module Sequel
module MSSQL
module EmulateLateralWithApply
# If the table is a dataset that uses LATERAL,
# convert it to a CROSS APPLY if it is a INNER
# or CROSS JOIN, and an OUTER APPLY if it is a
# LEFT JOIN.
def join_table(type, table, expr=nil, *)
if table.is_a?(Dataset) && table.opts[:lateral]
table = table.clone(:lateral=>nil)
case type
when :inner
type = :cross_apply
table = table.where(expr)
expr = nil
when :cross
type = :cross_apply
when :left, :left_outer
type = :outer_apply
table = table.where(expr)
expr = nil
end
end
super
end
# When a FROM entry uses a LATERAL subquery,
# convert that entry into a CROSS APPLY.
def from(*source, &block)
virtual_row_columns(source, block)
lateral, source = source.partition{|t| t.is_a?(Sequel::Dataset) && t.opts[:lateral] || (t.is_a?(Sequel::SQL::AliasedExpression) && t.expression.is_a?(Sequel::Dataset) && t.expression.opts[:lateral])} unless source.empty?
return super(*source, &nil) if !lateral || lateral.empty?
ds = from(*source)
lateral.each do |l|
l = if l.is_a?(Sequel::SQL::AliasedExpression)
l.expression.clone(:lateral=>nil).as(l.alias)
else
l.clone(:lateral=>nil)
end
ds = ds.cross_apply(l)
end
ds
end
# MSSQL can emulate lateral subqueries via CROSS/OUTER APPLY
# when using this extension.
def supports_lateral_subqueries?
true
end
end
end
Dataset.register_extension(:mssql_emulate_lateral_with_apply, MSSQL::EmulateLateralWithApply)
end
sequel-5.63.0/lib/sequel/extensions/named_timezones.rb 0000664 0000000 0000000 00000016567 14342141206 0023070 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# Allows the use of named timezones via TZInfo (requires tzinfo).
# Forces the use of DateTime as Sequel's datetime_class, since
# historically, Ruby's Time class doesn't support timezones other
# than local and UTC. To continue using Ruby's Time class when using
# the named_timezones extension:
#
# # Load the extension
# Sequel.extension :named_timezones
#
# # Set Sequel.datetime_class back to Time
# Sequel.datetime_class = Time
#
# This allows you to either pass strings or TZInfo::Timezone
# instance to Sequel.database_timezone=, application_timezone=, and
# typecast_timezone=. If a string is passed, it is converted to a
# TZInfo::Timezone using TZInfo::Timezone.get.
#
# Let's say you have the database server in New York and the
# application server in Los Angeles. For historical reasons, data
# is stored in local New York time, but the application server only
# services clients in Los Angeles, so you want to use New York
# time in the database and Los Angeles time in the application. This
# is easily done via:
#
# Sequel.database_timezone = 'America/New_York'
# Sequel.application_timezone = 'America/Los_Angeles'
#
# Then, before data is stored in the database, it is converted to New
# York time. When data is retrieved from the database, it is
# converted to Los Angeles time.
#
# If you are using database specific timezones, you may want to load
# this extension into the database in order to support similar API:
#
# DB.extension :named_timezones
# DB.timezone = 'America/New_York'
#
# Note that typecasting from the database timezone to the application
# timezone when fetching rows is dependent on the database adapter,
# and only works on adapters where Sequel itself does the conversion.
# It should work with the mysql, postgres, sqlite, ibmdb, and jdbc
# adapters.
#
# Related module: Sequel::NamedTimezones
require 'tzinfo'
#
module Sequel
self.datetime_class = DateTime
module NamedTimezones
module DatabaseMethods
def timezone=(tz)
super(Sequel.send(:convert_timezone_setter_arg, tz))
end
end
# Handles TZInfo::AmbiguousTime exceptions automatically by providing a
# proc called with both the datetime value being converted as well as
# the array of TZInfo::TimezonePeriod results. Example:
#
# Sequel.tzinfo_disambiguator = proc{|datetime, periods| periods.first}
attr_accessor :tzinfo_disambiguator
private
if RUBY_VERSION >= '2.6'
# Whether Time.at with :nsec and :in is broken. True on JRuby < 9.3.9.0.
BROKEN_TIME_AT_WITH_NSEC = defined?(JRUBY_VERSION) && (JRUBY_VERSION < '9.3' || (JRUBY_VERSION < '9.4' && JRUBY_VERSION.split('.')[2].to_i < 9))
private_constant :BROKEN_TIME_AT_WITH_NSEC
# Convert the given input Time (which must be in UTC) to the given input timezone,
# which should be a TZInfo::Timezone instance.
def convert_input_time_other(v, input_timezone)
Time.new(v.year, v.mon, v.day, v.hour, v.min, (v.sec + Rational(v.nsec, 1000000000)), input_timezone)
rescue TZInfo::AmbiguousTime
raise unless disamb = tzinfo_disambiguator_for(v)
period = input_timezone.period_for_local(v, &disamb)
offset = period.utc_total_offset
# :nocov:
if BROKEN_TIME_AT_WITH_NSEC
Time.at(v.to_i - offset, :in => input_timezone) + v.nsec/1000000000.0
# :nocov:
else
Time.at(v.to_i - offset, v.nsec, :nsec, :in => input_timezone)
end
end
# Convert the given input Time to the given output timezone,
# which should be a TZInfo::Timezone instance.
def convert_output_time_other(v, output_timezone)
# :nocov:
if BROKEN_TIME_AT_WITH_NSEC
Time.at(v.to_i, :in => output_timezone) + v.nsec/1000000000.0
# :nocov:
else
Time.at(v.to_i, v.nsec, :nsec, :in => output_timezone)
end
end
# :nodoc:
# :nocov:
else
def convert_input_time_other(v, input_timezone)
local_offset = input_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset
Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i + v.nsec/1000000000.0
end
if defined?(TZInfo::VERSION) && TZInfo::VERSION > '2'
def convert_output_time_other(v, output_timezone)
v = output_timezone.utc_to_local(v.getutc)
local_offset = output_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset
Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i + v.nsec/1000000000.0 + local_offset
end
else
def convert_output_time_other(v, output_timezone)
v = output_timezone.utc_to_local(v.getutc)
local_offset = output_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset
Time.new(1970, 1, 1, 0, 0, 0, local_offset) + v.to_i + v.nsec/1000000000.0
end
end
# :nodoc:
# :nocov:
end
# Handle both TZInfo 1 and TZInfo 2
if defined?(TZInfo::VERSION) && TZInfo::VERSION > '2'
def convert_input_datetime_other(v, input_timezone)
local_offset = Rational(input_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset, 86400)
(v - local_offset).new_offset(local_offset)
end
def convert_output_datetime_other(v, output_timezone)
v = output_timezone.utc_to_local(v.new_offset(0))
# Force DateTime output instead of TZInfo::DateTimeWithOffset
DateTime.jd(v.jd, v.hour, v.minute, v.second + v.sec_fraction, v.offset, v.start)
end
# :nodoc:
# :nocov:
else
# Assume the given DateTime has a correct time but a wrong timezone. It is
# currently in UTC timezone, but it should be converted to the input_timezone.
# Keep the time the same but convert the timezone to the input_timezone.
# Expects the input_timezone to be a TZInfo::Timezone instance.
def convert_input_datetime_other(v, input_timezone)
local_offset = input_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset_rational
(v - local_offset).new_offset(local_offset)
end
# Convert the given DateTime to use the given output_timezone.
# Expects the output_timezone to be a TZInfo::Timezone instance.
def convert_output_datetime_other(v, output_timezone)
# TZInfo 1 converts times, but expects the given DateTime to have an offset
# of 0 and always leaves the timezone offset as 0
v = output_timezone.utc_to_local(v.new_offset(0))
local_offset = output_timezone.period_for_local(v, &tzinfo_disambiguator_for(v)).utc_total_offset_rational
# Convert timezone offset from UTC to the offset for the output_timezone
(v - local_offset).new_offset(local_offset)
end
# :nodoc:
# :nocov:
end
# Returns TZInfo::Timezone instance if given a String.
def convert_timezone_setter_arg(tz)
tz.is_a?(String) ? TZInfo::Timezone.get(tz) : super
end
# Return a disambiguation proc that provides both the datetime value
# and the periods, in order to allow the choice of period to depend
# on the datetime value.
def tzinfo_disambiguator_for(v)
if pr = @tzinfo_disambiguator
proc{|periods| pr.call(v, periods)}
end
end
end
extend NamedTimezones
Database.register_extension(:named_timezones, NamedTimezones::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/no_auto_literal_strings.rb 0000664 0000000 0000000 00000000236 14342141206 0024622 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
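# These registrations are intentionally empty no-ops: Sequel 5 never
# treats plain Ruby strings as literal SQL, so the behavior this
# extension enforced in Sequel 4 is now the default. The extension is
# kept only so that code written for Sequel 4 can still load it.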
Sequel::Database.register_extension(:no_auto_literal_strings){}
Sequel::Dataset.register_extension(:no_auto_literal_strings){}
sequel-5.63.0/lib/sequel/extensions/null_dataset.rb 0000664 0000000 0000000 00000006015 14342141206 0022351 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The null_dataset extension adds the Dataset#nullify method, which
# returns a cloned dataset that will never issue a query to the
# database. It implements the null object pattern for datasets.
#
# The most common usage is probably in a method that must return
# a dataset, where the method knows the dataset shouldn't return
# anything. With standard Sequel, you'd probably just add a
# WHERE condition that is always false, but that still results
# in a query being sent to the database, and can be overridden
# using #unfiltered, the OR operator, or a UNION.
#
# Usage:
#
# ds = DB[:items].nullify.where(a: :b).select(:c)
# ds.sql # => "SELECT c FROM items WHERE (a = b)"
# ds.all # => [] # no query sent to the database
#
# Note that there is one case where a null dataset will send
# a query to the database. If you call #columns on a nulled
# dataset and the dataset doesn't have an already cached
# version of the columns, it will create a new dataset with
# the same options to get the columns.
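#
# A sketch of that corner case (the table name is an assumption):
#
#   ds = DB[:items].nullify
#   ds.columns # may issue a query to determine the columns
#   ds.all # => [] # still no query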
#
# This extension uses Object#extend at runtime, which can hurt performance.
#
# To add the nullify method to a single dataset:
#
# ds = ds.extension(:null_dataset)
#
# To add the nullify method to all datasets on a single database:
#
# DB.extension(:null_dataset)
#
# Related modules: Sequel::Dataset::Nullifiable, Sequel::Dataset::NullDataset
#
module Sequel
class Dataset
module Nullifiable
# Return a cloned nullified dataset.
def nullify
cached_dataset(:_nullify_ds) do
with_extend(NullDataset)
end
end
end
module NullDataset
# Create a new dataset from the dataset (which won't
# be nulled) to get the columns if they aren't already cached.
def columns
if cols = _columns
return cols
end
self.columns = db.dataset.clone(@opts).columns
end
# Return 0 without sending a database query.
def delete
0
end
# Return self without sending a database query, never yielding.
def each
self
end
# Return nil without sending a database query, never yielding.
def fetch_rows(sql)
nil
end
# Return nil without sending a database query.
def insert(*)
nil
end
# Return nil without sending a database query.
def truncate
nil
end
# Return 0 without sending a database query.
def update(v=OPTS)
0
end
protected
# Return nil without sending a database query.
def _import(columns, values, opts)
nil
end
private
# Just in case these are called directly by some internal code,
# make them noops. There's nothing we can do if the db
# is accessed directly to make a change, though.
(%w'_ddl _dui _insert' << '').each do |m|
class_eval("private; def execute#{m}(sql, opts=OPTS) end", __FILE__, __LINE__)
end
end
end
Dataset.register_extension(:null_dataset, Dataset::Nullifiable)
end
sequel-5.63.0/lib/sequel/extensions/pagination.rb 0000664 0000000 0000000 00000010760 14342141206 0022025 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pagination extension adds the Sequel::Dataset#paginate and #each_page methods,
# which return paginated (limited and offset) datasets with the following methods
# added that make creating a paginated display easier:
#
# * +page_size+
# * +page_count+
# * +page_range+
# * +current_page+
# * +next_page+
# * +prev_page+
# * +first_page?+
# * +last_page?+
# * +pagination_record_count+
# * +current_page_record_count+
# * +current_page_record_range+
#
# This extension uses Object#extend at runtime, which can hurt performance.
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:pagination)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:pagination)
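#
# A minimal usage sketch (the table name and page size are assumptions):
#
#   ds = DB[:items].paginate(1, 25)
#   ds.current_page # => 1
#   ds.page_count # => total number of pages
#   ds.next_page # => 2, or nil if there is only one page
#
#   DB[:items].each_page(25) do |page|
#     # page is a paginated dataset for each page in turn
#   end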
#
# Related modules: Sequel::DatasetPagination, Sequel::Dataset::Pagination
#
module Sequel
module DatasetPagination
# Returns a paginated dataset. The returned dataset is limited to
# the page size at the correct offset, and extended with the Pagination
# module. If a record count is not provided, does a count of total
# number of records for this dataset.
def paginate(page_no, page_size, record_count=nil)
raise(Error, "You cannot paginate a dataset that already has a limit") if @opts[:limit]
record_count ||= count
page_count = (record_count / page_size.to_f).ceil
page_count = 1 if page_count == 0
limit(page_size, (page_no - 1) * page_size).
with_extend(Dataset::Pagination).
clone(:page_size=>page_size, :current_page=>page_no, :pagination_record_count=>record_count, :page_count=>page_count)
end
# Yields a paginated dataset for each page and returns the receiver. Does
# a count to find the total number of records for this dataset. Returns
# an enumerator if no block is given.
def each_page(page_size)
raise(Error, "You cannot paginate a dataset that already has a limit") if @opts[:limit]
return to_enum(:each_page, page_size) unless defined?(yield)
record_count = count
total_pages = (record_count / page_size.to_f).ceil
(1..total_pages).each{|page_no| yield paginate(page_no, page_size, record_count)}
self
end
end
class Dataset
# Holds methods that only relate to paginated datasets. Paginated datasets
# have pages starting at 1 (page 1 is offset 0, page 2 is offset 1 * page_size).
module Pagination
# The number of records per page (the final page may have fewer than
# this number of records).
def page_size
@opts[:page_size]
end
# The number of pages in the dataset before pagination, of which
# this paginated dataset is one. Empty datasets are considered
# to have a single page.
def page_count
@opts[:page_count]
end
# The current page of the dataset, starting at 1 and not 0.
def current_page
@opts[:current_page]
end
# The total number of records in the dataset before pagination.
def pagination_record_count
@opts[:pagination_record_count]
end
# Returns the record range for the current page
def current_page_record_range
return (0..0) if current_page > page_count
a = 1 + (current_page - 1) * page_size
b = a + page_size - 1
b = pagination_record_count if b > pagination_record_count
a..b
end
# Returns the number of records in the current page
def current_page_record_count
return 0 if current_page > page_count
a = 1 + (current_page - 1) * page_size
b = a + page_size - 1
b = pagination_record_count if b > pagination_record_count
b - a + 1
end
# Returns true if the current page is the first page
def first_page?
current_page == 1
end
# Returns true if the current page is the last page
def last_page?
current_page == page_count
end
# Returns the next page number or nil if the current page is the last page
def next_page
current_page < page_count ? (current_page + 1) : nil
end
# Returns the page range
def page_range
1..page_count
end
# Returns the previous page number or nil if the current page is the first
def prev_page
current_page > 1 ? (current_page - 1) : nil
end
end
end
Dataset.register_extension(:pagination, DatasetPagination)
end
sequel-5.63.0/lib/sequel/extensions/pg_array.rb 0000664 0000000 0000000 00000052366 14342141206 0021510 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_array extension adds support for Sequel to handle
# PostgreSQL's array types.
#
# This extension integrates with Sequel's native postgres adapter and
# the jdbc/postgresql adapter, so that when array fields are retrieved,
# they are parsed and returned as instances of Sequel::Postgres::PGArray.
# PGArray is a DelegateClass of Array, so it mostly acts like an array, but not
# completely (is_a?(Array) is false). If you want the actual array,
# you can call PGArray#to_a. This is done so that Sequel does not
# treat a PGArray like an Array by default, which would cause issues.
#
# In addition to the parsers, this extension comes with literalizers
# for PGArray using the standard Sequel literalization callbacks, so
# they work on all adapters.
#
# To turn an existing Array into a PGArray:
#
# Sequel.pg_array(array)
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Array#pg_array:
#
# array.pg_array
#
# You can also provide a type, though in many cases it isn't necessary:
#
# Sequel.pg_array(array, :varchar) # or :integer, :"double precision", etc.
# array.pg_array(:varchar) # or :integer, :"double precision", etc.
#
# So if you want to insert an array into an integer[] database column:
#
# DB[:table].insert(column: Sequel.pg_array([1, 2, 3]))
#
# To use this extension, first load it into your Sequel::Database instance:
#
# DB.extension :pg_array
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using postgres array columns in CREATE/ALTER TABLE statements.
#
# This extension by default includes handlers for array types for
# all scalar types that the native postgres adapter handles. It
# also makes it easy to add support for other array types. In
# general, you just need to make sure that the scalar type is
# handled and has the appropriate converter installed. For user defined
# types, you can do this via:
#
# DB.add_conversion_proc(scalar_type_oid){|string| }
#
# Then you can call
# Sequel::Postgres::PGArray::DatabaseMethods#register_array_type
# to automatically set up a handler for the array type. So if you
# want to support the foo[] type (assuming the foo type is already
# supported):
#
# DB.register_array_type('foo')
#
# While this extension can parse PostgreSQL arrays with explicit bounds, it
# currently ignores explicit bounds, so such values do not round
# trip.
#
# If you want an easy way to call PostgreSQL array functions and
# operators, look into the pg_array_ops extension.
#
# This extension requires the delegate library, and the strscan library
# if sequel_pg has not been loaded.
#
# Related module: Sequel::Postgres::PGArray
require 'delegate'
module Sequel
module Postgres
# Represents a PostgreSQL array column value.
class PGArray < DelegateClass(Array)
include Sequel::SQL::AliasMethods
module DatabaseMethods
BLOB_RANGE = 1...-1
# Create the local hash of database type strings to schema type symbols,
# used for array types local to this database.
def self.extended(db)
db.instance_exec do
@pg_array_schema_types ||= {}
register_array_type('timestamp without time zone', :oid=>1115, :scalar_oid=>1114, :type_symbol=>:datetime)
register_array_type('timestamp with time zone', :oid=>1185, :scalar_oid=>1184, :type_symbol=>:datetime_timezone, :scalar_typecast=>:datetime)
register_array_type('text', :oid=>1009, :scalar_oid=>25, :type_symbol=>:string)
register_array_type('integer', :oid=>1007, :scalar_oid=>23)
register_array_type('bigint', :oid=>1016, :scalar_oid=>20, :scalar_typecast=>:integer)
register_array_type('numeric', :oid=>1231, :scalar_oid=>1700, :type_symbol=>:decimal)
register_array_type('double precision', :oid=>1022, :scalar_oid=>701, :type_symbol=>:float)
register_array_type('boolean', :oid=>1000, :scalar_oid=>16)
register_array_type('bytea', :oid=>1001, :scalar_oid=>17, :type_symbol=>:blob)
register_array_type('date', :oid=>1182, :scalar_oid=>1082)
register_array_type('time without time zone', :oid=>1183, :scalar_oid=>1083, :type_symbol=>:time)
register_array_type('time with time zone', :oid=>1270, :scalar_oid=>1266, :type_symbol=>:time_timezone, :scalar_typecast=>:time)
register_array_type('smallint', :oid=>1005, :scalar_oid=>21, :scalar_typecast=>:integer)
register_array_type('oid', :oid=>1028, :scalar_oid=>26, :scalar_typecast=>:integer)
register_array_type('real', :oid=>1021, :scalar_oid=>700, :scalar_typecast=>:float)
register_array_type('character', :oid=>1014, :converter=>nil, :array_type=>:text, :scalar_typecast=>:string)
register_array_type('character varying', :oid=>1015, :converter=>nil, :scalar_typecast=>:string, :type_symbol=>:varchar)
register_array_type('xml', :oid=>143, :scalar_oid=>142)
register_array_type('money', :oid=>791, :scalar_oid=>790)
register_array_type('bit', :oid=>1561, :scalar_oid=>1560)
register_array_type('bit varying', :oid=>1563, :scalar_oid=>1562, :type_symbol=>:varbit)
register_array_type('uuid', :oid=>2951, :scalar_oid=>2950)
register_array_type('xid', :oid=>1011, :scalar_oid=>28)
register_array_type('cid', :oid=>1012, :scalar_oid=>29)
register_array_type('name', :oid=>1003, :scalar_oid=>19)
register_array_type('tid', :oid=>1010, :scalar_oid=>27)
register_array_type('int2vector', :oid=>1006, :scalar_oid=>22)
register_array_type('oidvector', :oid=>1013, :scalar_oid=>30)
[:string_array, :integer_array, :decimal_array, :float_array, :boolean_array, :blob_array, :date_array, :time_array, :datetime_array].each do |v|
@schema_type_classes[v] = PGArray
end
end
end
def add_named_conversion_proc(name, &block)
ret = super
name = name.to_s if name.is_a?(Symbol)
from(:pg_type).where(:typname=>name).select_map([:oid, :typarray]).each do |scalar_oid, array_oid|
register_array_type(name, :oid=>array_oid.to_i, :scalar_oid=>scalar_oid.to_i)
end
ret
end
# Handle arrays in bound variables
def bound_variable_arg(arg, conn)
case arg
when PGArray
bound_variable_array(arg.to_a)
when Array
bound_variable_array(arg)
else
super
end
end
# Freeze the pg array schema types to prevent adding new ones.
def freeze
@pg_array_schema_types.freeze
super
end
# Register a database specific array type. Options:
#
# :array_type :: The type to automatically cast the array to when literalizing the array.
# Usually the same as db_type.
# :converter :: A callable object (e.g. Proc), that is called with each element of the array
# (usually a string), and should return the appropriate typecasted object.
# :oid :: The PostgreSQL OID for the array type. This is used by the Sequel postgres adapter
# to set up automatic type conversion on retrieval from the database.
# :scalar_oid :: Should be the PostgreSQL OID for the scalar version of this array type. If given,
# automatically sets the :converter option by looking up the scalar conversion
# proc.
# :scalar_typecast :: Should be a symbol indicating the typecast method that should be called on
# each element of the array, when a plain array is passed into a database
# typecast method. For example, for an array of integers, this could be set to
# :integer, so that the typecast_value_integer method is called on all of the
# array elements. Defaults to :type_symbol option.
# :type_symbol :: The base of the schema type symbol for this type. For example, if you provide
# :integer, Sequel will recognize this type as :integer_array during schema parsing.
# Defaults to the db_type argument.
#
# If a block is given, it is treated as the :converter option.
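#
# For example, to register the point[] type using the conversion proc
# already installed for the scalar point type (the oids below match
# PostgreSQL's builtin point type, but treat them as assumptions and
# verify against pg_type):
#
#   DB.register_array_type('point', :oid=>1017, :scalar_oid=>600)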
def register_array_type(db_type, opts=OPTS, &block)
oid = opts[:oid]
soid = opts[:scalar_oid]
if has_converter = opts.has_key?(:converter)
raise Error, "can't provide both a block and :converter option to register_array_type" if block
converter = opts[:converter]
else
has_converter = true if block
converter = block
end
unless (soid || has_converter) && oid
array_oid, scalar_oid = from(:pg_type).where(:typname=>db_type.to_s).get([:typarray, :oid])
soid ||= scalar_oid unless has_converter
oid ||= array_oid
end
db_type = db_type.to_s
type = (opts[:type_symbol] || db_type).to_sym
typecast_method_map = @pg_array_schema_types
if soid
raise Error, "can't provide both a converter and :scalar_oid option to register" if has_converter
converter = conversion_procs[soid]
end
array_type = (opts[:array_type] || db_type).to_s.dup.freeze
creator = Creator.new(array_type, converter)
add_conversion_proc(oid, creator)
typecast_method_map[db_type] = :"#{type}_array"
singleton_class.class_eval do
meth = :"typecast_value_#{type}_array"
scalar_typecast_method = :"typecast_value_#{opts.fetch(:scalar_typecast, type)}"
define_method(meth){|v| typecast_value_pg_array(v, creator, scalar_typecast_method)}
private meth
alias_method(meth, meth)
end
@schema_type_classes[:"#{type}_array"] = PGArray
nil
end
private
# Format arrays used in bound variables.
def bound_variable_array(a)
case a
when Array
"{#{a.map{|i| bound_variable_array(i)}.join(',')}}"
when Sequel::SQL::Blob
bound_variable_array_string(literal(a)[BLOB_RANGE].gsub("''", "'"))
when Sequel::LiteralString
a
when String
bound_variable_array_string(a)
else
if (s = bound_variable_arg(a, nil)).is_a?(String)
bound_variable_array_string(s)
else
literal(a)
end
end
end
# Escape strings used as array members in bound variables. Most complex
# types will create a regular string with bound_variable_arg, and then use this
# escaping to format it as an array member.
def bound_variable_array_string(s)
"\"#{s.gsub(/("|\\)/, '\\\\\1')}\""
end
# Look into both the current database's array schema types and the global
# array schema types to get the type symbol for the given database type
# string.
def pg_array_schema_type(type)
@pg_array_schema_types[type]
end
# Make the column type detection handle registered array types.
def schema_column_type(db_type)
if (db_type =~ /\A([^(]+)(?:\([^(]+\))?\[\]\z/io) && (type = pg_array_schema_type($1))
type
else
super
end
end
# Set the :callable_default value if the default value is recognized as an empty array.
def schema_post_process(_)
super.each do |a|
h = a[1]
if h[:default] =~ /\A(?:'\{\}'|ARRAY\[\])::([\w ]+)\[\]\z/
type = $1.freeze
h[:callable_default] = lambda{Sequel.pg_array([], type)}
end
end
end
# Convert ruby arrays to PostgreSQL arrays when used as default values.
def column_definition_default_sql(sql, column)
if (d = column[:default]) && d.is_a?(Array) && !Sequel.condition_specifier?(d)
sql << " DEFAULT (#{literal(Sequel.pg_array(d))}::#{type_literal(column)})"
else
super
end
end
# Given a value to typecast and the type of PGArray subclass:
# * If given a PGArray with a matching array_type, use it directly.
# * If given a PGArray with a different array_type, return a PGArray
# with the creator's type.
# * If given an Array, create a new PGArray instance for it. This does not
# typecast all members of the array in ruby for performance reasons, but
# it will cast the array the appropriate database type when the array is
# literalized.
def typecast_value_pg_array(value, creator, scalar_typecast_method=nil)
case value
when PGArray
if value.array_type != creator.type
PGArray.new(value.to_a, creator.type)
else
value
end
when Array
if scalar_typecast_method && respond_to?(scalar_typecast_method, true)
value = Sequel.recursive_map(value, method(scalar_typecast_method))
end
PGArray.new(value, creator.type)
else
raise Sequel::InvalidValue, "invalid value for array type: #{value.inspect}"
end
end
end
unless Sequel::Postgres.respond_to?(:parse_pg_array)
require 'strscan'
# PostgreSQL array parser that handles PostgreSQL array output format.
# Note that it does not handle all forms of input that PostgreSQL will
# accept, and it will not raise an error for all forms of invalid input.
class Parser < StringScanner
# Set the source for the input, and any converter callable
# to call with objects to be created. For nested parsers
# the source may contain text after the end current parse,
# which will be ignored.
def initialize(source, converter=nil)
super(source)
@converter = converter
@stack = [[]]
@encoding = string.encoding
@recorded = String.new.force_encoding(@encoding)
end
# Take the buffer of recorded characters and add it to the array
# of entries, and use a new buffer for recorded characters.
def new_entry(include_empty=false)
if !@recorded.empty? || include_empty
entry = @recorded
if entry == 'NULL' && !include_empty
entry = nil
elsif @converter
entry = @converter.call(entry)
end
@stack.last.push(entry)
@recorded = String.new.force_encoding(@encoding)
end
end
# Parse the input character by character, returning an array
# of parsed (and potentially converted) objects.
def parse
raise Sequel::Error, "invalid array, empty string" if eos?
raise Sequel::Error, "invalid array, doesn't start with {" unless scan(/((\[\d+:\d+\])+=)?\{/)
# :nocov:
while !eos?
# :nocov:
char = scan(/[{}",]|[^{}",]+/)
if char == ','
# Comma outside quoted string indicates end of current entry
new_entry
elsif char == '"'
raise Sequel::Error, "invalid array, opening quote with existing recorded data" unless @recorded.empty?
# :nocov:
while true
# :nocov:
char = scan(/["\\]|[^"\\]+/)
if char == '\\'
@recorded << getch
elsif char == '"'
n = peek(1)
raise Sequel::Error, "invalid array, closing quote not followed by comma or closing brace" unless n == ',' || n == '}'
break
else
@recorded << char
end
end
new_entry(true)
elsif char == '{'
raise Sequel::Error, "invalid array, opening brace with existing recorded data" unless @recorded.empty?
# Start of new array, add it to the stack
new = []
@stack.last << new
@stack << new
elsif char == '}'
# End of current array, add current entry to the current array
new_entry
if @stack.length == 1
raise Sequel::Error, "array parsing finished without parsing entire string" unless eos?
# Top level of array, parsing should be over.
# Pop current array off stack and return it as result
return @stack.pop
else
# Nested array, pop current array off stack
@stack.pop
end
else
# Add the character to the recorded character buffer.
@recorded << char
end
end
raise Sequel::Error, "array parsing finished with array unclosed"
end
end
end
# Callable object that takes the input string and parses it using Parser.
class Creator
# The converter callable that is called on each member of the array
# to convert it to the correct type.
attr_reader :converter
# The database type to set on the PGArray instances returned.
attr_reader :type
# Set the type and optional converter callable that will be used.
def initialize(type, converter=nil)
@type = type
@converter = converter
end
if Sequel::Postgres.respond_to?(:parse_pg_array)
# :nocov:
# Use sequel_pg's C-based parser if it has already been defined.
def call(string)
PGArray.new(Sequel::Postgres.parse_pg_array(string, @converter), @type)
end
# :nocov:
else
# Parse the string using Parser with the appropriate
# converter, and return a PGArray with the appropriate database
# type.
def call(string)
PGArray.new(Parser.new(string, @converter).parse, @type)
end
end
end
# The type of this array. May be nil if no type was given. If a type
# is provided, the array is automatically casted to this type when
# literalizing. This type is the underlying type, not the array type
# itself, so for an int4[] database type, it should be :int4 or 'int4'
attr_accessor :array_type
# Set the array to delegate to, and a database type.
def initialize(array, type=nil)
super(array)
@array_type = type
end
# Append the array SQL to the given sql string.
# If the receiver has a type, add a cast to the
# database array type.
def sql_literal_append(ds, sql)
at = array_type
if empty? && at
sql << "'{}'"
else
sql << "ARRAY"
_literal_append(sql, ds, to_a)
end
if at
sql << '::' << at.to_s << '[]'
end
end
# Allow automatic parameterization of the receiver if all elements can be
# can be automatically parameterized.
def sequel_auto_param_type(ds)
if array_type && all?{|x| nil == x || ds.send(:auto_param_type, x)}
"::#{array_type}[]"
end
end
private
# Recursive method that handles multi-dimensional
# arrays, surrounding each with [] and interspersing
# entries with ,.
def _literal_append(sql, ds, array)
sql << '['
comma = false
commas = ','
array.each do |i|
sql << commas if comma
if i.is_a?(Array)
_literal_append(sql, ds, i)
else
ds.literal_append(sql, i)
end
comma = true
end
sql << ']'
end
end
end
module SQL::Builders
# Return a Postgres::PGArray proxy for the given array and database array type.
def pg_array(v, array_type=nil)
case v
when Postgres::PGArray
if array_type.nil? || v.array_type == array_type
v
else
Postgres::PGArray.new(v.to_a, array_type)
end
when Array
Postgres::PGArray.new(v, array_type)
else
# May not be defined unless the pg_array_ops extension is used
pg_array_op(v)
end
end
end
Database.register_extension(:pg_array, Postgres::PGArray::DatabaseMethods)
end
# :nocov:
if Sequel.core_extensions?
class Array
# Return a PGArray proxy to the receiver, using a
# specific database type if given. This is mostly useful
# as a short cut for creating PGArray objects that didn't
# come from the database.
def pg_array(type=nil)
Sequel::Postgres::PGArray.new(self, type)
end
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Array do
def pg_array(type=nil)
Sequel::Postgres::PGArray.new(self, type)
end
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_array_ops.rb 0000664 0000000 0000000 00000023412 14342141206 0022357 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_array_ops extension adds support to Sequel's DSL to make
# it easier to call PostgreSQL array functions and operators.
#
# To load the extension:
#
# Sequel.extension :pg_array_ops
#
# The most common usage is passing an expression to Sequel.pg_array_op:
#
# ia = Sequel.pg_array_op(:int_array_column)
#
# If you have also loaded the pg_array extension, you can use
# Sequel.pg_array as well:
#
# ia = Sequel.pg_array(:int_array_column)
#
# Also, on most Sequel expression objects, you can call the pg_array
# method:
#
# ia = Sequel[:int_array_column].pg_array
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Symbol#pg_array:
#
# ia = :int_array_column.pg_array
#
# This creates a Sequel::Postgres::ArrayOp object that can be used
# for easier querying:
#
# ia[1] # int_array_column[1]
# ia[1][2] # int_array_column[1][2]
#
# ia.contains(:other_int_array_column) # @>
# ia.contained_by(:other_int_array_column) # <@
# ia.overlaps(:other_int_array_column) # &&
# ia.concat(:other_int_array_column) # ||
#
# ia.push(1) # int_array_column || 1
# ia.unshift(1) # 1 || int_array_column
#
# ia.any # ANY(int_array_column)
# ia.all # ALL(int_array_column)
# ia.cardinality # cardinality(int_array_column)
# ia.dims # array_dims(int_array_column)
# ia.hstore # hstore(int_array_column)
# ia.hstore(:a) # hstore(int_array_column, a)
# ia.length # array_length(int_array_column, 1)
# ia.length(2) # array_length(int_array_column, 2)
# ia.lower # array_lower(int_array_column, 1)
# ia.lower(2) # array_lower(int_array_column, 2)
# ia.join # array_to_string(int_array_column, '')
# ia.join(':') # array_to_string(int_array_column, ':')
# ia.join(':', ' ') # array_to_string(int_array_column, ':', ' ')
# ia.unnest # unnest(int_array_column)
# ia.unnest(:b) # unnest(int_array_column, b)
#
# See the PostgreSQL array function and operator documentation for more
# details on what these functions and operators do.
#
# If you are also using the pg_array extension, you should load it before
# loading this extension. Doing so will allow you to use PGArray#op to get
# an ArrayOp, allowing you to perform array operations on array literals.
#
# In order for #hstore to automatically wrap the returned value correctly in
# an HStoreOp, you need to load the pg_hstore_ops extension.
#
# Related module: Sequel::Postgres::ArrayOp
#
module Sequel
module Postgres
# The ArrayOp class is a simple container for a single object that
# defines methods that yield Sequel expression objects representing
# PostgreSQL array operators and functions.
#
# In the method documentation examples, assume that:
#
# array_op = :array.pg_array
class ArrayOp < Sequel::SQL::Wrapper
CONCAT = ["(".freeze, " || ".freeze, ")".freeze].freeze
CONTAINS = ["(".freeze, " @> ".freeze, ")".freeze].freeze
CONTAINED_BY = ["(".freeze, " <@ ".freeze, ")".freeze].freeze
OVERLAPS = ["(".freeze, " && ".freeze, ")".freeze].freeze
# Access a member of the array, returns an SQL::Subscript instance:
#
# array_op[1] # array[1]
def [](key)
s = Sequel::SQL::Subscript.new(self, [key])
s = ArrayOp.new(s) if key.is_a?(Range)
s
end
# Call the ALL function:
#
# array_op.all # ALL(array)
#
# Usually used like:
#
# dataset.where(1=>array_op.all)
# # WHERE (1 = ALL(array))
def all
function(:ALL)
end
# Call the ANY function:
#
# array_op.any # ANY(array)
#
# Usually used like:
#
# dataset.where(1=>array_op.any)
# # WHERE (1 = ANY(array))
def any
function(:ANY)
end
# Call the cardinality method:
#
# array_op.cardinality # cardinality(array)
def cardinality
function(:cardinality)
end
# Use the contains (@>) operator:
#
# array_op.contains(:a) # (array @> a)
def contains(other)
bool_op(CONTAINS, wrap_array(other))
end
# Use the contained by (<@) operator:
#
# array_op.contained_by(:a) # (array <@ a)
def contained_by(other)
bool_op(CONTAINED_BY, wrap_array(other))
end
# Call the array_dims method:
#
# array_op.dims # array_dims(array)
def dims
function(:array_dims)
end
# Convert the array into an hstore using the hstore function.
# If given an argument, use the two array form:
#
# array_op.hstore # hstore(array)
# array_op.hstore(:array2) # hstore(array, array2)
def hstore(arg=(no_arg_given=true; nil))
v = if no_arg_given
Sequel.function(:hstore, self)
else
Sequel.function(:hstore, self, wrap_array(arg))
end
# :nocov:
if Sequel.respond_to?(:hstore_op)
# :nocov:
v = Sequel.hstore_op(v)
end
v
end
# Call the array_length method:
#
# array_op.length # array_length(array, 1)
# array_op.length(2) # array_length(array, 2)
def length(dimension = 1)
function(:array_length, dimension)
end
# Call the array_lower method:
#
# array_op.lower # array_lower(array, 1)
# array_op.lower(2) # array_lower(array, 2)
def lower(dimension = 1)
function(:array_lower, dimension)
end
# Use the overlaps (&&) operator:
#
# array_op.overlaps(:a) # (array && a)
def overlaps(other)
bool_op(OVERLAPS, wrap_array(other))
end
# Use the concatenation (||) operator:
#
# array_op.push(:a) # (array || a)
# array_op.concat(:a) # (array || a)
def push(other)
array_op(CONCAT, [self, wrap_array(other)])
end
alias concat push
# Return the receiver.
def pg_array
self
end
# Remove the given element from the array:
#
# array_op.remove(1) # array_remove(array, 1)
def remove(element)
ArrayOp.new(function(:array_remove, element))
end
# Replace the given element in the array with another
# element:
#
# array_op.replace(1, 2) # array_replace(array, 1, 2)
def replace(element, replacement)
ArrayOp.new(function(:array_replace, element, replacement))
end
# Call the array_to_string method:
#
# array_op.join # array_to_string(array, '')
# array_op.to_string # array_to_string(array, '')
# array_op.join(":") # array_to_string(array, ':')
# array_op.join(":", "*") # array_to_string(array, ':', '*')
def to_string(joiner="", null=nil)
if null.nil?
function(:array_to_string, joiner)
else
function(:array_to_string, joiner, null)
end
end
alias join to_string
# Call the unnest method:
#
# array_op.unnest # unnest(array)
def unnest(*args)
function(:unnest, *args.map{|a| wrap_array(a)})
end
# Use the concatenation (||) operator, reversing the order:
#
# array_op.unshift(:a) # (a || array)
def unshift(other)
array_op(CONCAT, [wrap_array(other), self])
end
private
# Return a placeholder literal with the given str and args, wrapped
# in an ArrayOp, used by operators that return arrays.
def array_op(str, args)
ArrayOp.new(Sequel::SQL::PlaceholderLiteralString.new(str, args))
end
# Return a placeholder literal with the given str and args, wrapped
# in a boolean expression, used by operators that return booleans.
def bool_op(str, other)
Sequel::SQL::BooleanExpression.new(:NOOP, Sequel::SQL::PlaceholderLiteralString.new(str, [value, other]))
end
# Return a function with the given name, and the receiver as the first
# argument, with any additional arguments given.
def function(name, *args)
SQL::Function.new(name, self, *args)
end
# Automatically wrap argument in a PGArray if it is a plain Array.
# Requires that the pg_array extension has been loaded to work.
def wrap_array(arg)
if arg.instance_of?(Array)
Sequel.pg_array(arg)
else
arg
end
end
end
module ArrayOpMethods
# Wrap the receiver in an ArrayOp so you can easily use the PostgreSQL
# array functions and operators with it.
def pg_array
ArrayOp.new(self)
end
end
# :nocov:
if defined?(PGArray)
# :nocov:
class PGArray
# Wrap the PGArray instance in an ArrayOp, allowing you to easily use
# the PostgreSQL array functions and operators with literal arrays.
def op
ArrayOp.new(self)
end
end
end
end
module SQL::Builders
# Return the object wrapped in an Postgres::ArrayOp.
def pg_array_op(v)
case v
when Postgres::ArrayOp
v
else
Postgres::ArrayOp.new(v)
end
end
end
class SQL::GenericExpression
include Sequel::Postgres::ArrayOpMethods
end
class LiteralString
include Sequel::Postgres::ArrayOpMethods
end
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::Postgres::ArrayOpMethods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::Postgres::ArrayOpMethods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_auto_parameterize.rb 0000664 0000000 0000000 00000041213 14342141206 0024077 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# This extension changes Sequel's postgres adapter to automatically
# parameterize queries by default. Sequel's default behavior has always
# been to literalize all arguments unless specifically using
# parameters (via :$arg placeholders and the Dataset#prepare/call methods).
# This extension makes Sequel use string, numeric, blob, date, and
# time types as parameters. Example:
#
# # Default
# DB[:test].where(:a=>1)
# # SQL: SELECT * FROM test WHERE a = 1
#
# DB.extension :pg_auto_parameterize
# DB[:test].where(:a=>1)
# # SQL: SELECT * FROM test WHERE a = $1 (args: [1])
#
# Other pg_* extensions that ship with Sequel and add support for
# PostgreSQL-specific types support automatically parameterizing those
# types when used with this extension.
#
# This extension is not generally faster than the default behavior.
# In some cases it is faster, such as when using large strings.
# However, the use of parameters avoids potential security issues,
# in case Sequel does not correctly literalize one of the arguments
# that this extension would automatically parameterize.
#
# There are some known issues with automatic parameterization:
#
# 1. In order to avoid most type errors, the extension attempts to guess
# the appropriate type and automatically casts most placeholders,
# except plain Ruby strings (which PostgreSQL treats as an unknown
# type).
#
# Unfortunately, if the type guess is incorrect, or a plain Ruby
# string is used and PostgreSQL cannot determine the data type for it,
# the query may result in a DatabaseError. To fix both issues, you can
# explicitly cast values using Sequel.cast(value, type), and
# Sequel will cast to that type.
#
# 2. PostgreSQL supports a maximum of 65535 parameters per query.
# Attempts to use a query with more than this number of parameters
# will result in a Sequel::DatabaseError being raised. Sequel tries
# to mitigate this issue by turning column IN (int, ...)
# queries into column = ANY(CAST($ AS int8[])) using an
# array parameter, to reduce the number of parameters. It also limits
# inserting multiple rows at once to a maximum of 40 rows per query by
# default. While these mitigations handle the most common cases
# where a large number of parameters would be used, there are other
# cases.
#
# 3. Automatic parameterization will consider the same objects as
# equivalent when building SQL. However, for performance, it does
# not perform equality checks. So code such as:
#
# DB[:t].select{foo('a').as(:f)}.group{foo('a')}
# # SELECT foo('a') AS "f" FROM "t" GROUP BY foo('a')
#
# Will get auto parameterized as:
#
# # SELECT foo($1) AS "f" FROM "t" GROUP BY foo($2)
#
# Which will result in a DatabaseError, since that is not valid SQL.
#
# If you use the same expression, it will use the same parameter:
#
# foo = Sequel.function(:foo, 'a')
# DB[:t].select(foo.as(:f)).group(foo)
# # SELECT foo($1) AS "f" FROM "t" GROUP BY foo($1)
#
# Note that Dataset#select_group and similar methods that take arguments
# used in multiple places in the SQL will generally handle this
# automatically, since they will use the same objects:
#
# DB[:t].select_group{foo('a').as(:f)}
# # SELECT foo($1) AS "f" FROM "t" GROUP BY foo($1)
#
# You can work around any issues that come up by disabling automatic
# parameterization by calling the +no_auto_parameterize+ method on the
# dataset (which returns a clone of the dataset). You can avoid
# parameterization for specific values in the query by wrapping them
# with +Sequel.skip_pg_auto_param+.
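#
# For example (a sketch, reusing the test table from above):
#
#   DB[:test].where(:a=>Sequel.skip_pg_auto_param(1))
#   # SQL: SELECT * FROM test WHERE a = 1 (no parameters)
#
#   DB[:test].no_auto_parameterize.where(:a=>1)
#   # SQL: SELECT * FROM test WHERE a = 1 (no parameters)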
#
# It is likely there are corner cases not mentioned above
# when using this extension. Users are encouraged to provide feedback
# when using this extension if they come across such corner cases.
#
# This extension is only compatible when using the pg driver, not
# when using the sequel-postgres-pr, jeremyevans-postgres-pr, or
# postgres-pr drivers, as those do not support bound variables.
#
# Related module: Sequel::Postgres::AutoParameterize
module Sequel
module Postgres
# Enable automatically parameterizing queries.
module AutoParameterize
# SQL query string that also holds an array of parameters
class QueryString < ::String
# The array of parameters used by this query.
attr_reader :args
# Add a new parameter to this query, which adds
# the parameter to the array of parameters, and an
# SQL placeholder to the query itself.
def add_arg(s)
unless defined?(@args)
@args = []
@arg_map = {}
@arg_map.compare_by_identity
end
unless pos = @arg_map[s]
@args << s
pos = @arg_map[s] = @args.length.to_s
end
self << '$' << pos
end
# Return a new QueryString with the given string appended
# to the receiver, and the same arguments.
def +(other)
v = self.class.new(super)
v.instance_variable_set(:@args, @args) if @args
v
end
# Whether this query string currently supports
# automatic parameterization. Automatic parameterization
# is disabled at certain points during query building where
# PostgreSQL does not support it.
def auto_param?
!@skip_auto_param
end
# Skip automatic parameterization inside the passed block.
# This is used during query generation to disable
# automatic parameterization for clauses not supporting it.
def skip_auto_param
skip_auto_param = @skip_auto_param
begin
@skip_auto_param = true
yield
ensure
@skip_auto_param = skip_auto_param
end
end
# Freeze the stored arguments when freezing the query string.
def freeze
@args.freeze if @args
super
end
# Show args when the query string is inspected
def inspect
@args ? "#{self}; #{@args.inspect}".inspect : super
end
end
# Wrapper class that skips auto parameterization for the wrapped object.
class SkipAutoParam < SQL::Wrapper
def to_s_append(ds, sql)
if sql.is_a?(QueryString)
sql.skip_auto_param{super}
else
super
end
end
end
module DatabaseMethods
def self.extended(db)
unless (db.adapter_scheme == :postgres && USES_PG) || (db.adapter_scheme == :mock && db.database_type == :postgres)
raise Error, "pg_auto_parameterize is only supported when using the postgres adapter with the pg driver"
end
db.extend_datasets(DatasetMethods)
end
# If the sql string has an embedded parameter array,
# extract the parameter values from that.
def execute(sql, opts={})
if sql.is_a?(QueryString) && (args = sql.args)
opts = opts.merge(:arguments=>args)
end
super
end
private
# Disable auto_parameterization during COPY TABLE.
def copy_table_sql(table, opts=OPTS)
table = _no_auto_parameterize(table)
super
end
# Disable auto_parameterization during CREATE TABLE AS.
def create_table_as(name, sql, options)
sql = _no_auto_parameterize(sql)
super
end
# Disable auto_parameterization during CREATE VIEW.
def create_view_sql(name, source, options)
source = _no_auto_parameterize(source)
super
end
# Disable automatic parameterization for the given table if supported.
def _no_auto_parameterize(table)
if table.is_a?(DatasetMethods)
table.no_auto_parameterize
else
table
end
end
end
module DatasetMethods
# Return a clone of the dataset that will not do
# automatic parameterization.
def no_auto_parameterize
cached_dataset(:_no_auto_parameterize_ds) do
@opts[:no_auto_parameterize] ? self : clone(:no_auto_parameterize=>true)
end
end
# Do not add implicit typecasts for directly typecasted values,
# since the user is presumably doing so to set the type, not convert
# from the implicitly typecasted type.
def cast_sql_append(sql, expr, type)
if auto_param?(sql) && auto_param_type(expr)
sql << 'CAST('
sql.add_arg(expr)
sql << ' AS ' << db.cast_type_literal(type).to_s << ')'
else
super
end
end
# Transform column IN (int, ...) expressions into column = ANY($)
# and column NOT IN (int, ...) expressions into column != ALL($)
# using an integer array bound variable for the ANY/ALL argument.
# This is the same optimization PostgreSQL performs internally,
# but this reduces the number of bound variables.
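#
# A sketch of the resulting query (the args display format is an
# assumption):
#
#   DB[:test].where(:a=>[1, 2, 3])
#   # SQL: SELECT * FROM test WHERE (a = ANY(CAST($1 AS int8[]))) (args: ["{1,2,3}"])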
def complex_expression_sql_append(sql, op, args)
case op
when :IN, :"NOT IN"
l, r = args
if auto_param?(sql) && !l.is_a?(Array) && _integer_array?(r) && r.size > 1
if op == :IN
op = :"="
func = :ANY
else
op = :!=
func = :ALL
end
args = [l, Sequel.function(func, Sequel.cast(_integer_array_auto_param(r), 'int8[]'))]
end
end
super
end
# Parameterize insertion of multiple values
def multi_insert_sql(columns, values)
if @opts[:no_auto_parameterize]
super
else
[clone(:multi_insert_values=>values.map{|r| Array(r)}).insert_sql(columns, LiteralString.new('VALUES '))]
end
end
# For strings, numeric arguments, and date/time arguments, add
# them as parameters to the query instead of literalizing them
# into the SQL.
def literal_append(sql, v)
if auto_param?(sql) && (type = auto_param_type(v))
sql.add_arg(v) << type
else
super
end
end
# Placeholder literalizers are not supported when using automatic parameterization.
def supports_placeholder_literalizer?
@opts[:no_auto_parameterize]
end
# Disable automatic parameterization when using a cursor.
def use_cursor(*)
super.no_auto_parameterize
end
# Store receiving dataset and args when with_sql is used with a method name symbol, so sql
# can be parameterized correctly if used as a subselect.
def with_sql(*a)
ds = super
if Symbol === a[0]
ds = ds.clone(:with_sql_dataset=>self, :with_sql_args=>a.freeze)
end
ds
end
protected
# Disable automatic parameterization for prepared statements,
# since they will use manual parameterization.
def to_prepared_statement(*a)
@opts[:no_auto_parameterize] ? super : no_auto_parameterize.to_prepared_statement(*a)
end
private
# If auto parameterization is supported for the value, return a string
# for the implicit typecast to use. Return false/nil if the value should not be
# automatically parameterized.
def auto_param_type(v)
case v
when String
case v
when LiteralString
false
when Sequel::SQL::Blob
"::bytea"
else
""
end
when Integer
((v > 2147483647 || v < -2147483648) ? "::int8" : "::int4")
when Float
# PostgreSQL treats literal floats as numeric, not double precision
# But older versions of PostgreSQL don't handle Infinity/NaN in numeric
v.finite? ? "::numeric" : "::double precision"
when BigDecimal
"::numeric"
when Sequel::SQLTime
"::time"
when Time
"::#{@db.cast_type_literal(Time)}"
when DateTime
"::#{@db.cast_type_literal(DateTime)}"
when Date
"::date"
else
v.respond_to?(:sequel_auto_param_type) ? v.sequel_auto_param_type(self) : auto_param_type_fallback(v)
end
end
# Allow other extensions to support auto parameterization in ways that do not
# require adding the sequel_auto_param_type method.
def auto_param_type_fallback(v)
super if defined?(super)
end
# Whether the given query string currently supports automatic parameterization.
def auto_param?(sql)
sql.is_a?(QueryString) && sql.auto_param?
end
# Default the import slice to 40, since PostgreSQL supports a maximum of 1600
# columns per table, and it supports a maximum of 65k parameters. Technically,
# there can be more than one parameter per column, so this doesn't prevent going
# over the limit, though it does make it less likely.
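# (40 rows * 1600 columns = 64000 parameters, just under the 65535 limit.)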
def default_import_slice
40
end
# Handle parameterization of multi_insert_sql
def _insert_values_sql(sql, values)
super
if values = @opts[:multi_insert_values]
expression_list_append(sql, values.map{|r| Array(r)})
end
end
# Whether the given argument is an array of integers or NULL values, recursively.
def _integer_array?(v)
Array === v && v.all?{|x| nil == x || Integer === x}
end
# Create the bound variable string that will be used for the IN (int, ...) to = ANY($)
# optimization for integer arrays.
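# For example: _integer_array_auto_param([1, nil, 3]) # => "{1,NULL,3}"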
def _integer_array_auto_param(v)
buf = String.new
buf << '{'
comma = false
v.each do |x|
if comma
buf << ","
else
comma = true
end
buf << (x ? x.to_s : 'NULL')
end
buf << '}'
end
# Skip auto parameterization in LIMIT and OFFSET clauses
def select_limit_sql(sql)
if auto_param?(sql) && (@opts[:limit] || @opts[:offset])
sql.skip_auto_param{super}
else
super
end
end
# Skip auto parameterization in ORDER clause if used with
# integer values indicating ordering by the nth column.
def select_order_sql(sql)
if auto_param?(sql) && (order = @opts[:order]) && order.any?{|o| Integer === o || (SQL::OrderedExpression === o && Integer === o.expression)}
sql.skip_auto_param{super}
else
super
end
end
# Skip auto parameterization in CTE CYCLE clause
def select_with_sql_cte_search_cycle(sql, cte)
if auto_param?(sql) && cte[:cycle]
sql.skip_auto_param{super}
else
super
end
end
# Unless auto parameterization is disabled, use a string that
# can store the parameterized arguments.
def sql_string_origin
@opts[:no_auto_parameterize] ? super : QueryString.new
end
# If subquery uses with_sql with a method name symbol, get the dataset
# with_sql was called on, and use that as the subquery, recording the
# arguments to with_sql that will be used to calculate the sql.
def subselect_sql_dataset(sql, ds)
if ws_ds = ds.opts[:with_sql_dataset]
super(sql, ws_ds).clone(:subselect_sql_args=>ds.opts[:with_sql_args])
else
super
end
end
# If subquery used with_sql with a method name symbol, use the arguments to
# with_sql to determine the sql, so that the subselect can be parameterized.
def subselect_sql_append_sql(sql, ds)
if args = ds.opts[:subselect_sql_args]
ds.send(*args)
else
super
end
end
# Use auto parameterization for datasets with static SQL using placeholders.
def static_sql(sql)
if @opts[:append_sql] || @opts[:no_auto_parameterize] || String === sql
super
else
query_string = QueryString.new
literal_append(query_string, sql)
query_string
end
end
end
end
end
module SQL::Builders
# Skip auto parameterization for the given object when building queries.
def skip_pg_auto_param(v)
Postgres::AutoParameterize::SkipAutoParam.new(v)
end
end
Database.register_extension(:pg_auto_parameterize, Postgres::AutoParameterize::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/pg_enum.rb
# frozen-string-literal: true
#
# The pg_enum extension adds support for Sequel to handle PostgreSQL's enum
# types. To use this extension, first load it into your Database instance:
#
# DB.extension :pg_enum
#
# It allows creation of enum types using create_enum:
#
# DB.create_enum(:enum_type_name, %w'value1 value2 value3')
#
# You can also add values to existing enums via add_enum_value:
#
# DB.add_enum_value(:enum_type_name, 'value4')
#
# If you want to rename an enum type, you can use rename_enum:
#
# DB.rename_enum(:enum_type_name, :enum_type_another_name)
#
# If you want to rename an enum value, you can use rename_enum_value:
#
# DB.rename_enum_value(
# :enum_type_name, :enum_value_name, :enum_value_another_name
# )
#
# If you want to drop an enum type, you can use drop_enum:
#
# DB.drop_enum(:enum_type_name)
#
# Just like any user-created type, after creating the type, you
# can create tables that have a column of that type:
#
# DB.create_table(:table_name) do
# enum_type_name :column_name
# end
#
# When parsing the schema, enum types are recognized, and available
# values returned in the schema hash:
#
# DB.schema(:table_name)
# [[:column_name, {:type=>:enum, :enum_values=>['value1', 'value2']}]]
#
# This extension integrates with the pg_array extension. If you plan
# to use arrays of enum types, load the pg_array extension before the
# pg_enum extension:
#
# DB.extension :pg_array, :pg_enum
#
# DB.create_table(:table_name) do
# column :column_name, 'enum_type_name[]'
# end
# DB[:table_name].get(:column_name)
# # ['value1', 'value2']
#
# If the migration extension is loaded before this one (the order is important),
# you can use create_enum in a reversible migration:
#
# Sequel.migration do
# change do
# create_enum(:enum_type_name, %w'value1 value2 value3')
# end
# end
#
# Finally, typecasting for enums is set up to cast to strings, which
# allows you to use symbols in your model code. Similarly, you can provide
# the enum values as symbols when creating enums using create_enum or
# add_enum_value.
#
# Related module: Sequel::Postgres::EnumDatabaseMethods
#
module Sequel
module Postgres
# Methods enabling Database object integration with enum types.
module EnumDatabaseMethods
# Parse the available enum values when loading this extension into
# your database.
def self.extended(db)
db.instance_exec do
@enum_labels = {}
parse_enum_labels
end
end
# Run the SQL to add the given value to the existing enum type.
# Options:
# :after :: Add the new value after this existing value.
# :before :: Add the new value before this existing value.
# :if_not_exists :: Do not raise an error if the value already exists in the enum.
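#
# A hypothetical call combining the options:
#
#   DB.add_enum_value(:enum_type_name, 'value0',
#     before: 'value1', if_not_exists: true)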
def add_enum_value(enum, value, opts=OPTS)
sql = String.new
sql << "ALTER TYPE #{quote_schema_table(enum)} ADD VALUE#{' IF NOT EXISTS' if opts[:if_not_exists]} #{literal(value.to_s)}"
if v = opts[:before]
sql << " BEFORE #{literal(v.to_s)}"
elsif v = opts[:after]
sql << " AFTER #{literal(v.to_s)}"
end
_process_enum_change_sql(sql)
end
# Run the SQL to create an enum type with the given name and values.
def create_enum(enum, values)
_process_enum_change_sql("CREATE TYPE #{quote_schema_table(enum)} AS ENUM (#{values.map{|v| literal(v.to_s)}.join(', ')})")
end
# Run the SQL to rename the enum type with the given name
# to the given new name.
def rename_enum(enum, new_name)
_process_enum_change_sql("ALTER TYPE #{quote_schema_table(enum)} RENAME TO #{quote_schema_table(new_name)}")
end
# Run the SQL to rename the enum value with the given name
# to the given new name.
def rename_enum_value(enum, old_name, new_name)
_process_enum_change_sql("ALTER TYPE #{quote_schema_table(enum)} RENAME VALUE #{literal(old_name.to_s)} TO #{literal(new_name.to_s)}")
end
# Run the SQL to drop the enum type with the given name.
# Options:
# :if_exists :: Do not raise an error if the enum type does not exist
# :cascade :: Also drop other objects that depend on the enum type
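#
# For example (hypothetical enum name):
#
#   DB.drop_enum(:enum_type_name, if_exists: true, cascade: true)
#   # DROP TYPE IF EXISTS "enum_type_name" CASCADE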
def drop_enum(enum, opts=OPTS)
_process_enum_change_sql("DROP TYPE#{' IF EXISTS' if opts[:if_exists]} #{quote_schema_table(enum)}#{' CASCADE' if opts[:cascade]}")
end
private
# Run the SQL on the database, reparsing the enum labels after it is run.
def _process_enum_change_sql(sql)
run(sql)
parse_enum_labels
nil
end
# Parse the pg_enum table to get enum values, and
# the pg_type table to get names and array oids for
# enums.
def parse_enum_labels
order = [:enumtypid]
order << :enumsortorder if server_version >= 90100
enum_labels = metadata_dataset.from(:pg_enum).
order(*order).
select_hash_groups(Sequel.cast(:enumtypid, Integer).as(:v), :enumlabel).freeze
enum_labels.each_value(&:freeze)
if respond_to?(:register_array_type)
array_types = metadata_dataset.
from(:pg_type).
where(:oid=>enum_labels.keys).
exclude(:typarray=>0).
select_map([:typname, Sequel.cast(:typarray, Integer).as(:v)])
existing_oids = conversion_procs.keys
array_types.each do |name, oid|
next if existing_oids.include?(oid)
register_array_type(name, :oid=>oid)
end
end
Sequel.synchronize{@enum_labels.replace(enum_labels)}
end
# For schema entries that are enums, set the type to
# :enum and add a :enum_values entry with the enum values.
def schema_post_process(_)
super.each do |_, s|
oid = s[:oid]
if values = Sequel.synchronize{@enum_labels[oid]}
s[:type] = :enum
s[:enum_values] = values
end
end
end
# Typecast the given value to a string.
def typecast_value_enum(value)
value.to_s
end
end
end
# support reversible create_enum statements if the migration extension is loaded
# :nocov:
if defined?(MigrationReverser)
# :nocov:
class MigrationReverser
private
def create_enum(name, _)
@actions << [:drop_enum, name]
end
def rename_enum(old_name, new_name)
@actions << [:rename_enum, new_name, old_name]
end
end
end
Database.register_extension(:pg_enum, Postgres::EnumDatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/pg_extended_date_support.rb
# frozen-string-literal: true
#
# The pg_extended_date_support extension allows support
# for BC dates/timestamps by default, and infinite
# dates/timestamps if configured. Without this extension,
# BC and infinite dates/timestamps will be handled incorrectly
# or raise an error. This behavior isn't the default because
# it can hurt performance, and few users need support for BC
# and infinite dates/timestamps.
#
# To load the extension into the database:
#
# DB.extension :pg_extended_date_support
#
# To enable support for infinite dates/timestamps:
#
# DB.convert_infinite_timestamps = 'string' # or 'nil' or 'float'
#
# Related module: Sequel::Postgres::ExtendedDateSupport
#
module Sequel
module Postgres
module ExtendedDateSupport
DATE_YEAR_1 = Date.new(1)
DATETIME_YEAR_1 = DateTime.new(1)
TIME_YEAR_1 = Time.at(-62135596800).utc
INFINITE_TIMESTAMP_STRINGS = ['infinity'.freeze, '-infinity'.freeze].freeze
INFINITE_DATETIME_VALUES = ([PLUS_INFINITY, MINUS_INFINITY] + INFINITE_TIMESTAMP_STRINGS).freeze
PLUS_DATE_INFINITY = Date::Infinity.new
MINUS_DATE_INFINITY = -PLUS_DATE_INFINITY
RATIONAL_60 = Rational(60)
TIME_CAN_PARSE_BC = RUBY_VERSION >= '2.5'
# Add dataset methods and update the conversion procs for dates and timestamps.
def self.extended(db)
db.extend_datasets(DatasetMethods)
procs = db.conversion_procs
procs[1082] = ::Sequel.method(:string_to_date)
procs[1184] = procs[1114] = db.method(:to_application_timestamp)
end
# Handle BC dates and times in bound variables. This is necessary for Date values
# when using both the postgres and jdbc adapters, but also necessary for Time values
# on jdbc.
def bound_variable_arg(arg, conn)
case arg
when Date, Time
literal(arg)
else
super
end
end
# Whether infinite timestamps/dates should be converted on retrieval. By default, no
# conversion is done, so an error is raised if you attempt to retrieve an infinite
# timestamp/date. You can set this to :nil to convert to nil, :string to leave
# as a string, :date to convert to a Date::Infinity, or :float to convert to an
# infinite float.
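#
# A hypothetical example, assuming the extension is loaded:
#
#   DB.convert_infinite_timestamps = :string
#   DB.get(Sequel.cast('infinity', :timestamp)) # => "infinity"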
attr_reader :convert_infinite_timestamps
# Set whether to allow infinite timestamps/dates. Make sure the
# conversion proc for date reflects that setting.
def convert_infinite_timestamps=(v)
@convert_infinite_timestamps = case v
when Symbol
v
when 'nil'
:nil
when 'string'
:string
when 'date'
:date
when 'float'
:float
when String, true
typecast_value_boolean(v)
else
false
end
pr = old_pr = Sequel.method(:string_to_date)
if @convert_infinite_timestamps
pr = lambda do |val|
case val
when *INFINITE_TIMESTAMP_STRINGS
infinite_timestamp_value(val)
else
old_pr.call(val)
end
end
end
add_conversion_proc(1082, pr)
end
# Handle BC dates in timestamps by moving the BC from after the time to
# after the date, to appease ruby's date parser.
# If convert_infinite_timestamps is true and the value is infinite, return an appropriate
# value based on the convert_infinite_timestamps setting.
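#
# For example (illustrative), "0099-01-01 12:00:00 BC" is rewritten
# to "0099-01-01 BC 12:00:00" before parsing.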
def to_application_timestamp(value)
if value.is_a?(String) && (m = /((?:[-+]\d\d:\d\d)(:\d\d)?)?( BC)?\z/.match(value)) && (m[2] || m[3])
if m[3]
value = value.sub(' BC', '').sub(' ', ' BC ')
end
if m[2]
dt = if Sequel.datetime_class == DateTime
DateTime.parse(value)
elsif TIME_CAN_PARSE_BC
Time.parse(value)
# :nocov:
else
DateTime.parse(value).to_time
# :nocov:
end
Sequel.convert_output_timestamp(dt, Sequel.application_timezone)
else
super(value)
end
elsif convert_infinite_timestamps
case value
when *INFINITE_TIMESTAMP_STRINGS
infinite_timestamp_value(value)
else
super
end
else
super
end
end
private
# Return an appropriate value for the given infinite timestamp string.
def infinite_timestamp_value(value)
case convert_infinite_timestamps
when :nil
nil
when :string
value
when :date
value == 'infinity' ? PLUS_DATE_INFINITY : MINUS_DATE_INFINITY
else
value == 'infinity' ? PLUS_INFINITY : MINUS_INFINITY
end
end
# If the value is an infinite value (either an infinite float or a string returned
# by PostgreSQL for an infinite date), return it without converting it if
# convert_infinite_timestamps is set.
def typecast_value_date(value)
if convert_infinite_timestamps
case value
when *INFINITE_DATETIME_VALUES
value
else
super
end
else
super
end
end
# If the value is an infinite value (either an infinite float or a string returned
# by PostgreSQL for an infinite timestamp), return it without converting it if
# convert_infinite_timestamps is set.
def typecast_value_datetime(value)
if convert_infinite_timestamps
case value
when *INFINITE_DATETIME_VALUES
value
else
super
end
else
super
end
end
module DatasetMethods
private
# Handle BC Date objects.
def literal_date(date)
if date < DATE_YEAR_1
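# Shift the date so astronomical year y becomes year (1 - y), since
# Ruby's year 0 is 1 BC but PostgreSQL's BC years start at 1.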
date <<= ((date.year) * 24 - 12)
date.strftime("'%Y-%m-%d BC'")
else
super
end
end
# Handle BC DateTime objects.
def literal_datetime(date)
if date < DATETIME_YEAR_1
date <<= ((date.year) * 24 - 12)
date = db.from_application_timestamp(date)
minutes = (date.offset * 1440).to_i
date.strftime("'%Y-%m-%d %H:%M:%S.%N#{format_timestamp_offset(*minutes.divmod(60))} BC'")
else
super
end
end
# Handle Date::Infinity values
def literal_other_append(sql, v)
if v.is_a?(Date::Infinity)
sql << (v > 0 ? "'infinity'" : "'-infinity'")
else
super
end
end
if RUBY_ENGINE == 'jruby'
# :nocov:
ExtendedDateSupport::CONVERT_TYPES = [Java::JavaSQL::Types::DATE, Java::JavaSQL::Types::TIMESTAMP]
# Use non-JDBC parsing as JDBC parsing doesn't work for BC dates/timestamps.
def type_convertor(map, meta, type, i)
case type
when *CONVERT_TYPES
db.oid_convertor_proc(meta.getField(i).getOID)
else
super
end
end
# Work around JRuby bug #4822 in Time#to_datetime for times before the date of calendar reform
def literal_time(time)
if time < TIME_YEAR_1
literal_datetime(DateTime.parse(super))
else
super
end
end
# :nocov:
else
# Handle BC Time objects.
def literal_time(time)
if time < TIME_YEAR_1
time = db.from_application_timestamp(time)
time.strftime("'#{sprintf('%04i', time.year.abs+1)}-%m-%d %H:%M:%S.%N#{format_timestamp_offset(*(time.utc_offset/RATIONAL_60).divmod(60))} BC'")
else
super
end
end
end
end
end
end
Database.register_extension(:pg_extended_date_support, Postgres::ExtendedDateSupport)
end
sequel-5.63.0/lib/sequel/extensions/pg_extended_integer_support.rb
# frozen-string-literal: true
#
# The pg_extended_integer_support extension supports literalizing
# Ruby integers outside of PostgreSQL bigint range on PostgreSQL.
# Sequel by default will raise exceptions when
# literalizing such integers, as PostgreSQL would treat them
# as numeric type values instead of integer/bigint type values
# if unquoted, which can result in unexpected negative performance
# (e.g. forcing sequential scans when index scans would be used for
# an integer/bigint type).
#
# To load the extension into a Dataset (this returns a new Dataset):
#
# dataset = dataset.extension(:pg_extended_integer_support)
#
# To load the extension into a Database, so it affects all of the
# Database's datasets:
#
# DB.extension :pg_extended_integer_support
#
# By default, the extension will quote integers outside
# bigint range:
#
# DB.literal(2**63) # => "'9223372036854775808'"
#
# Quoting the value treats the type as unknown:
#
# DB.get{pg_typeof(2**63)} # => 'unknown'
#
# PostgreSQL will implicitly cast the unknown type to the appropriate
# database type, raising an error if it cannot be cast. Be aware this
# can result in the integer value being implicitly cast to text or
# any other PostgreSQL type:
#
# # Returns a string, not an integer:
# DB.get{2**63}
# # => "9223372036854775808"
#
# You can use the Dataset#integer_outside_bigint_range_strategy method
# with the value +:raw+ to change the strategy to not quote the variable:
#
# DB.dataset.
# integer_outside_bigint_range_strategy(:raw).
# literal(2**63)
# # => "9223372036854775808"
#
# Note that not quoting the value will result in PostgreSQL treating
# the type as numeric instead of integer:
#
# DB.dataset.
# integer_outside_bigint_range_strategy(:raw).
# get{pg_typeof(2**63)}
# # => "numeric"
#
# The +:raw+ behavior was Sequel's historical behavior, but unless
# you fully understand the repercussions of PostgreSQL using a
# numeric type for integer values, you should not use it.
#
# To get the current default behavior of raising an exception for
# integers outside of PostgreSQL bigint range, you can use a strategy
# of +:raise+.
#
# To specify a default strategy for handling integers outside
# bigint range that applies to all of a Database's datasets, you can
# use the +:integer_outside_bigint_range_strategy+ Database option with
# a value of +:raise+ or +:raw+:
#
# DB.opts[:integer_outside_bigint_range_strategy] = :raw
#
# The Database option will be used as a fallback if you did not call
# the Dataset#integer_outside_bigint_range_strategy method to specify
# a strategy for the dataset.
#
# Related module: Sequel::Postgres::ExtendedIntegerSupport
#
module Sequel
module Postgres
module ExtendedIntegerSupport
# Set the strategy for handling integers outside PostgreSQL
# bigint range. Supported values:
#
# :quote :: Quote the integer value. PostgreSQL will treat
# the integer as an unknown type, implicitly casting
# to any other type as needed. This is the default
# value when using the pg_extended_integer_support
# extension.
# :raise :: Raise error when attempting to literalize the integer
# (the default behavior of Sequel on PostgreSQL when
# not using the pg_extended_integer_support extension).
# :raw :: Use raw integer value without quoting. PostgreSQL
# will treat the integer as a numeric. This was Sequel's
# historical behavior, but it is unlikely to be desired.
def integer_outside_bigint_range_strategy(strategy)
clone(:integer_outside_bigint_range_strategy=>strategy)
end
private
# Handle integers outside the bigint range by using
# the configured strategy.
def literal_integer_outside_bigint_range(v)
case @opts[:integer_outside_bigint_range_strategy] || @db.opts[:integer_outside_bigint_range_strategy]
when :raise
super
when :raw
v.to_s
else # when :quote
"'#{v}'"
end
end
end
end
Dataset.register_extension(:pg_extended_integer_support, Postgres::ExtendedIntegerSupport)
end
sequel-5.63.0/lib/sequel/extensions/pg_hstore.rb
# frozen-string-literal: true
#
# The pg_hstore extension adds support for the PostgreSQL hstore type
# to Sequel. hstore is an extension that ships with PostgreSQL, and
# the hstore type stores an arbitrary key-value table, where the keys
# are strings and the values are strings or NULL.
#
# This extension integrates with Sequel's native postgres and jdbc/postgresql
# adapters, so that when hstore fields are retrieved, they are parsed and returned
# as instances of Sequel::Postgres::HStore. HStore is
# a DelegateClass of Hash, so it mostly acts like a hash, but not
# completely (is_a?(Hash) is false). If you want the actual hash,
# you can call HStore#to_hash. This is done so that Sequel does not
# treat a HStore like a Hash by default, which would cause issues.
#
# In addition to the parsers, this extension comes with literalizers
# for HStore using the standard Sequel literalization callbacks, so
# they work on all adapters.
#
# To turn an existing Hash into an HStore, use Sequel.hstore:
#
# Sequel.hstore(hash)
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Hash#hstore:
#
# hash.hstore
#
# Since the hstore type only supports strings, non-string keys and
# values are converted to strings:
#
# Sequel.hstore(foo: 1).to_hash # {'foo'=>'1'}
# v = Sequel.hstore({})
# v[:foo] = 1
# v # {'foo'=>'1'}
#
# However, to make life easier, lookups by key are converted to
# strings (even when accessing the underlying hash directly):
#
# Sequel.hstore('foo'=>'bar')[:foo] # 'bar'
# Sequel.hstore('foo'=>'bar').to_hash[:foo] # 'bar'
#
# HStore instances mostly just delegate to the underlying hash
# instance, so Hash methods that modify the receiver or returned
# modified copies of the receiver may not do string conversion.
# The following methods will handle string conversion, and more
# can be added later if desired:
#
# * \[\]
# * \[\]=
# * assoc
# * delete
# * fetch
# * has_key?
# * has_value?
# * include?
# * key
# * key?
# * member?
# * merge
# * merge!
# * rassoc
# * replace
# * store
# * update
# * value?
#
# If you want to insert a hash into an hstore database column:
#
# DB[:table].insert(column: Sequel.hstore('foo'=>'bar'))
#
# To use this extension, first load it into your Sequel::Database instance:
#
# DB.extension :pg_hstore
#
# This extension integrates with the pg_array extension. If you plan
# to use arrays of hstore types, load the pg_array extension before the
# pg_hstore extension:
#
# DB.extension :pg_array, :pg_hstore
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using hstore columns in CREATE/ALTER TABLE statements.
#
# This extension requires the delegate and strscan libraries.
#
# Related module: Sequel::Postgres::HStore
require 'delegate'
require 'strscan'
module Sequel
module Postgres
class HStore < DelegateClass(Hash)
include Sequel::SQL::AliasMethods
# Parser for PostgreSQL hstore output format.
class Parser < StringScanner
# Parse the output format that PostgreSQL uses for hstore
# columns. Note that this does not attempt to parse all
# input formats that PostgreSQL will accept. For instance,
# it expects all keys and non-NULL values to be quoted.
#
# Return the resulting hash of objects. This can be called
# multiple times, it will cache the parsed hash on the first
# call and use it for subsequent calls.
def parse
return @result if @result
hash = {}
while !eos?
skip(/"/)
k = parse_quoted
skip(/"\s*=>\s*/)
if skip(/"/)
v = parse_quoted
skip(/"/)
else
scan(/NULL/)
v = nil
end
skip(/,\s*/)
hash[k] = v
end
@result = hash
end
private
# Parse and unescape a quoted key/value.
def parse_quoted
scan(/(\\"|[^"])*/).gsub(/\\(.)/, '\1')
end
end
module DatabaseMethods
def self.extended(db)
db.instance_exec do
add_named_conversion_proc(:hstore, &HStore.method(:parse))
@schema_type_classes[:hstore] = HStore
end
end
# Handle hstores in bound variables
def bound_variable_arg(arg, conn)
case arg
when HStore
arg.unquoted_literal
when Hash
HStore.new(arg).unquoted_literal
else
super
end
end
private
# Recognize the hstore database type.
def schema_column_type(db_type)
db_type == 'hstore' ? :hstore : super
end
# Set the :callable_default value if the default value is recognized as an empty hstore.
def schema_post_process(_)
super.each do |a|
h = a[1]
if h[:type] == :hstore && h[:default] =~ /\A''::hstore\z/
h[:callable_default] = lambda{HStore.new({})}
end
end
end
# Typecast value correctly to HStore. If already an
# HStore instance, return as is. If a hash, return
# an HStore version of it. If a string, assume it is
# in PostgreSQL output format and parse it using the
# parser.
def typecast_value_hstore(value)
case value
when HStore
value
when Hash
HStore.new(value)
else
raise Sequel::InvalidValue, "invalid value for hstore: #{value.inspect}"
end
end
end
# Default proc used for all underlying HStore hashes, so that even
# if you grab the underlying hash, it will still convert non-string
# keys to strings during lookup.
DEFAULT_PROC = lambda{|h, k| h[k.to_s] unless k.is_a?(String)}
# Undef marshal_{dump,load} methods in the delegate class,
# so that ruby uses the old style _dump/_load methods defined
# in the delegate class, instead of the marshal_{dump,load} methods
# in the Hash class.
undef_method :marshal_load
undef_method :marshal_dump
# Use custom marshal loading, since underlying hash uses a default proc.
def self._load(args)
new(Hash[Marshal.load(args)])
end
# Parse the given string into an HStore, assuming the str is in PostgreSQL
# hstore output format.
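#
# For example:
#
#   Sequel::Postgres::HStore.parse('"a"=>"b"')['a'] # => "b"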
def self.parse(str)
new(Parser.new(str).parse)
end
# Override methods that accept key argument to convert to string.
%w'[] delete has_key? include? key? member? assoc'.each do |m|
class_eval("def #{m}(k) super(k.to_s) end", __FILE__, __LINE__)
end
# Override methods that accept value argument to convert to string unless nil.
%w'has_value? value? key rassoc'.each do |m|
class_eval("def #{m}(v) super(convert_value(v)) end", __FILE__, __LINE__)
end
# Override methods that accept key and value arguments to convert to string appropriately.
%w'[]= store'.each do |m|
class_eval("def #{m}(k, v) super(k.to_s, convert_value(v)) end", __FILE__, __LINE__)
end
# Override methods that take hashes to convert the hashes to using strings for keys and
# values before using them.
%w'initialize merge! update replace'.each do |m|
class_eval("def #{m}(h, &block) super(convert_hash(h), &block) end", __FILE__, __LINE__)
end
# Use custom marshal dumping, since underlying hash uses a default proc.
def _dump(*)
Marshal.dump(to_a)
end
# Override to force the key argument to a string.
def fetch(key, *args, &block)
super(key.to_s, *args, &block)
end
# Convert the input hash to string keys and values before merging,
# and return a new HStore instance with the merged hash.
def merge(hash, &block)
self.class.new(super(convert_hash(hash), &block))
end
# Return the underlying hash used by this HStore instance.
alias to_hash __getobj__
# Append a literalized version of the hstore to the sql.
def sql_literal_append(ds, sql)
ds.literal_append(sql, unquoted_literal)
sql << '::hstore'
end
# Return a string containing the unquoted, unstring-escaped
# literal version of the hstore. Separated out for use by
# the bound argument code.
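#
# For example:
#
#   Sequel.hstore('a'=>'b').unquoted_literal # => "\"a\"=>\"b\""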
def unquoted_literal
str = String.new
comma = false
commas = ","
quote = '"'
kv_sep = "=>"
null = "NULL"
each do |k, v|
str << commas if comma
str << quote << escape_value(k) << quote
str << kv_sep
if v.nil?
str << null
else
str << quote << escape_value(v) << quote
end
comma = true
end
str
end
# Allow automatic parameterization.
def sequel_auto_param_type(ds)
"::hstore"
end
private
# Return a new hash based on the input hash with string
# keys and string or nil values.
def convert_hash(h)
hash = Hash.new(&DEFAULT_PROC)
h.each{|k,v| hash[k.to_s] = convert_value(v)}
hash
end
# Return value v as a string unless it is already nil.
def convert_value(v)
v.to_s unless v.nil?
end
# Escape key/value strings when literalizing to
# correctly handle backslash and quote characters.
def escape_value(k)
k.to_s.gsub(/("|\\)/, '\\\\\1')
end
end
end
module SQL::Builders
# Return a Postgres::HStore proxy for the given hash.
def hstore(v)
case v
when Postgres::HStore
v
when Hash
Postgres::HStore.new(v)
else
# May not be defined unless the pg_hstore_ops extension is used
hstore_op(v)
end
end
end
Database.register_extension(:pg_hstore, Postgres::HStore::DatabaseMethods)
end
# :nocov:
if Sequel.core_extensions?
class Hash
# Create a new HStore using the receiver as the input
# hash. Note that the HStore created will not use the
# receiver as the backing store, since it has to
# modify the hash. To get the new backing store, use:
#
# hash.hstore.to_hash
def hstore
Sequel::Postgres::HStore.new(self)
end
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Hash do
def hstore
Sequel::Postgres::HStore.new(self)
end
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_hstore_ops.rb
# frozen-string-literal: true
#
# The pg_hstore_ops extension adds support to Sequel's DSL to make
# it easier to call PostgreSQL hstore functions and operators.
#
# To load the extension:
#
# Sequel.extension :pg_hstore_ops
#
# The most common usage is taking an object that represents an SQL
# expression (such as a :symbol), and calling Sequel.hstore_op with it:
#
# h = Sequel.hstore_op(:hstore_column)
#
# If you have also loaded the pg_hstore extension, you can use
# Sequel.hstore as well:
#
# h = Sequel.hstore(:hstore_column)
#
# Also, on most Sequel expression objects, you can call the hstore
# method:
#
# h = Sequel[:hstore_column].hstore
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Symbol#hstore:
#
# h = :hstore_column.hstore
#
# This creates a Sequel::Postgres::HStoreOp object that can be used
# for easier querying:
#
# h - 'a' # hstore_column - CAST('a' AS text)
# h['a'] # hstore_column -> 'a'
#
# h.concat(:other_hstore_column) # ||
# h.has_key?('a') # ?
# h.contain_all(:array_column) # ?&
# h.contain_any(:array_column) # ?|
# h.contains(:other_hstore_column) # @>
# h.contained_by(:other_hstore_column) # <@
#
# h.defined # defined(hstore_column)
# h.delete('a') # delete(hstore_column, 'a')
# h.each # each(hstore_column)
# h.keys # akeys(hstore_column)
# h.populate(:a) # populate_record(a, hstore_column)
# h.record_set(:a) # (a #= hstore_column)
# h.skeys # skeys(hstore_column)
# h.slice(:a) # slice(hstore_column, a)
# h.svals # svals(hstore_column)
# h.to_array # hstore_to_array(hstore_column)
# h.to_matrix # hstore_to_matrix(hstore_column)
# h.values # avals(hstore_column)
#
# Here are a couple examples for updating an existing hstore column:
#
# # Add a key, or update an existing key with a new value
# DB[:tab].update(h: Sequel.hstore_op(:h).concat('c'=>3))
#
# # Delete a key
# DB[:tab].update(h: Sequel.hstore_op(:h).delete('k1'))
#
# On PostgreSQL 14+, the hstore [] method will use subscripts instead of being
# the same as +get+, if the value being wrapped is an identifier:
#
# Sequel.hstore_op(:hstore_column)['a'] # hstore_column['a']
# Sequel.hstore_op(Sequel[:h][:s])['a'] # h.s['a']
#
# This support allows you to use hstore subscripts in UPDATE statements to update only
# part of a column:
#
# h = Sequel.hstore_op(:h)
# DB[:t].update(h['key1'] => 'val1', h['key2'] => 'val2')
# # UPDATE "t" SET "h"['key1'] = 'val1', "h"['key2'] = 'val2'
#
# See the PostgreSQL hstore function and operator documentation for more
# details on what these functions and operators do.
#
# If you are also using the pg_hstore extension, you should load it before
# loading this extension. Doing so will allow you to use HStore#op to get
# an HStoreOp, allowing you to perform hstore operations on hstore literals.
#
# Some of these methods will accept ruby arrays and convert them automatically to
# PostgreSQL arrays if you have the pg_array extension loaded. Some of these methods
# will accept ruby hashes and convert them automatically to PostgreSQL hstores if the
# pg_hstore extension is loaded. Methods representing expressions that return
# PostgreSQL arrays will have the returned expression automatically wrapped in a
# Postgres::ArrayOp if the pg_array_ops extension is loaded.
#
# Related module: Sequel::Postgres::HStoreOp
#
module Sequel
module Postgres
# The HStoreOp class is a simple container for a single object that
# defines methods that yield Sequel expression objects representing
# PostgreSQL hstore operators and functions.
#
# In the method documentation examples, assume that:
#
# hstore_op = :hstore.hstore
class HStoreOp < Sequel::SQL::Wrapper
CONCAT = ["(".freeze, " || ".freeze, ")".freeze].freeze
CONTAIN_ALL = ["(".freeze, " ?& ".freeze, ")".freeze].freeze
CONTAIN_ANY = ["(".freeze, " ?| ".freeze, ")".freeze].freeze
CONTAINS = ["(".freeze, " @> ".freeze, ")".freeze].freeze
CONTAINED_BY = ["(".freeze, " <@ ".freeze, ")".freeze].freeze
HAS_KEY = ["(".freeze, " ? ".freeze, ")".freeze].freeze
LOOKUP = ["(".freeze, " -> ".freeze, ")".freeze].freeze
RECORD_SET = ["(".freeze, " #= ".freeze, ")".freeze].freeze
# Delete entries from an hstore using the subtraction operator:
#
# hstore_op - 'a' # (hstore - 'a')
def -(other)
other = if other.is_a?(String) && !other.is_a?(Sequel::LiteralString)
Sequel.cast_string(other)
else
wrap_input_array(wrap_input_hash(other))
end
HStoreOp.new(super)
end
# Lookup the value for the given key in an hstore:
#
# hstore_op['a'] # (hstore -> 'a')
def [](key)
if key.is_a?(Array) || (defined?(Sequel::Postgres::PGArray) && key.is_a?(Sequel::Postgres::PGArray)) || (defined?(Sequel::Postgres::ArrayOp) && key.is_a?(Sequel::Postgres::ArrayOp))
wrap_output_array(Sequel::SQL::PlaceholderLiteralString.new(LOOKUP, [value, wrap_input_array(key)]))
else
v = case @value
when Symbol, SQL::Identifier, SQL::QualifiedIdentifier
HStoreSubscriptOp.new(self, key)
else
Sequel::SQL::PlaceholderLiteralString.new(LOOKUP, [value, key])
end
Sequel::SQL::StringExpression.new(:NOOP, v)
end
end
# Check if the receiver contains all of the keys in the given array:
#
# hstore_op.contain_all(:a) # (hstore ?& a)
def contain_all(other)
bool_op(CONTAIN_ALL, wrap_input_array(other))
end
# Check if the receiver contains any of the keys in the given array:
#
# hstore_op.contain_any(:a) # (hstore ?| a)
def contain_any(other)
bool_op(CONTAIN_ANY, wrap_input_array(other))
end
# Check if the receiver contains all entries in the other hstore:
#
# hstore_op.contains(:h) # (hstore @> h)
def contains(other)
bool_op(CONTAINS, wrap_input_hash(other))
end
# Check if the other hstore contains all entries in the receiver:
#
# hstore_op.contained_by(:h) # (hstore <@ h)
def contained_by(other)
bool_op(CONTAINED_BY, wrap_input_hash(other))
end
# Check if the receiver contains a non-NULL value for the given key:
#
# hstore_op.defined('a') # defined(hstore, 'a')
def defined(key)
Sequel::SQL::BooleanExpression.new(:NOOP, function(:defined, key))
end
# Delete the matching entries from the receiver:
#
# hstore_op.delete('a') # delete(hstore, 'a')
def delete(key)
HStoreOp.new(function(:delete, wrap_input_array(wrap_input_hash(key))))
end
# Transform the receiver into a set of keys and values:
#
# hstore_op.each # each(hstore)
def each
function(:each)
end
# Check if the receiver contains the given key:
#
# hstore_op.has_key?('a') # (hstore ? 'a')
def has_key?(key)
bool_op(HAS_KEY, key)
end
alias include? has_key?
alias key? has_key?
alias member? has_key?
alias exist? has_key?
# Return the receiver.
def hstore
self
end
# Return the keys as a PostgreSQL array:
#
# hstore_op.keys # akeys(hstore)
def keys
wrap_output_array(function(:akeys))
end
alias akeys keys
# Merge a given hstore into the receiver:
#
# hstore_op.merge(:a) # (hstore || a)
def merge(other)
HStoreOp.new(Sequel::SQL::PlaceholderLiteralString.new(CONCAT, [self, wrap_input_hash(other)]))
end
alias concat merge
# Create a new record populated with entries from the receiver:
#
# hstore_op.populate(:a) # populate_record(a, hstore)
def populate(record)
SQL::Function.new(:populate_record, record, self)
end
# Update the values in a record using entries in the receiver:
#
# hstore_op.record_set(:a) # (a #= hstore)
def record_set(record)
Sequel::SQL::PlaceholderLiteralString.new(RECORD_SET, [record, value])
end
# Return the keys as a PostgreSQL set:
#
# hstore_op.skeys # skeys(hstore)
def skeys
function(:skeys)
end
# Return an hstore with only the keys in the given array:
#
# hstore_op.slice(:a) # slice(hstore, a)
def slice(keys)
HStoreOp.new(function(:slice, wrap_input_array(keys)))
end
# Return the values as a PostgreSQL set:
#
# hstore_op.svals # svals(hstore)
def svals
function(:svals)
end
# Return a flattened array of the receiver with alternating
# keys and values:
#
# hstore_op.to_array # hstore_to_array(hstore)
def to_array
wrap_output_array(function(:hstore_to_array))
end
# Return a nested array of the receiver, with arrays of
# 2 element (key/value) arrays:
#
# hstore_op.to_matrix # hstore_to_matrix(hstore)
def to_matrix
wrap_output_array(function(:hstore_to_matrix))
end
# Return the values as a PostgreSQL array:
#
# hstore_op.values # avals(hstore)
def values
wrap_output_array(function(:avals))
end
alias avals values
private
# Return a placeholder literal with the given str and args, wrapped
# in a boolean expression, used by operators that return booleans.
def bool_op(str, other)
Sequel::SQL::BooleanExpression.new(:NOOP, Sequel::SQL::PlaceholderLiteralString.new(str, [value, other]))
end
# Return a function with the given name, and the receiver as the first
# argument, with any additional arguments given.
def function(name, *args)
SQL::Function.new(name, self, *args)
end
# Wrap argument in a PGArray if it is an array
def wrap_input_array(obj)
if obj.is_a?(Array) && Sequel.respond_to?(:pg_array)
Sequel.pg_array(obj)
else
obj
end
end
# Wrap argument in an Hstore if it is a hash
def wrap_input_hash(obj)
if obj.is_a?(Hash) && Sequel.respond_to?(:hstore)
Sequel.hstore(obj)
else
obj
end
end
# Wrap argument in a PGArrayOp if supported
def wrap_output_array(obj)
if Sequel.respond_to?(:pg_array_op)
Sequel.pg_array_op(obj)
else
obj
end
end
end
# Represents hstore subscripts. This is abstracted because the
# subscript support depends on the database version.
class HStoreSubscriptOp < SQL::Expression
SUBSCRIPT = ["".freeze, "[".freeze, "]".freeze].freeze
# The expression being subscripted
attr_reader :expression
# The subscript to use
attr_reader :sub
# Set the expression and subscript to the given arguments
def initialize(expression, sub)
@expression = expression
@sub = sub
freeze
end
# Use subscripts instead of -> operator on PostgreSQL 14+
def to_s_append(ds, sql)
server_version = ds.db.server_version
frag = server_version && server_version >= 140000 ? SUBSCRIPT : HStoreOp::LOOKUP
ds.literal_append(sql, Sequel::SQL::PlaceholderLiteralString.new(frag, [@expression, @sub]))
end
# Support transforming of hstore subscripts
def sequel_ast_transform(transformer)
self.class.new(transformer.call(@expression), transformer.call(@sub))
end
end
module HStoreOpMethods
# Wrap the receiver in an HStoreOp so you can easily use the PostgreSQL
# hstore functions and operators with it.
def hstore
HStoreOp.new(self)
end
end
# :nocov:
if defined?(HStore)
# :nocov:
class HStore
# Wrap the receiver in an HStoreOp so you can easily use the PostgreSQL
# hstore functions and operators with it.
def op
HStoreOp.new(self)
end
end
end
end
module SQL::Builders
# Return the object wrapped in an Postgres::HStoreOp.
def hstore_op(v)
case v
when Postgres::HStoreOp
v
else
Postgres::HStoreOp.new(v)
end
end
end
class SQL::GenericExpression
include Sequel::Postgres::HStoreOpMethods
end
class LiteralString
include Sequel::Postgres::HStoreOpMethods
end
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::Postgres::HStoreOpMethods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::Postgres::HStoreOpMethods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_inet.rb
# frozen-string-literal: true
#
# The pg_inet extension adds support for Sequel to handle
# PostgreSQL's inet and cidr types using ruby's IPAddr class.
#
# This extension integrates with Sequel's native postgres and jdbc/postgresql
# adapters, so that when inet/cidr fields are retrieved, they are returned as
# IPAddr instances.
#
# To use this extension, load it into your database:
#
# DB.extension :pg_inet
#
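# A hypothetical round trip (assumes a table with an inet column):
#
#   DB[:hosts].insert(ip: IPAddr.new('192.168.1.1'))
#   DB[:hosts].get(:ip) # => #<IPAddr: IPv4:192.168.1.1/255.255.255.255>
#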
# This extension integrates with the pg_array extension. If you plan
# to use the inet[] or cidr[] types, load the pg_array extension before
# the pg_inet extension:
#
# DB.extension :pg_array, :pg_inet
#
# This extension does not add special support for the macaddr
# type. Ruby doesn't have a stdlib class that represents mac
# addresses, so these will still be returned as strings. The exception
# to this is that the pg_array extension integration will recognize
# macaddr[] types and return them as arrays of strings.
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using inet/cidr columns in CREATE/ALTER TABLE statements.
#
# Related module: Sequel::Postgres::InetDatabaseMethods
require 'ipaddr'
module Sequel
module Postgres
# Methods enabling Database object integration with the inet/cidr types.
module InetDatabaseMethods
# Reset the conversion procs when extending the Database object, so
# it will pick up the inet/cidr converter. Also, extend the datasets
# with support for literalizing the IPAddr types.
def self.extended(db)
db.instance_exec do
extend_datasets(InetDatasetMethods)
# :nocov:
if !defined?(SEQUEL_PG_VERSION_INTEGER) || SEQUEL_PG_VERSION_INTEGER >= 11300
# :nocov:
# sequel_pg 1.13.0+ will use inet/cidr conversion procs, but doing so is
# slower, so don't add the conversion procs if using sequel_pg 1.13.0+.
meth = IPAddr.method(:new)
add_conversion_proc(869, meth)
add_conversion_proc(650, meth)
if respond_to?(:register_array_type)
register_array_type('inet', :oid=>1041, :scalar_oid=>869)
register_array_type('cidr', :oid=>651, :scalar_oid=>650)
end
end
if respond_to?(:register_array_type)
register_array_type('macaddr', :oid=>1040, :scalar_oid=>829)
end
@schema_type_classes[:ipaddr] = IPAddr
end
end
# Convert an IPAddr arg to a string. Probably not necessary, but done
# for safety.
def bound_variable_arg(arg, conn)
case arg
when IPAddr
"#{arg.to_s}/#{arg.instance_variable_get(:@mask_addr).to_s(2).count('1')}"
else
super
end
end
private
# Make the column type detection recognize the inet and cidr types.
def schema_column_type(db_type)
case db_type
when 'inet', 'cidr'
:ipaddr
else
super
end
end
# Set the :ruby_default value if the default value is recognized as an ip address.
def schema_post_process(_)
super.each do |a|
h = a[1]
if h[:type] == :ipaddr && h[:default] =~ /\A'([:a-fA-F0-9\.\/]+)'::(?:inet|cidr)\z/
h[:ruby_default] = IPAddr.new($1)
end
end
end
# Typecast the given value to an IPAddr object.
def typecast_value_ipaddr(value)
case value
when IPAddr
value
when String
IPAddr.new(typecast_check_string_length(value, 100))
else
raise Sequel::InvalidValue, "invalid value for inet/cidr: #{value.inspect}"
end
end
end
module InetDatasetMethods
private
# Allow auto parameterization of IPAddr instances.
def auto_param_type_fallback(v)
if defined?(super) && (type = super)
type
elsif IPAddr === v
"::inet"
end
end
# Convert IPAddr value to a string and append a literal version
# of the string to the sql.
def literal_other_append(sql, value)
if value.is_a?(IPAddr)
literal_string_append(sql, "#{value.to_s}/#{value.instance_variable_get(:@mask_addr).to_s(2).count('1')}")
else
super
end
end
end
end
Database.register_extension(:pg_inet, Postgres::InetDatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/pg_inet_ops.rb
# frozen-string-literal: true
#
# The pg_inet_ops extension adds support to Sequel's DSL to make
# it easier to call PostgreSQL inet functions and operators.
#
# To load the extension:
#
# Sequel.extension :pg_inet_ops
#
# The most common usage is passing an expression to Sequel.pg_inet_op:
#
# r = Sequel.pg_inet_op(:inet)
#
# Also, on most Sequel expression objects, you can call the pg_inet
# method:
#
# r = Sequel[:ip].pg_inet
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Symbol#pg_inet:
#
# r = :inet.pg_inet
#
# This creates a Sequel::Postgres::InetOp object that can be used
# for easier querying:
#
# ~r # ~inet
# r & other # inet & other
# r | other # inet | other
# r << :other # inet << other
# r >> :other # inet >> other
#
# r.contained_by(:other) # inet << other
# r.contained_by_or_equals(:other) # inet <<= other
# r.contains(:other) # inet >> other
# r.contains_or_equals(:other) # inet >>= other
# r.contains_or_contained_by(:other) # inet && other
#
# r.abbrev # abbrev(inet)
# r.broadcast # broadcast(inet)
# r.family # family(inet)
# r.host # host(inet)
# r.hostmask # hostmask(inet)
# r.masklen # masklen(inet)
# r.netmask # netmask(inet)
# r.network # network(inet)
# r.set_masklen(16) # set_masklen(inet, 16)
# r.text # text(inet)
#
# If a String or IPAddr instance is passed to Sequel.pg_inet_op, it will automatically
# be cast to +inet+. To treat the object as a +cidr+, you must cast it before passing
# it to Sequel.pg_inet_op:
#
# r = Sequel.pg_inet_op(Sequel.cast('1.2.3.4', :cidr))
#
# See the PostgreSQL network function and operator documentation for more
# details on what these functions and operators do.
#
# Related module: Sequel::Postgres::InetOp
require 'ipaddr'
module Sequel
module Postgres
# The InetOp class is a simple container for a single object that
# defines methods that yield Sequel expression objects representing
# PostgreSQL inet operators and functions.
#
# Most methods in this class are defined via metaprogramming, see
# the pg_inet_ops extension documentation for details on the API.
class InetOp < Sequel::SQL::Wrapper
include Sequel::SQL::BitwiseMethods
# For String and IPAddr instances, wrap them in a cast to inet,
# to avoid ambiguity issues when calling operator methods.
def initialize(v)
case v
when ::Sequel::LiteralString
# nothing
when String, IPAddr
v = Sequel.cast(v, :inet)
end
super
end
OPERATORS = {
:contained_by_or_equals => ["(".freeze, " <<= ".freeze, ")".freeze].freeze,
:contains_or_equals => ["(".freeze, " >>= ".freeze, ")".freeze].freeze,
:contains_or_contained_by => ["(".freeze, " && ".freeze, ")".freeze].freeze,
}.freeze
OPERATORS.keys.each do |f|
class_eval("def #{f}(v) Sequel::SQL::BooleanExpression.new(:NOOP, operator(:#{f}, v)) end", __FILE__, __LINE__)
end
%w'<< >>'.each do |f|
class_eval("def #{f}(v) Sequel::SQL::BooleanExpression.new(:NOOP, super) end", __FILE__, __LINE__)
end
%w'& | +'.each do |f|
class_eval("def #{f}(v) self.class.new(super) end", __FILE__, __LINE__)
end
%w'abbrev host text'.each do |f|
class_eval("def #{f}() Sequel::SQL::StringExpression.new(:NOOP, function(:#{f})) end", __FILE__, __LINE__)
end
%w'family masklen'.each do |f|
class_eval("def #{f}() Sequel::SQL::NumericExpression.new(:NOOP, function(:#{f})) end", __FILE__, __LINE__)
end
%w'broadcast hostmask netmask network'.each do |f|
class_eval("def #{f}() self.class.new(function(:#{f})) end", __FILE__, __LINE__)
end
# Return the receiver.
def pg_inet
self
end
# Return an expression for the bitwise NOT of the receiver
def ~
self.class.new(super)
end
# Return an expression for the subtraction of the argument from the receiver
def -(v)
case v
when Integer
self.class.new(super)
else
Sequel::SQL::NumericExpression.new(:NOOP, super)
end
end
# Return an expression for the calling of the set_masklen function with the receiver and the given argument
def set_masklen(v)
self.class.new(Sequel::SQL::Function.new(:set_masklen, self, v))
end
alias contained_by <<
alias contains >>
undef_method :*, :/
private
# Handle PostgreSQL specific operator types
def operator(type, other)
Sequel::SQL::PlaceholderLiteralString.new(OPERATORS[type], [value, other])
end
# Return a function called with the receiver.
def function(name)
Sequel::SQL::Function.new(name, self)
end
end
module InetOpMethods
# Wrap the receiver in an InetOp so you can easily use the PostgreSQL
# inet functions and operators with it.
def pg_inet
InetOp.new(self)
end
end
end
module SQL::Builders
# Return the expression wrapped in the Postgres::InetOp.
def pg_inet_op(v)
case v
when Postgres::InetOp
v
else
Postgres::InetOp.new(v)
end
end
end
class SQL::GenericExpression
include Sequel::Postgres::InetOpMethods
end
class LiteralString
include Sequel::Postgres::InetOpMethods
end
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::Postgres::InetOpMethods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::Postgres::InetOpMethods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_interval.rb
# frozen-string-literal: true
#
# The pg_interval extension adds support for PostgreSQL's interval type.
#
# This extension integrates with Sequel's native postgres and jdbc/postgresql
# adapters, so that when interval type values are retrieved, they are parsed and returned
# as instances of ActiveSupport::Duration.
#
# In addition to the parser, this extension adds literalizers for
# ActiveSupport::Duration that use the standard Sequel literalization
# callbacks, so they work on all adapters.
#
# To use this extension, load it into the Database instance:
#
# DB.extension :pg_interval
#
# This extension integrates with the pg_array extension. If you plan
# to use arrays of interval types, load the pg_array extension before the
# pg_interval extension:
#
# DB.extension :pg_array, :pg_interval
#
# The parser this extension uses requires that IntervalStyle for PostgreSQL
# is set to postgres (the default setting). If IntervalStyle is changed from
# the default setting, the parser will probably not work. The parser used is
# very simple, and is only designed to parse PostgreSQL's default output
# format, it is not designed to support all input formats that PostgreSQL
# supports.
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using interval columns in CREATE/ALTER TABLE statements.
#
# Related module: Sequel::Postgres::IntervalDatabaseMethods
require 'active_support'
require 'active_support/duration'
# :nocov:
begin
require 'active_support/version'
rescue LoadError
end
# :nocov:
module Sequel
module Postgres
module IntervalDatabaseMethods
DURATION_UNITS = [:years, :months, :weeks, :days, :hours, :minutes, :seconds].freeze
# Return an unquoted string version of the duration object suitable for
# use as a bound variable.
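#
# For example (assumes ActiveSupport 5.1+ for Duration.build):
#
#   literal_duration(ActiveSupport::Duration.build(3661))
#   # => "1 hours 1 minutes 1 seconds " (note the trailing space)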
def self.literal_duration(duration)
h = Hash.new(0)
duration.parts.each{|unit, value| h[unit] += value}
s = String.new
DURATION_UNITS.each do |unit|
if (v = h[unit]) != 0
s << "#{v.is_a?(Integer) ? v : sprintf('%0.6f', v)} #{unit} "
end
end
if s.empty?
'0'
else
s
end
end
# Creates callable objects that convert strings into ActiveSupport::Duration instances.
class Parser
# Whether ActiveSupport::Duration.new takes parts as array instead of hash
USE_PARTS_ARRAY = !defined?(ActiveSupport::VERSION::STRING) || ActiveSupport::VERSION::STRING < '5.1'
if defined?(ActiveSupport::Duration::SECONDS_PER_MONTH)
SECONDS_PER_MONTH = ActiveSupport::Duration::SECONDS_PER_MONTH
SECONDS_PER_YEAR = ActiveSupport::Duration::SECONDS_PER_YEAR
# :nocov:
else
SECONDS_PER_MONTH = 2592000
SECONDS_PER_YEAR = 31557600
# :nocov:
end
# Parse the interval input string into an ActiveSupport::Duration instance.
def call(string)
raise(InvalidValue, "invalid or unhandled interval format: #{string.inspect}") unless matches = /\A([+-]?\d+ years?\s?)?([+-]?\d+ mons?\s?)?([+-]?\d+ days?\s?)?(?:(?:([+-])?(\d{2,10}):(\d\d):(\d\d(\.\d+)?))|([+-]?\d+ hours?\s?)?([+-]?\d+ mins?\s?)?([+-]?\d+(\.\d+)? secs?\s?)?)?\z/.match(string)
value = 0
parts = {}
if v = matches[1]
v = v.to_i
value += SECONDS_PER_YEAR * v
parts[:years] = v
end
if v = matches[2]
v = v.to_i
value += SECONDS_PER_MONTH * v
parts[:months] = v
end
if v = matches[3]
v = v.to_i
value += 86400 * v
parts[:days] = v
end
if matches[5]
seconds = matches[5].to_i * 3600 + matches[6].to_i * 60
seconds += matches[8] ? matches[7].to_f : matches[7].to_i
seconds *= -1 if matches[4] == '-'
value += seconds
parts[:seconds] = seconds
elsif matches[9] || matches[10] || matches[11]
seconds = 0
if v = matches[9]
seconds += v.to_i * 3600
end
if v = matches[10]
seconds += v.to_i * 60
end
if v = matches[11]
seconds += matches[12] ? v.to_f : v.to_i
end
value += seconds
parts[:seconds] = seconds
end
# :nocov:
if USE_PARTS_ARRAY
parts = parts.to_a
end
# :nocov:
ActiveSupport::Duration.new(value, parts)
end
end
# Single instance of Parser used for parsing, to save on memory (since the parser has no state).
PARSER = Parser.new
# Reset the conversion procs if using the native postgres adapter,
# and extend the datasets to correctly literalize ActiveSupport::Duration values.
def self.extended(db)
db.instance_exec do
extend_datasets(IntervalDatasetMethods)
add_conversion_proc(1186, Postgres::IntervalDatabaseMethods::PARSER)
if respond_to?(:register_array_type)
register_array_type('interval', :oid=>1187, :scalar_oid=>1186)
end
@schema_type_classes[:interval] = ActiveSupport::Duration
end
end
# Handle ActiveSupport::Duration values in bound variables.
def bound_variable_arg(arg, conn)
case arg
when ActiveSupport::Duration
IntervalDatabaseMethods.literal_duration(arg)
else
super
end
end
private
# Set the :ruby_default value if the default value is recognized as an interval.
def schema_post_process(_)
super.each do |a|
h = a[1]
if h[:type] == :interval && h[:default] =~ /\A'([\w ]+)'::interval\z/
h[:ruby_default] = PARSER.call($1)
end
end
end
# Typecast value correctly to an ActiveSupport::Duration instance.
# If already an ActiveSupport::Duration, return it.
# If a numeric argument is given, assume it represents a number
# of seconds, and create a new ActiveSupport::Duration instance
# representing that number of seconds.
# If a String, assume it is in PostgreSQL interval output format
# and attempt to parse it.
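#
# For example (illustrative):
#
#   DB.typecast_value(:interval, 90)
#   # => an ActiveSupport::Duration representing 90 seconds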
def typecast_value_interval(value)
case value
when ActiveSupport::Duration
value
when Numeric
ActiveSupport::Duration.new(value, [[:seconds, value]])
when String
PARSER.call(typecast_check_string_length(value, 1000))
else
raise Sequel::InvalidValue, "invalid value for interval type: #{value.inspect}"
end
end
end
module IntervalDatasetMethods
private
# Allow auto parameterization of ActiveSupport::Duration instances.
def auto_param_type_fallback(v)
if defined?(super) && (type = super)
type
elsif ActiveSupport::Duration === v
"::interval"
end
end
# Handle literalization of ActiveSupport::Duration objects, treating them as
# PostgreSQL intervals.
def literal_other_append(sql, v)
case v
when ActiveSupport::Duration
literal_append(sql, IntervalDatabaseMethods.literal_duration(v))
sql << '::interval'
else
super
end
end
end
end
Database.register_extension(:pg_interval, Postgres::IntervalDatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/pg_json.rb
# frozen-string-literal: true
#
# The pg_json extension adds support for Sequel to handle
# PostgreSQL's json and jsonb types. By default, it wraps
# JSON arrays and JSON objects with ruby array-like and
# hash-like objects. If you would like to wrap JSON primitives
# (numbers, strings, +null+, +true+, and +false+), you need to
# use the +wrap_json_primitives+ setter:
#
# DB.extension :pg_json
# DB.wrap_json_primitives = true
#
# Note that wrapping JSON primitives changes the behavior for
# JSON false and null values. Because only +false+ and +nil+
# in Ruby are considered falsey, wrapping these objects results
# in unexpected behavior if you use the values directly in
# conditionals:
#
# if DB[:table].get(:json_column)
# # called if the value of json_column is null/false
# # if you are wrapping primitives
# end
#
# To extract the Ruby primitive object from the wrapper object,
# you can use +__getobj__+ (this comes from Ruby's delegate library).
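# For example:
#
#   Sequel.pg_json_wrap(1).__getobj__ # => 1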
#
# To wrap an existing Ruby array, hash, string, integer, float,
# +nil+, +true+, or +false+, use +Sequel.pg_json_wrap+ or +Sequel.pg_jsonb_wrap+:
#
# Sequel.pg_json_wrap(object) # json type
# Sequel.pg_jsonb_wrap(object) # jsonb type
#
# So if you want to insert an array or hash into an json database column:
#
# DB[:table].insert(column: Sequel.pg_json_wrap([1, 2, 3]))
# DB[:table].insert(column: Sequel.pg_json_wrap({'a'=>1, 'b'=>2}))
#
# Note that the +pg_json_wrap+ and +pg_jsonb_wrap+ methods only handle Ruby primitives,
# they do not handle already wrapped objects.
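#
#   # For example, passing an already wrapped object raises an error:
#   Sequel.pg_json_wrap(Sequel.pg_json_wrap({})) # raises Sequel::Error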
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use the
# +pg_json+ and +pg_jsonb+ methods directly on Array or Hash:
#
# array.pg_json # json type
# array.pg_jsonb # jsonb type
#
# hash.pg_json # json type
# hash.pg_jsonb # jsonb type
#
# Model classes that use json or jsonb columns will have typecasting automatically
# set up, so you can assign Ruby primitives to model columns and have the wrapped
# objects automatically created. However, for backwards compatibility, passing
# a string object will parse the string as JSON, not create a JSON string object.
#
# obj = Model.new
# obj.json_column = {'a'=>'b'}
# obj.json_column.class
# # => Sequel::Postgres::JSONHash
# obj.json_column['a']
# # => 'b'
#
# obj.json_column = '{"a": "b"}'
# obj.json_column.class
# # => Sequel::Postgres::JSONHash
# obj.json_column['a']
# # => 'b'
#
# You can change the handling of string typecasting by using +typecast_json_strings+:
#
# DB.typecast_json_strings = true
# obj.json_column = '{"a": "b"}'
# obj.json_column.class
# # => Sequel::Postgres::JSONString
# obj.json_column
# # => '{"a": "b"}'
#
# Note that +nil+ values are never automatically wrapped:
#
# obj.json_column = nil
# obj.json_column.class
# # => NilClass
# obj.json_column
# # => nil
#
# If you want to set a JSON null value when using a model, you must wrap it
# explicitly:
#
# obj.json_column = Sequel.pg_json_wrap(nil)
# obj.json_column.class
# # => Sequel::Postgres::JSONNull
# obj.json_column
# # => nil
#
# To use this extension, load it into the Database instance:
#
# DB.extension :pg_json
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using json columns in CREATE/ALTER TABLE statements.
#
# This extension integrates with the pg_array extension. If you plan
# to use the json[] or jsonb[] types, load the pg_array extension before the
# pg_json extension:
#
# DB.extension :pg_array, :pg_json
#
# Note that when accessing json hashes, you should always use strings for keys.
# Attempting to use other values (such as symbols) will not work correctly.
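#
#   # A sketch, assuming json_column holds an object with key 'a':
#   h = DB[:table].get(:json_column)
#   h['a'] # returns the value
#   h[:a]  # nil, symbol keys do not match string keys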
#
# This extension requires both the json and delegate libraries. However, you
# can override +Sequel.parse_json+, +Sequel.object_to_json+, and
# +Sequel.json_parser_error_class+ to use an alternative JSON implementation.
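#
#   # A minimal sketch, assuming a hypothetical MyJSON library with
#   # compatible load/dump methods and error class:
#   def Sequel.parse_json(json)
#     MyJSON.load(json)
#   end
#
#   def Sequel.object_to_json(obj, *args, &block)
#     MyJSON.dump(obj)
#   end
#
#   def Sequel.json_parser_error_class
#     MyJSON::Error
#   end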
#
# Related modules: Sequel::Postgres::JSONDatabaseMethods
require 'delegate'
require 'json'
module Sequel
module Postgres
# A module included in all of the JSON wrapper classes.
module JSONObject
end
# A module included in all of the JSONB wrapper classes.
module JSONBObject
end
create_delegate_class = lambda do |name, delegate_class|
base_class = DelegateClass(delegate_class)
base_class.class_eval do
include Sequel::SQL::AliasMethods
include Sequel::SQL::CastMethods
end
json_class = Class.new(base_class) do
include JSONObject
def sql_literal_append(ds, sql)
ds.literal_append(sql, Sequel.object_to_json(self))
sql << '::json'
end
# Allow automatic parameterization.
def sequel_auto_param_type(ds)
"::json"
end
end
jsonb_class = Class.new(base_class) do
include JSONBObject
def sql_literal_append(ds, sql)
ds.literal_append(sql, Sequel.object_to_json(self))
sql << '::jsonb'
end
# Allow automatic parameterization.
def sequel_auto_param_type(ds)
"::jsonb"
end
end
const_set(:"JSON#{name}Base", base_class)
const_set(:"JSON#{name}", json_class)
const_set(:"JSONB#{name}", jsonb_class)
end
create_delegate_class.call(:Array, Array)
create_delegate_class.call(:Hash, Hash)
create_delegate_class.call(:String, String)
create_delegate_class.call(:Integer, Integer)
create_delegate_class.call(:Float, Float)
create_delegate_class.call(:Null, NilClass)
create_delegate_class.call(:True, TrueClass)
create_delegate_class.call(:False, FalseClass)
JSON_WRAPPER_MAPPING = {
::Array => JSONArray,
::Hash => JSONHash,
}.freeze
JSONB_WRAPPER_MAPPING = {
::Array => JSONBArray,
::Hash => JSONBHash,
}.freeze
JSON_PRIMITIVE_WRAPPER_MAPPING = {
::String => JSONString,
::Integer => JSONInteger,
::Float => JSONFloat,
::NilClass => JSONNull,
::TrueClass => JSONTrue,
::FalseClass => JSONFalse,
}
JSONB_PRIMITIVE_WRAPPER_MAPPING = {
::String => JSONBString,
::Integer => JSONBInteger,
::Float => JSONBFloat,
::NilClass => JSONBNull,
::TrueClass => JSONBTrue,
::FalseClass => JSONBFalse,
}
if RUBY_VERSION < '2.4'
# :nocov:
JSON_PRIMITIVE_WRAPPER_MAPPING[Fixnum] = JSONInteger
JSON_PRIMITIVE_WRAPPER_MAPPING[Bignum] = JSONInteger
JSONB_PRIMITIVE_WRAPPER_MAPPING[Fixnum] = JSONBInteger
JSONB_PRIMITIVE_WRAPPER_MAPPING[Bignum] = JSONBInteger
# :nocov:
end
JSON_PRIMITIVE_WRAPPER_MAPPING.freeze
JSONB_PRIMITIVE_WRAPPER_MAPPING.freeze
JSON_COMBINED_WRAPPER_MAPPING = JSON_WRAPPER_MAPPING.merge(JSON_PRIMITIVE_WRAPPER_MAPPING).freeze
JSONB_COMBINED_WRAPPER_MAPPING = JSONB_WRAPPER_MAPPING.merge(JSONB_PRIMITIVE_WRAPPER_MAPPING).freeze
JSONB_WRAP_CLASSES = JSONB_COMBINED_WRAPPER_MAPPING.keys.freeze
Sequel::Deprecation.deprecate_constant(self, :JSON_WRAPPER_MAPPING)
Sequel::Deprecation.deprecate_constant(self, :JSONB_WRAPPER_MAPPING)
Sequel::Deprecation.deprecate_constant(self, :JSON_PRIMITIVE_WRAPPER_MAPPING)
Sequel::Deprecation.deprecate_constant(self, :JSONB_PRIMITIVE_WRAPPER_MAPPING)
Sequel::Deprecation.deprecate_constant(self, :JSON_COMBINED_WRAPPER_MAPPING)
Sequel::Deprecation.deprecate_constant(self, :JSONB_COMBINED_WRAPPER_MAPPING)
Sequel::Deprecation.deprecate_constant(self, :JSONB_WRAP_CLASSES)
JSON_WRAP_CLASSES = [Hash, Array, String, Integer, Float, NilClass, TrueClass, FalseClass].freeze
# Methods enabling Database object integration with the json type.
module JSONDatabaseMethods
def self.extended(db)
db.instance_exec do
add_conversion_proc(114, method(:_db_parse_json))
add_conversion_proc(3802, method(:_db_parse_jsonb))
if respond_to?(:register_array_type)
register_array_type('json', :oid=>199, :scalar_oid=>114)
register_array_type('jsonb', :oid=>3807, :scalar_oid=>3802)
end
@schema_type_classes[:json] = [JSONObject]
@schema_type_classes[:jsonb] = [JSONBObject]
end
end
# Return the wrapper class for the json type if value is Hash or Array.
def self.json_wrapper(value)
case value
when ::Hash
JSONHash
when ::Array
JSONArray
end
end
# Return the wrapper class for the jsonb type if value is Hash or Array.
def self.jsonb_wrapper(value)
case value
when ::Hash
JSONBHash
when ::Array
JSONBArray
end
end
# Return the wrapper class for the json type if value is a supported type.
def self.json_primitive_wrapper(value)
case value
when ::Hash
JSONHash
when ::Array
JSONArray
when ::String
JSONString
when ::Integer
JSONInteger
when ::Float
JSONFloat
when ::NilClass
JSONNull
when ::TrueClass
JSONTrue
when ::FalseClass
JSONFalse
end
end
# Return the wrapper class for the jsonb type if value is a supported type.
def self.jsonb_primitive_wrapper(value)
case value
when ::Hash
JSONBHash
when ::Array
JSONBArray
when ::String
JSONBString
when ::Integer
JSONBInteger
when ::Float
JSONBFloat
when ::NilClass
JSONBNull
when ::TrueClass
JSONBTrue
when ::FalseClass
JSONBFalse
end
end
# Deprecated
def self.db_parse_json(s)
# SEQUEL6: Remove
parse_json(s)
rescue Sequel::InvalidValue
raise unless s.is_a?(String)
parse_json("[#{s}]").first
end
# Deprecated
def self.db_parse_jsonb(s)
# SEQUEL6: Remove
parse_json(s, true)
rescue Sequel::InvalidValue
raise unless s.is_a?(String)
parse_json("[#{s}]").first
end
# Deprecated
def self.parse_json(s, jsonb=false)
# SEQUEL6: Remove
Sequel::Deprecation.deprecate("Sequel::Postgres::JSONDatabaseMethods.{parse_json,db_parse_json,db_parse_jsonb} are deprecated and will be removed in Sequel 6.")
begin
value = Sequel.parse_json(s)
rescue Sequel.json_parser_error_class => e
raise Sequel.convert_exception_class(e, Sequel::InvalidValue)
end
case value
when Array
(jsonb ? JSONBArray : JSONArray).new(value)
when Hash
(jsonb ? JSONBHash : JSONHash).new(value)
when String, Numeric, true, false, nil
value
else
raise Sequel::InvalidValue, "unhandled json value: #{value.inspect} (from #{s.inspect})"
end
end
# Whether to wrap JSON primitives instead of using Ruby objects.
# Wrapping the primitives allows the primitive values to roundtrip,
# but it can cause problems, especially as false/null JSON values
# will be treated as truthy in Ruby due to the wrapping. False by
# default.
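#
#   # A sketch, assuming this extension is loaded into DB:
#   DB.wrap_json_primitives = true
#   DB.get(Sequel.cast('1', :json)).class # => Sequel::Postgres::JSONInteger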
attr_accessor :wrap_json_primitives
# Whether to typecast strings for json/jsonb types as JSON
# strings, instead of trying to parse the string as JSON.
# False by default.
attr_accessor :typecast_json_strings
# Handle json and jsonb types in bound variables
def bound_variable_arg(arg, conn)
case arg
when JSONObject, JSONBObject
Sequel.object_to_json(arg)
else
super
end
end
private
# Parse JSON data coming from the database. Since PostgreSQL allows
# non-JSON data in JSON fields (such as plain numbers and strings),
# we don't want to raise an exception for that.
def _db_parse_json(s)
_wrap_json(_parse_json(s))
rescue Sequel::InvalidValue
raise unless s.is_a?(String)
_wrap_json(_parse_json("[#{s}]").first)
end
# Same as _db_parse_json, but consider the input as jsonb.
def _db_parse_jsonb(s)
_wrap_jsonb(_parse_json(s))
rescue Sequel::InvalidValue
raise unless s.is_a?(String)
_wrap_jsonb(_parse_json("[#{s}]").first)
end
# Parse the given string as json, returning the parsed Ruby object
# (Hash, Array, String, Numeric, true, false, or nil). Raises
# Sequel::InvalidValue if the string cannot be parsed as JSON.
def _parse_json(s)
Sequel.parse_json(s)
rescue Sequel.json_parser_error_class => e
raise Sequel.convert_exception_class(e, Sequel::InvalidValue)
end
# Wrap the parsed JSON value in the appropriate JSON wrapper class.
# Only wrap primitive values if wrap_json_primitives is set.
def _wrap_json(value)
if klass = JSONDatabaseMethods.json_wrapper(value)
klass.new(value)
elsif klass = JSONDatabaseMethods.json_primitive_wrapper(value)
if wrap_json_primitives
klass.new(value)
else
value
end
else
raise Sequel::InvalidValue, "unhandled json value: #{value.inspect}"
end
end
# Wrap the parsed JSON value in the appropriate JSONB wrapper class.
# Only wrap primitive values if wrap_json_primitives is set.
def _wrap_jsonb(value)
if klass = JSONDatabaseMethods.jsonb_wrapper(value)
klass.new(value)
elsif klass = JSONDatabaseMethods.jsonb_primitive_wrapper(value)
if wrap_json_primitives
klass.new(value)
else
value
end
else
raise Sequel::InvalidValue, "unhandled jsonb value: #{value.inspect}"
end
end
# Make the column type detection recognize the json types.
def schema_column_type(db_type)
case db_type
when 'json'
:json
when 'jsonb'
:jsonb
else
super
end
end
# Set the :callable_default value if the default value is recognized as an empty json/jsonb array/hash.
def schema_post_process(_)
super.each do |a|
h = a[1]
if (h[:type] == :json || h[:type] == :jsonb) && h[:default] =~ /\A'(\{\}|\[\])'::jsonb?\z/
is_array = $1 == '[]'
klass = if h[:type] == :json
if is_array
JSONArray
else
JSONHash
end
elsif is_array
JSONBArray
else
JSONBHash
end
h[:callable_default] = lambda{klass.new(is_array ? [] : {})}
end
end
end
# Convert the value given to a JSON wrapper object.
def typecast_value_json(value)
case value
when JSONObject
value
when String
if typecast_json_strings
JSONString.new(value)
else
_wrap_json(_parse_json(value))
end
when *JSON_WRAP_CLASSES
JSONDatabaseMethods.json_primitive_wrapper(value).new(value)
when JSONBObject
value = value.__getobj__
JSONDatabaseMethods.json_primitive_wrapper(value).new(value)
else
raise Sequel::InvalidValue, "invalid value for json: #{value.inspect}"
end
end
# Convert the value given to a JSONB wrapper object.
def typecast_value_jsonb(value)
case value
when JSONBObject
value
when String
if typecast_json_strings
JSONBString.new(value)
else
_wrap_jsonb(_parse_json(value))
end
when *JSON_WRAP_CLASSES
JSONDatabaseMethods.jsonb_primitive_wrapper(value).new(value)
when JSONObject
value = value.__getobj__
JSONDatabaseMethods.jsonb_primitive_wrapper(value).new(value)
else
raise Sequel::InvalidValue, "invalid value for jsonb: #{value.inspect}"
end
end
end
end
module SQL::Builders
# Wrap the array or hash in a Postgres::JSONArray or Postgres::JSONHash.
# Also handles Postgres::JSONObject and Postgres::JSONBObject values.
# For other objects, calls +Sequel.pg_json_op+ (which is defined
# by the pg_json_ops extension).
def pg_json(v)
case v
when Postgres::JSONObject
v
when Array
Postgres::JSONArray.new(v)
when Hash
Postgres::JSONHash.new(v)
when Postgres::JSONBObject
v = v.__getobj__
Postgres::JSONDatabaseMethods.json_primitive_wrapper(v).new(v)
else
Sequel.pg_json_op(v)
end
end
# Wraps Ruby array, hash, string, integer, float, true, false, and nil
# values with the appropriate JSON wrapper. Raises an exception for
# other types.
def pg_json_wrap(v)
case v
when *Postgres::JSON_WRAP_CLASSES
Postgres::JSONDatabaseMethods.json_primitive_wrapper(v).new(v)
else
raise Error, "invalid value passed to Sequel.pg_json_wrap: #{v.inspect}"
end
end
# Wrap the array or hash in a Postgres::JSONBArray or Postgres::JSONBHash.
# Also handles Postgres::JSONObject and Postgres::JSONBObject values.
# For other objects, calls +Sequel.pg_json_op+ (which is defined
# by the pg_json_ops extension).
def pg_jsonb(v)
case v
when Postgres::JSONBObject
v
when Array
Postgres::JSONBArray.new(v)
when Hash
Postgres::JSONBHash.new(v)
when Postgres::JSONObject
v = v.__getobj__
Postgres::JSONDatabaseMethods.jsonb_primitive_wrapper(v).new(v)
else
Sequel.pg_jsonb_op(v)
end
end
# Wraps Ruby array, hash, string, integer, float, true, false, and nil
# values with the appropriate JSONB wrapper. Raises an exception for
# other types.
def pg_jsonb_wrap(v)
case v
when *Postgres::JSON_WRAP_CLASSES
Postgres::JSONDatabaseMethods.jsonb_primitive_wrapper(v).new(v)
else
raise Error, "invalid value passed to Sequel.pg_jsonb_wrap: #{v.inspect}"
end
end
end
Database.register_extension(:pg_json, Postgres::JSONDatabaseMethods)
end
# :nocov:
if Sequel.core_extensions?
class Array
# Return a Sequel::Postgres::JSONArray proxy to the receiver.
# This is mostly useful as a short cut for creating JSONArray
# objects that didn't come from the database.
def pg_json
Sequel::Postgres::JSONArray.new(self)
end
# Return a Sequel::Postgres::JSONArray proxy to the receiver.
# This is mostly useful as a short cut for creating JSONArray
# objects that didn't come from the database.
def pg_jsonb
Sequel::Postgres::JSONBArray.new(self)
end
end
class Hash
# Return a Sequel::Postgres::JSONHash proxy to the receiver.
# This is mostly useful as a short cut for creating JSONHash
# objects that didn't come from the database.
def pg_json
Sequel::Postgres::JSONHash.new(self)
end
# Return a Sequel::Postgres::JSONHash proxy to the receiver.
# This is mostly useful as a short cut for creating JSONHash
# objects that didn't come from the database.
def pg_jsonb
Sequel::Postgres::JSONBHash.new(self)
end
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Array do
def pg_json
Sequel::Postgres::JSONArray.new(self)
end
def pg_jsonb
Sequel::Postgres::JSONBArray.new(self)
end
end
refine Hash do
def pg_json
Sequel::Postgres::JSONHash.new(self)
end
def pg_jsonb
Sequel::Postgres::JSONBHash.new(self)
end
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_json_ops.rb 0000664 0000000 0000000 00000065727 14342141206 0022231 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_json_ops extension adds support to Sequel's DSL to make
# it easier to call PostgreSQL JSON functions and operators (added
# first in PostgreSQL 9.3). It also supports the JSONB functions
# and operators added in PostgreSQL 9.4, as well as additional
# functions and operators added in later versions.
#
# To load the extension:
#
# Sequel.extension :pg_json_ops
#
# The most common usage is passing an expression to Sequel.pg_json_op
# or Sequel.pg_jsonb_op:
#
# j = Sequel.pg_json_op(:json_column)
# jb = Sequel.pg_jsonb_op(:jsonb_column)
#
# If you have also loaded the pg_json extension, you can use
# Sequel.pg_json or Sequel.pg_jsonb as well:
#
# j = Sequel.pg_json(:json_column)
# jb = Sequel.pg_jsonb(:jsonb_column)
#
# Also, on most Sequel expression objects, you can call the pg_json
# or pg_jsonb method:
#
# j = Sequel[:json_column].pg_json
# jb = Sequel[:jsonb_column].pg_jsonb
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Symbol#pg_json or
# Symbol#pg_jsonb:
#
# j = :json_column.pg_json
# jb = :jsonb_column.pg_jsonb
#
# This creates a Sequel::Postgres::JSONOp or Sequel::Postgres::JSONBOp object that can be used
# for easier querying. The following methods are available for both JSONOp and JSONBOp instances:
#
# j[1] # (json_column -> 1)
# j[%w'a b'] # (json_column #> ARRAY['a','b'])
# j.get_text(1) # (json_column ->> 1)
# j.get_text(%w'a b') # (json_column #>> ARRAY['a','b'])
# j.extract('a', 'b') # json_extract_path(json_column, 'a', 'b')
# j.extract_text('a', 'b') # json_extract_path_text(json_column, 'a', 'b')
#
# j.array_length # json_array_length(json_column)
# j.array_elements # json_array_elements(json_column)
# j.array_elements_text # json_array_elements_text(json_column)
# j.each # json_each(json_column)
# j.each_text # json_each_text(json_column)
# j.keys # json_object_keys(json_column)
# j.typeof # json_typeof(json_column)
# j.strip_nulls # json_strip_nulls(json_column)
#
# j.populate(:a) # json_populate_record(a, json_column)
# j.populate_set(:a) # json_populate_recordset(a, json_column)
# j.to_record # json_to_record(json_column)
# j.to_recordset # json_to_recordset(json_column)
#
# There are additional methods that are only supported on JSONBOp instances:
#
# j - 1 # (jsonb_column - 1)
# j.concat(:h) # (jsonb_column || h)
# j.contain_all(:a) # (jsonb_column ?& a)
# j.contain_any(:a) # (jsonb_column ?| a)
# j.contains(:h) # (jsonb_column @> h)
# j.contained_by(:h) # (jsonb_column <@ h)
# j.delete_path(%w'0 a') # (jsonb_column #- ARRAY['0','a'])
# j.has_key?('a') # (jsonb_column ? 'a')
# j.insert(%w'0 a', 'a'=>1) # jsonb_insert(jsonb_column, ARRAY['0','a'], '{"a":1}'::jsonb, false)
# j.pretty # jsonb_pretty(jsonb_column)
# j.set(%w'0 a', :h) # jsonb_set(jsonb_column, ARRAY['0','a'], h, true)
#
# j.set_lax(%w'0 a', :h, false, 'raise_exception')
# # jsonb_set_lax(jsonb_column, ARRAY['0','a'], h, false, 'raise_exception')
#
# On PostgreSQL 12+ SQL/JSON path functions and operators are supported:
#
# j.path_exists('$.foo') # (jsonb_column @? '$.foo')
# j.path_match('$.foo') # (jsonb_column @@ '$.foo')
#
# j.path_exists!('$.foo') # jsonb_path_exists(jsonb_column, '$.foo')
# j.path_match!('$.foo') # jsonb_path_match(jsonb_column, '$.foo')
# j.path_query('$.foo') # jsonb_path_query(jsonb_column, '$.foo')
# j.path_query_array('$.foo') # jsonb_path_query_array(jsonb_column, '$.foo')
# j.path_query_first('$.foo') # jsonb_path_query_first(jsonb_column, '$.foo')
#
# On PostgreSQL 13+ timezone-aware SQL/JSON path functions and operators are supported:
#
# j.path_exists_tz!('$.foo') # jsonb_path_exists_tz(jsonb_column, '$.foo')
# j.path_match_tz!('$.foo') # jsonb_path_match_tz(jsonb_column, '$.foo')
# j.path_query_tz('$.foo') # jsonb_path_query_tz(jsonb_column, '$.foo')
# j.path_query_array_tz('$.foo') # jsonb_path_query_array_tz(jsonb_column, '$.foo')
# j.path_query_first_tz('$.foo') # jsonb_path_query_first_tz(jsonb_column, '$.foo')
#
# For the PostgreSQL 12+ SQL/JSON path functions, one argument is required (+path+) and
# two more arguments are optional (+vars+ and +silent+). +path+ specifies the JSON path.
# +vars+ specifies a hash or a string in JSON format of named variables to be
# substituted in +path+. +silent+ specifies whether errors are suppressed. By default,
# errors are not suppressed.
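#
#   # For example, combining all three arguments (a sketch):
#   jb.path_exists!("$.foo ? ($ > $x)", {x: 2}, true)
#   # jsonb_path_exists(jsonb_column, '$.foo ? ($ > $x)', '{"x":2}', true)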
#
# On PostgreSQL 14+, the JSONB [] method will use subscripts instead of being
# the same as +get+, if the value being wrapped is an identifier:
#
# Sequel.pg_jsonb_op(:jsonb_column)[1] # jsonb_column[1]
# Sequel.pg_jsonb_op(:jsonb_column)[1][2] # jsonb_column[1][2]
# Sequel.pg_jsonb_op(Sequel[:j][:b])[1] # j.b[1]
#
# This support allows you to use JSONB subscripts in UPDATE statements to update only
# part of a column:
#
# c = Sequel.pg_jsonb_op(:c)
# DB[:t].update(c['key1'] => '1', c['key2'] => '"a"')
# # UPDATE "t" SET "c"['key1'] = '1', "c"['key2'] = '"a"'
#
# Note that you have to provide the value of a JSONB subscript as a JSONB value, so this
# will update +key1+ to use the number 1, and +key2+ to use the string a.
# For this reason it may be simpler to use +to_json+:
#
# c = Sequel.pg_jsonb_op(:c)
# DB[:t].update(c['key1'] => 1.to_json, c['key2'] => "a".to_json)
#
# If you are also using the pg_json extension, you should load it before
# loading this extension. Doing so will allow you to use the #op method on
# JSONHash, JSONArray, JSONBHash, and JSONBArray, allowing you to perform json/jsonb operations
# on json/jsonb literals.
#
# In order to get the automatic conversion from a ruby array to a PostgreSQL array
# (as shown in the #[] and #get_text examples above), you need to load the pg_array
# extension.
#
# Related modules: Sequel::Postgres::JSONBaseOp, Sequel::Postgres::JSONOp,
# Sequel::Postgres::JSONBOp
#
module Sequel
module Postgres
# The JSONBaseOp class is a simple container for a single object that
# defines methods that yield Sequel expression objects representing
# PostgreSQL json operators and functions.
#
# In the method documentation examples, assume that:
#
# json_op = Sequel.pg_json(:json)
class JSONBaseOp < Sequel::SQL::Wrapper
GET = ["(".freeze, " -> ".freeze, ")".freeze].freeze
GET_TEXT = ["(".freeze, " ->> ".freeze, ")".freeze].freeze
GET_PATH = ["(".freeze, " #> ".freeze, ")".freeze].freeze
GET_PATH_TEXT = ["(".freeze, " #>> ".freeze, ")".freeze].freeze
# Get JSON array element or object field as json. If an array is given,
# gets the object at the specified path.
#
# json_op[1] # (json -> 1)
# json_op['a'] # (json -> 'a')
# json_op[%w'a b'] # (json #> ARRAY['a', 'b'])
def [](key)
if is_array?(key)
json_op(GET_PATH, wrap_array(key))
else
json_op(GET, key)
end
end
alias get []
# Returns a set of json values for the elements in the json array.
#
# json_op.array_elements # json_array_elements(json)
def array_elements
function(:array_elements)
end
# Returns a set of text values for the elements in the json array.
#
# json_op.array_elements_text # json_array_elements_text(json)
def array_elements_text
function(:array_elements_text)
end
# Get the length of the outermost json array.
#
# json_op.array_length # json_array_length(json)
def array_length
Sequel::SQL::NumericExpression.new(:NOOP, function(:array_length))
end
# Returns a set of key and value pairs, where the keys
# are text and the values are JSON.
#
# json_op.each # json_each(json)
def each
function(:each)
end
# Returns a set of key and value pairs, where the keys
# and values are both text.
#
# json_op.each_text # json_each_text(json)
def each_text
function(:each_text)
end
# Returns a json value for the object at the given path.
#
# json_op.extract('a') # json_extract_path(json, 'a')
# json_op.extract('a', 'b') # json_extract_path(json, 'a', 'b')
def extract(*a)
self.class.new(function(:extract_path, *a))
end
# Returns a text value for the object at the given path.
#
# json_op.extract_text('a') # json_extract_path_text(json, 'a')
# json_op.extract_text('a', 'b') # json_extract_path_text(json, 'a', 'b')
def extract_text(*a)
Sequel::SQL::StringExpression.new(:NOOP, function(:extract_path_text, *a))
end
# Get JSON array element or object field as text. If an array is given,
# gets the object at the specified path.
#
# json_op.get_text(1) # (json ->> 1)
# json_op.get_text('a') # (json ->> 'a')
# json_op.get_text(%w'a b') # (json #>> ARRAY['a', 'b'])
def get_text(key)
if is_array?(key)
json_op(GET_PATH_TEXT, wrap_array(key))
else
json_op(GET_TEXT, key)
end
end
# Returns a set of keys as text in the json object.
#
# json_op.keys # json_object_keys(json)
def keys
function(:object_keys)
end
# Expands the given argument using the columns in the json.
#
# json_op.populate(arg) # json_populate_record(arg, json)
def populate(arg)
SQL::Function.new(function_name(:populate_record), arg, self)
end
# Expands the given argument using the columns in the json.
#
# json_op.populate_set(arg) # json_populate_recordset(arg, json)
def populate_set(arg)
SQL::Function.new(function_name(:populate_recordset), arg, self)
end
# Returns a json value stripped of all internal null values.
#
# json_op.strip_nulls # json_strip_nulls(json)
def strip_nulls
self.class.new(function(:strip_nulls))
end
# Builds arbitrary record from json object. You need to define the
# structure of the record using #as on the resulting object:
#
# json_op.to_record.as(:x, [Sequel.lit('a integer'), Sequel.lit('b text')]) # json_to_record(json) AS x(a integer, b text)
def to_record
function(:to_record)
end
# Builds arbitrary set of records from json array of objects. You need to define the
# structure of the records using #as on the resulting object:
#
# json_op.to_recordset.as(:x, [Sequel.lit('a integer'), Sequel.lit('b text')]) # json_to_recordset(json) AS x(a integer, b text)
def to_recordset
function(:to_recordset)
end
# Returns the type of the outermost json value as text.
#
# json_op.typeof # json_typeof(json)
def typeof
function(:typeof)
end
private
# Return a placeholder literal with the given str and args, wrapped
# in a JSONOp or JSONBOp, used by operators that return json or jsonb.
def json_op(str, args)
self.class.new(Sequel::SQL::PlaceholderLiteralString.new(str, [self, args]))
end
# Return a function with the given name, and the receiver as the first
# argument, with any additional arguments given.
def function(name, *args)
SQL::Function.new(function_name(name), self, *args)
end
# Whether the given object represents an array in PostgreSQL.
def is_array?(a)
a.is_a?(Array) || (defined?(PGArray) && a.is_a?(PGArray)) || (defined?(ArrayOp) && a.is_a?(ArrayOp))
end
# Automatically wrap argument in a PGArray if it is a plain Array.
# Requires that the pg_array extension has been loaded to work.
def wrap_array(arg)
if arg.instance_of?(Array) && Sequel.respond_to?(:pg_array)
Sequel.pg_array(arg)
else
arg
end
end
end
# JSONBaseOp subclass for the json type
class JSONOp < JSONBaseOp
# Return the receiver, since it is already a JSONOp.
def pg_json
self
end
private
# The json type functions are prefixed with json_
def function_name(name)
"json_#{name}"
end
end
# JSONBaseOp subclass for the jsonb type.
#
# In the method documentation examples, assume that:
#
# jsonb_op = Sequel.pg_jsonb(:jsonb)
class JSONBOp < JSONBaseOp
CONCAT = ["(".freeze, " || ".freeze, ")".freeze].freeze
CONTAIN_ALL = ["(".freeze, " ?& ".freeze, ")".freeze].freeze
CONTAIN_ANY = ["(".freeze, " ?| ".freeze, ")".freeze].freeze
CONTAINS = ["(".freeze, " @> ".freeze, ")".freeze].freeze
CONTAINED_BY = ["(".freeze, " <@ ".freeze, ")".freeze].freeze
DELETE_PATH = ["(".freeze, " #- ".freeze, ")".freeze].freeze
HAS_KEY = ["(".freeze, " ? ".freeze, ")".freeze].freeze
PATH_EXISTS = ["(".freeze, " @? ".freeze, ")".freeze].freeze
PATH_MATCH = ["(".freeze, " @@ ".freeze, ")".freeze].freeze
# Support subscript syntax for JSONB.
def [](key)
if is_array?(key)
super
else
case @value
when Symbol, SQL::Identifier, SQL::QualifiedIdentifier, JSONBSubscriptOp
# Only use subscripts for identifiers. In other cases, switching from
# the -> operator to [] for subscripts causes SQL syntax issues. You
# only need the [] for subscripting when doing assignment, and
# assignment is generally done on identifiers.
self.class.new(JSONBSubscriptOp.new(self, key))
else
super
end
end
end
# jsonb expression for deletion of the given argument from the
# current jsonb.
#
# jsonb_op - "a" # (jsonb - 'a')
def -(other)
self.class.new(super)
end
# jsonb expression for concatenation of the given jsonb into
# the current jsonb.
#
# jsonb_op.concat(:h) # (jsonb || h)
def concat(other)
json_op(CONCAT, wrap_input_jsonb(other))
end
# Check if the receiver contains all of the keys in the given array:
#
# jsonb_op.contain_all(:a) # (jsonb ?& a)
def contain_all(other)
bool_op(CONTAIN_ALL, wrap_input_array(other))
end
# Check if the receiver contains any of the keys in the given array:
#
# jsonb_op.contain_any(:a) # (jsonb ?| a)
def contain_any(other)
bool_op(CONTAIN_ANY, wrap_input_array(other))
end
# Check if the receiver contains all entries in the other jsonb:
#
# jsonb_op.contains(:h) # (jsonb @> h)
def contains(other)
bool_op(CONTAINS, wrap_input_jsonb(other))
end
# Check if the other jsonb contains all entries in the receiver:
#
# jsonb_op.contained_by(:h) # (jsonb <@ h)
def contained_by(other)
bool_op(CONTAINED_BY, wrap_input_jsonb(other))
end
# Removes the given path from the receiver.
#
# jsonb_op.delete_path(:h) # (jsonb #- h)
def delete_path(other)
json_op(DELETE_PATH, wrap_input_array(other))
end
# Check if the receiver contains the given key:
#
# jsonb_op.has_key?('a') # (jsonb ? 'a')
def has_key?(key)
bool_op(HAS_KEY, key)
end
alias include? has_key?
# Inserts the given jsonb value at the given path in the receiver.
# The default is to insert the value before the given path, but
# insert_after can be set to true to insert it after the given path.
#
# jsonb_op.insert(['a', 'b'], h) # jsonb_insert(jsonb, ARRAY['a', 'b'], h, false)
# jsonb_op.insert(['a', 'b'], h, true) # jsonb_insert(jsonb, ARRAY['a', 'b'], h, true)
def insert(path, other, insert_after=false)
self.class.new(function(:insert, wrap_input_array(path), wrap_input_jsonb(other), insert_after))
end
# Returns whether the JSON path returns any item for the json object.
#
# json_op.path_exists("$.foo") # (json @? '$.foo')
def path_exists(path)
bool_op(PATH_EXISTS, path)
end
# Returns whether the JSON path returns any item for the json object.
#
# json_op.path_exists!("$.foo")
# # jsonb_path_exists(json, '$.foo')
#
# json_op.path_exists!("$.foo ? ($ > $x)", x: 2)
# # jsonb_path_exists(json, '$.foo ? ($ > $x)', '{"x":2}')
#
# json_op.path_exists!("$.foo ? ($ > $x)", {x: 2}, true)
# # jsonb_path_exists(json, '$.foo ? ($ > $x)', '{"x":2}', true)
def path_exists!(path, vars=nil, silent=nil)
Sequel::SQL::BooleanExpression.new(:NOOP, _path_function(:jsonb_path_exists, path, vars, silent))
end
# The same as #path_exists!, except that timezone-aware conversions are used for date/time values.
def path_exists_tz!(path, vars=nil, silent=nil)
Sequel::SQL::BooleanExpression.new(:NOOP, _path_function(:jsonb_path_exists_tz, path, vars, silent))
end
# Returns the first item of the result of JSON path predicate check for the json object.
# Returns nil if the first item is not true or false.
#
# json_op.path_match("$.foo") # (json @@ '$.foo')
def path_match(path)
bool_op(PATH_MATCH, path)
end
# Returns the first item of the result of JSON path predicate check for the json object.
# Returns nil if the first item is not true or false and silent is true.
#
# json_op.path_match!("$.foo")
# # jsonb_path_match(json, '$.foo')
#
# json_op.path_match!("$.foo ? ($ > $x)", x: 2)
# # jsonb_path_match(json, '$.foo ? ($ > $x)', '{"x":2}')
#
# json_op.path_match!("$.foo ? ($ > $x)", {x: 2}, true)
# # jsonb_path_match(json, '$.foo ? ($ > $x)', '{"x":2}', true)
def path_match!(path, vars=nil, silent=nil)
Sequel::SQL::BooleanExpression.new(:NOOP, _path_function(:jsonb_path_match, path, vars, silent))
end
# The same as #path_match!, except that timezone-aware conversions are used for date/time values.
def path_match_tz!(path, vars=nil, silent=nil)
Sequel::SQL::BooleanExpression.new(:NOOP, _path_function(:jsonb_path_match_tz, path, vars, silent))
end
# Returns a set of all jsonb values specified by the JSON path
# for the json object.
#
# json_op.path_query("$.foo")
# # jsonb_path_query(json, '$.foo')
#
# json_op.path_query("$.foo ? ($ > $x)", x: 2)
# # jsonb_path_query(json, '$.foo ? ($ > $x)', '{"x":2}')
#
# json_op.path_query("$.foo ? ($ > $x)", {x: 2}, true)
# # jsonb_path_query(json, '$.foo ? ($ > $x)', '{"x":2}', true)
def path_query(path, vars=nil, silent=nil)
_path_function(:jsonb_path_query, path, vars, silent)
end
# The same as #path_query, except that timezone-aware conversions are used for date/time values.
def path_query_tz(path, vars=nil, silent=nil)
_path_function(:jsonb_path_query_tz, path, vars, silent)
end
# Returns a jsonb array of all values specified by the JSON path
# for the json object.
#
# json_op.path_query_array("$.foo")
# # jsonb_path_query_array(json, '$.foo')
#
# json_op.path_query_array("$.foo ? ($ > $x)", x: 2)
# # jsonb_path_query_array(json, '$.foo ? ($ > $x)', '{"x":2}')
#
# json_op.path_query_array("$.foo ? ($ > $x)", {x: 2}, true)
# # jsonb_path_query_array(json, '$.foo ? ($ > $x)', '{"x":2}', true)
def path_query_array(path, vars=nil, silent=nil)
JSONBOp.new(_path_function(:jsonb_path_query_array, path, vars, silent))
end
# The same as #path_query_array, except that timezone-aware conversions are used for date/time values.
def path_query_array_tz(path, vars=nil, silent=nil)
JSONBOp.new(_path_function(:jsonb_path_query_array_tz, path, vars, silent))
end
# Returns the first item of the result specified by the JSON path
# for the json object.
#
# json_op.path_query_first("$.foo")
# # jsonb_path_query_first(json, '$.foo')
#
# json_op.path_query_first("$.foo ? ($ > $x)", x: 2)
# # jsonb_path_query_first(json, '$.foo ? ($ > $x)', '{"x":2}')
#
# json_op.path_query_first("$.foo ? ($ > $x)", {x: 2}, true)
# # jsonb_path_query_first(json, '$.foo ? ($ > $x)', '{"x":2}', true)
def path_query_first(path, vars=nil, silent=nil)
JSONBOp.new(_path_function(:jsonb_path_query_first, path, vars, silent))
end
# The same as #path_query_first, except that timezone-aware conversions are used for date/time values.
def path_query_first_tz(path, vars=nil, silent=nil)
JSONBOp.new(_path_function(:jsonb_path_query_first_tz, path, vars, silent))
end
# Return the receiver, since it is already a JSONBOp.
def pg_jsonb
self
end
# Return a pretty printed version of the receiver as a string expression.
#
# jsonb_op.pretty # jsonb_pretty(jsonb)
def pretty
Sequel::SQL::StringExpression.new(:NOOP, function(:pretty))
end
# Set the given jsonb value at the given path in the receiver.
# By default, this will create the value if it does not exist, but
# create_missing can be set to false to not create a new value.
#
# jsonb_op.set(['a', 'b'], h) # jsonb_set(jsonb, ARRAY['a', 'b'], h, true)
# jsonb_op.set(['a', 'b'], h, false) # jsonb_set(jsonb, ARRAY['a', 'b'], h, false)
def set(path, other, create_missing=true)
self.class.new(function(:set, wrap_input_array(path), wrap_input_jsonb(other), create_missing))
end
# The same as #set, except if +other+ is +nil+, then behaves according to +null_value_treatment+,
# which can be one of 'raise_exception', 'use_json_null' (default), 'delete_key', or 'return_target'.
def set_lax(path, other, create_missing=true, null_value_treatment='use_json_null')
self.class.new(function(:set_lax, wrap_input_array(path), wrap_input_jsonb(other), create_missing, null_value_treatment))
end
private
# Internals of the jsonb SQL/JSON path functions.
def _path_function(func, path, vars, silent)
args = []
if vars
if vars.is_a?(Hash)
vars = vars.to_json
end
args << vars
unless silent.nil?
args << silent
end
end
SQL::Function.new(func, self, path, *args)
end
# Return a placeholder literal with the given str and args, wrapped
# in a boolean expression, used by operators that return booleans.
def bool_op(str, other)
Sequel::SQL::BooleanExpression.new(:NOOP, Sequel::SQL::PlaceholderLiteralString.new(str, [value, other]))
end
# Wrap argument in a PGArray if it is an array
def wrap_input_array(obj)
if obj.is_a?(Array) && Sequel.respond_to?(:pg_array)
Sequel.pg_array(obj)
else
obj
end
end
# Wrap argument in a JSONBArray or JSONBHash if it is an array or hash.
def wrap_input_jsonb(obj)
if Sequel.respond_to?(:pg_jsonb) && (obj.is_a?(Array) || obj.is_a?(Hash))
Sequel.pg_jsonb(obj)
else
obj
end
end
# The jsonb type functions are prefixed with jsonb_
def function_name(name)
"jsonb_#{name}"
end
end
# Represents JSONB subscripts. This is abstracted because the
# subscript support depends on the database version.
class JSONBSubscriptOp < SQL::Expression
SUBSCRIPT = ["".freeze, "[".freeze, "]".freeze].freeze
# The expression being subscripted
attr_reader :expression
# The subscript to use
attr_reader :sub
# Set the expression and subscript to the given arguments
def initialize(expression, sub)
@expression = expression
@sub = sub
freeze
end
# Use subscripts instead of -> operator on PostgreSQL 14+
def to_s_append(ds, sql)
server_version = ds.db.server_version
frag = server_version && server_version >= 140000 ? SUBSCRIPT : JSONOp::GET
ds.literal_append(sql, Sequel::SQL::PlaceholderLiteralString.new(frag, [@expression, @sub]))
end
# Support transforming of jsonb subscripts
def sequel_ast_transform(transformer)
self.class.new(transformer.call(@expression), transformer.call(@sub))
end
end
module JSONOpMethods
# Wrap the receiver in a JSONOp so you can easily use the PostgreSQL
# json functions and operators with it.
def pg_json
JSONOp.new(self)
end
#
# Wrap the receiver in a JSONBOp so you can easily use the PostgreSQL
# jsonb functions and operators with it.
def pg_jsonb
JSONBOp.new(self)
end
end
# :nocov:
if defined?(JSONArray)
# :nocov:
class JSONArray
# Wrap the JSONArray instance in a JSONOp, allowing you to easily use
# the PostgreSQL json functions and operators with literal jsons.
def op
JSONOp.new(self)
end
end
class JSONHash
# Wrap the JSONHash instance in a JSONOp, allowing you to easily use
# the PostgreSQL json functions and operators with literal jsons.
def op
JSONOp.new(self)
end
end
class JSONBArray
# Wrap the JSONBArray instance in a JSONBOp, allowing you to easily use
# the PostgreSQL jsonb functions and operators with literal jsonbs.
def op
JSONBOp.new(self)
end
end
class JSONBHash
# Wrap the JSONBHash instance in a JSONBOp, allowing you to easily use
# the PostgreSQL jsonb functions and operators with literal jsonbs.
def op
JSONBOp.new(self)
end
end
end
end
module SQL::Builders
# Return the object wrapped in a Postgres::JSONOp.
def pg_json_op(v)
case v
when Postgres::JSONOp
v
else
Postgres::JSONOp.new(v)
end
end
# Return the object wrapped in a Postgres::JSONBOp.
def pg_jsonb_op(v)
case v
when Postgres::JSONBOp
v
else
Postgres::JSONBOp.new(v)
end
end
end
class SQL::GenericExpression
include Sequel::Postgres::JSONOpMethods
end
class LiteralString
include Sequel::Postgres::JSONOpMethods
end
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::Postgres::JSONOpMethods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::Postgres::JSONOpMethods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_loose_count.rb 0000664 0000000 0000000 00000002152 14342141206 0022707 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_loose_count extension looks at the table statistics
# in the PostgreSQL system tables to get a fast approximate
# count of the number of rows in a given table:
#
# DB.loose_count(:table) # => 123456
#
# It can also support schema qualified tables:
#
# DB.loose_count(Sequel[:schema][:table]) # => 123456
#
# How accurate this count is depends on the number of rows
# added/deleted from the table since the last time it was
# analyzed. If the table has not been vacuumed or analyzed
# yet, this can return 0 or -1 depending on the PostgreSQL
# version in use.
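#
#   # A sketch: analyze first for a more current estimate
#   # (some_table is a placeholder name):
#   DB.run "ANALYZE some_table"
#   DB.loose_count(:some_table) # => more accurate estimate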
#
# To load the extension into the database:
#
# DB.extension :pg_loose_count
#
# Related module: Sequel::Postgres::LooseCount
#
module Sequel
module Postgres
module LooseCount
# Look at the table statistics for the given table to get
# an approximate count of the number of rows.
def loose_count(table)
from(:pg_class).where(:oid=>regclass_oid(table)).get(Sequel.cast(:reltuples, Integer))
end
end
end
Database.register_extension(:pg_loose_count, Postgres::LooseCount)
end
sequel-5.63.0/lib/sequel/extensions/pg_multirange.rb 0000664 0000000 0000000 00000032635 14342141206 0022536 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_multirange extension adds support for the PostgreSQL 14+ multirange
# types to Sequel. PostgreSQL multirange types are similar to an array of
# ranges, where a match against the multirange is a match against any of the
# ranges in the multirange.
#
# When PostgreSQL multirange values are retrieved, they are parsed and returned
# as instances of Sequel::Postgres::PGMultiRange. PGMultiRange mostly acts
# like an array of Sequel::Postgres::PGRange (see the pg_range extension).
#
# In addition to the parser, this extension comes with literalizers
# for PGMultiRanges, so they can be used in queries and as bound variables.
#
# To turn an existing array of Ranges into a PGMultiRange, use Sequel.pg_multirange.
# You must provide the type of multirange when creating the multirange:
#
# Sequel.pg_multirange(array_of_date_ranges, :datemultirange)
#
# To use this extension, load it into the Database instance:
#
# DB.extension :pg_multirange
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using multirange type columns in CREATE/ALTER TABLE statements.
#
# This extension makes it easy to add support for other multirange types. In
# general, you just need to make sure that the subtype is handled and has the
# appropriate converter installed. For user defined
# types, you can do this via:
#
# DB.add_conversion_proc(subtype_oid){|string| }
#
# Then you can call
# Sequel::Postgres::PGMultiRange::DatabaseMethods#register_multirange_type
# to automatically set up a handler for the multirange type. So if you
# want to support the timemultirange type (assuming the time type is already
# supported):
#
# DB.register_multirange_type('timemultirange')
#
# This extension integrates with the pg_array extension. If you plan
# to use arrays of multirange types, load the pg_array extension before the
# pg_multirange extension:
#
# DB.extension :pg_array, :pg_multirange
#
# The pg_multirange extension will automatically load the pg_range extension.
#
# Related module: Sequel::Postgres::PGMultiRange
require 'delegate'
require 'strscan'
module Sequel
module Postgres
class PGMultiRange < DelegateClass(Array)
include Sequel::SQL::AliasMethods
# Converts strings into PGMultiRange instances.
class Parser < StringScanner
def initialize(source, converter)
super(source)
@converter = converter
end
# Parse the multirange type input string into a PGMultiRange value.
def parse
raise Sequel::Error, "invalid multirange, doesn't start with {" unless get_byte == '{'
ranges = []
unless scan(/\}/)
while true
raise Sequel::Error, "unfinished multirange" unless range_string = scan_until(/[\]\)]/)
ranges << @converter.call(range_string)
case sep = get_byte
when '}'
break
when ','
# nothing
else
raise Sequel::Error, "invalid multirange separator: #{sep.inspect}"
end
end
end
raise Sequel::Error, "invalid multirange, remaining data after }" unless eos?
ranges
end
end
# Callable object that takes the input string and parses it using Parser.
class Creator
# The database type to set on the PGMultiRange instances returned.
attr_reader :type
def initialize(type, converter=nil)
@type = type
@converter = converter
end
# Parse the string using Parser with the appropriate
# converter, and return a PGMultiRange with the appropriate database
# type.
def call(string)
PGMultiRange.new(Parser.new(string, @converter).parse, @type)
end
end
module DatabaseMethods
# Add the default multirange conversion procs to the database
def self.extended(db)
db.instance_exec do
raise Error, "multiranges not supported on this database" unless server_version >= 140000
extension :pg_range
@pg_multirange_schema_types ||= {}
register_multirange_type('int4multirange', :range_oid=>3904, :oid=>4451)
register_multirange_type('nummultirange', :range_oid=>3906, :oid=>4532)
register_multirange_type('tsmultirange', :range_oid=>3908, :oid=>4533)
register_multirange_type('tstzmultirange', :range_oid=>3910, :oid=>4534)
register_multirange_type('datemultirange', :range_oid=>3912, :oid=>4535)
register_multirange_type('int8multirange', :range_oid=>3926, :oid=>4536)
if respond_to?(:register_array_type)
register_array_type('int4multirange', :oid=>6150, :scalar_oid=>4451, :scalar_typecast=>:int4multirange)
register_array_type('nummultirange', :oid=>6151, :scalar_oid=>4532, :scalar_typecast=>:nummultirange)
register_array_type('tsmultirange', :oid=>6152, :scalar_oid=>4533, :scalar_typecast=>:tsmultirange)
register_array_type('tstzmultirange', :oid=>6153, :scalar_oid=>4534, :scalar_typecast=>:tstzmultirange)
register_array_type('datemultirange', :oid=>6155, :scalar_oid=>4535, :scalar_typecast=>:datemultirange)
register_array_type('int8multirange', :oid=>6157, :scalar_oid=>4536, :scalar_typecast=>:int8multirange)
end
[:int4multirange, :nummultirange, :tsmultirange, :tstzmultirange, :datemultirange, :int8multirange].each do |v|
@schema_type_classes[v] = PGMultiRange
end
procs = conversion_procs
add_conversion_proc(4533, PGMultiRange::Creator.new("tsmultirange", procs[3908]))
add_conversion_proc(4534, PGMultiRange::Creator.new("tstzmultirange", procs[3910]))
if respond_to?(:register_array_type) && defined?(PGArray::Creator)
add_conversion_proc(6152, PGArray::Creator.new("tsmultirange", procs[4533]))
add_conversion_proc(6153, PGArray::Creator.new("tstzmultirange", procs[4534]))
end
end
end
# Handle PGMultiRange values in bound variables
def bound_variable_arg(arg, conn)
case arg
when PGMultiRange
arg.unquoted_literal(schema_utility_dataset)
else
super
end
end
# Freeze the pg multirange schema types to prevent adding new ones.
def freeze
@pg_multirange_schema_types.freeze
super
end
# Register a database-specific multirange type. This can be used to support
# different multirange types per Database. Options:
#
# :converter :: A callable object (e.g. Proc) that is called with the PostgreSQL range string,
# and should return a PGRange instance.
# :oid :: The PostgreSQL OID for the multirange type. This is used by Sequel to set up automatic type
# conversion on retrieval from the database.
# :range_oid :: Should be the PostgreSQL OID for the multirange subtype (the range type). If given,
# automatically sets the :converter option by looking up the scalar conversion
# proc.
#
# If a block is given, it is treated as the :converter option.
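#
# Example (a sketch; timerange_oid is a placeholder for a user-defined
# range type's OID):
#
#   DB.register_multirange_type('timemultirange', :range_oid=>timerange_oid)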
def register_multirange_type(db_type, opts=OPTS, &block)
oid = opts[:oid]
soid = opts[:range_oid]
if has_converter = opts.has_key?(:converter)
raise Error, "can't provide both a block and :converter option to register_multirange_type" if block
converter = opts[:converter]
else
has_converter = true if block
converter = block
end
unless (soid || has_converter) && oid
range_oid, subtype_oid = from(:pg_range).join(:pg_type, :oid=>:rngmultitypid).where(:typname=>db_type.to_s).get([:rngmultitypid, :rngtypid])
soid ||= subtype_oid unless has_converter
oid ||= range_oid
end
db_type = db_type.to_s.dup.freeze
if soid
raise Error, "can't provide both a converter and :range_oid option to register" if has_converter
raise Error, "no conversion proc for :range_oid=>#{soid.inspect} in conversion_procs" unless converter = conversion_procs[soid]
end
raise Error, "cannot add a multirange type without a convertor (use :converter or :range_oid option or pass block)" unless converter
creator = Creator.new(db_type, converter)
add_conversion_proc(oid, creator)
@pg_multirange_schema_types[db_type] = db_type.to_sym
singleton_class.class_eval do
meth = :"typecast_value_#{db_type}"
scalar_typecast_method = :"typecast_value_#{opts.fetch(:scalar_typecast, db_type.sub('multirange', 'range'))}"
define_method(meth){|v| typecast_value_pg_multirange(v, creator, scalar_typecast_method)}
private meth
end
@schema_type_classes[db_type] = PGMultiRange
nil
end
private
# Recognize the registered database multirange types.
def schema_column_type(db_type)
@pg_multirange_schema_types[db_type] || super
end
# Set the :ruby_default value if the default value is recognized as a multirange.
def schema_post_process(_)
super.each do |a|
h = a[1]
db_type = h[:db_type]
if @pg_multirange_schema_types[db_type] && h[:default] =~ /\A#{db_type}\(.*\)\z/
h[:ruby_default] = get(Sequel.lit(h[:default]))
end
end
end
# Given a value to typecast and the type of PGMultiRange subclass:
# * If given a PGMultiRange with a matching type, use it directly.
# * If given a PGMultiRange with a different type, return a PGMultiRange
# with the creator's type.
# * If given an Array, create a new PGMultiRange instance for it, typecasting
# each instance using the scalar_typecast_method.
def typecast_value_pg_multirange(value, creator, scalar_typecast_method=nil)
case value
when PGMultiRange
return value if value.db_type == creator.type
when Array
# nothing
else
raise Sequel::InvalidValue, "invalid value for multirange type: #{value.inspect}"
end
if scalar_typecast_method && respond_to?(scalar_typecast_method, true)
value = value.map{|v| send(scalar_typecast_method, v)}
end
PGMultiRange.new(value, creator.type)
end
end
# The type of this multirange (e.g. 'int4multirange').
attr_accessor :db_type
# Set the array of ranges to delegate to, and the database type.
def initialize(ranges, db_type)
super(ranges)
@db_type = db_type.to_s
end
# Append the multirange SQL to the given sql string.
def sql_literal_append(ds, sql)
sql << db_type << '('
joiner = nil
conversion_meth = nil
each do |range|
if joiner
sql << joiner
else
joiner = ', '
end
unless range.is_a?(PGRange)
conversion_meth ||= :"typecast_value_#{db_type.sub('multi', '')}"
range = ds.db.send(conversion_meth, range)
end
ds.literal_append(sql, range)
end
sql << ')'
end
# Return whether the value is inside any of the ranges in the multirange.
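#
#   Sequel.pg_multirange([1..3, 8..10], :int4multirange).cover?(9) # => true
#   Sequel.pg_multirange([1..3, 8..10], :int4multirange).cover?(5) # => false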
def cover?(value)
any?{|range| range.cover?(value)}
end
alias === cover?
# Don't consider multiranges with different database types equal.
def eql?(other)
if PGMultiRange === other
return false unless other.db_type == db_type
other = other.__getobj__
end
__getobj__.eql?(other)
end
# Don't consider multiranges with different database types equal.
def ==(other)
return false if PGMultiRange === other && other.db_type != db_type
super
end
# Return a string containing the unescaped version of the multirange.
# Separated out for use by the bound argument code.
def unquoted_literal(ds)
val = String.new
val << "{"
joiner = nil
conversion_meth = nil
each do |range|
if joiner
val << joiner
else
joiner = ', '
end
unless range.is_a?(PGRange)
conversion_meth ||= :"typecast_value_#{db_type.sub('multi', '')}"
range = ds.db.send(conversion_meth, range)
end
val << range.unquoted_literal(ds)
end
val << "}"
end
# Allow automatic parameterization.
def sequel_auto_param_type(ds)
"::#{db_type}"
end
end
end
module SQL::Builders
# Convert the object to a Postgres::PGMultiRange.
def pg_multirange(v, db_type)
case v
when Postgres::PGMultiRange
if v.db_type == db_type
v
else
Postgres::PGMultiRange.new(v, db_type)
end
when Array
Postgres::PGMultiRange.new(v, db_type)
else
# May not be defined unless the pg_range_ops extension is used
pg_range_op(v)
end
end
end
Database.register_extension(:pg_multirange, Postgres::PGMultiRange::DatabaseMethods)
end
sequel-5.63.0/lib/sequel/extensions/pg_range.rb 0000664 0000000 0000000 00000050222 14342141206 0021453 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_range extension adds support for the PostgreSQL 9.2+ range
# types to Sequel. PostgreSQL range types are similar to ruby's
# Range class, representating an array of values. However, they
# are more flexible than ruby's ranges, allowing exclusive beginnings
# and endings (ruby's range only allows exclusive endings).
#
# When PostgreSQL range values are retrieved, they are parsed and returned
# as instances of Sequel::Postgres::PGRange. PGRange mostly acts
# like a Range, but it's not a Range as not all PostgreSQL range
# type values would be valid ruby ranges. If the range type value
# you are using is a valid ruby range, you can call PGRange#to_range
# to get a Range. However, if you call PGRange#to_range on a range
# type value uses features that ruby's Range does not support, an
# exception will be raised.
#
# In addition to the parser, this extension comes with literalizers
# for PGRange and Range, so they can be used in queries and as bound variables.
#
# To turn an existing Range into a PGRange, use Sequel.pg_range:
#
# Sequel.pg_range(range)
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Range#pg_range:
#
# range.pg_range
#
# You may want to specify a specific range type:
#
# Sequel.pg_range(range, :daterange)
# range.pg_range(:daterange)
#
# If you specify the range database type, Sequel will automatically cast
# the value to that type when literalizing.
#
# To use this extension, load it into the Database instance:
#
# DB.extension :pg_range
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using range type columns in CREATE/ALTER TABLE statements.
#
# This extension makes it easy to add support for other range types. In
# general, you just need to make sure that the subtype is handled and has the
# appropriate converter installed. For user defined
# types, you can do this via:
#
# DB.add_conversion_proc(subtype_oid){|string| }
#
# Then you can call
# Sequel::Postgres::PGRange::DatabaseMethods#register_range_type
# to automatically set up a handler for the range type. So if you
# want to support the timerange type (assuming the time type is already
# supported):
#
# DB.register_range_type('timerange')
#
# This extension integrates with the pg_array extension. If you plan
# to use arrays of range types, load the pg_array extension before the
# pg_range extension:
#
# DB.extension :pg_array, :pg_range
#
# Related module: Sequel::Postgres::PGRange
module Sequel
module Postgres
class PGRange
include Sequel::SQL::AliasMethods
# Creates callable objects that convert strings into PGRange instances.
class Parser
# The database range type for this parser (e.g. 'int4range'),
# automatically setting the db_type for the returned PGRange instances.
attr_reader :db_type
# A callable object to convert the beginning and ending of the range into
# the appropriate ruby type.
attr_reader :converter
# Set the db_type and converter on initialization.
def initialize(db_type, converter=nil)
@db_type = db_type.to_s.dup.freeze if db_type
@converter = converter
end
# Parse the range type input string into a PGRange value.
def call(string)
if string == 'empty'
return PGRange.empty(db_type)
end
raise(InvalidValue, "invalid or unhandled range format: #{string.inspect}") unless matches = /\A(\[|\()("((?:\\"|[^"])*)"|[^"]*),("((?:\\"|[^"])*)"|[^"]*)(\]|\))\z/.match(string)
exclude_begin = matches[1] == '('
exclude_end = matches[6] == ')'
# If the input is quoted, it needs to be unescaped. Also, quoted input isn't
# checked for emptiness, since the empty quoted string is considered an
# element that happens to be the empty string, while an unquoted empty string
# is considered unbounded.
#
# While PostgreSQL allows pure escaping for input (without quoting), it appears
# to always use the quoted output form when characters need to be escaped, so
# there isn't a need to unescape unquoted output.
if beg = matches[3]
beg.gsub!(/\\(.)/, '\1')
else
beg = matches[2] unless matches[2].empty?
end
if en = matches[5]
en.gsub!(/\\(.)/, '\1')
else
en = matches[4] unless matches[4].empty?
end
if c = converter
beg = c.call(beg) if beg
en = c.call(en) if en
end
PGRange.new(beg, en, :exclude_begin=>exclude_begin, :exclude_end=>exclude_end, :db_type=>db_type)
end
end
module DatabaseMethods
# Add the conversion procs to the database
# and extend the datasets to correctly literalize ruby Range values.
def self.extended(db)
db.instance_exec do
@pg_range_schema_types ||= {}
extend_datasets(DatasetMethods)
register_range_type('int4range', :oid=>3904, :subtype_oid=>23)
register_range_type('numrange', :oid=>3906, :subtype_oid=>1700)
register_range_type('tsrange', :oid=>3908, :subtype_oid=>1114)
register_range_type('tstzrange', :oid=>3910, :subtype_oid=>1184)
register_range_type('daterange', :oid=>3912, :subtype_oid=>1082)
register_range_type('int8range', :oid=>3926, :subtype_oid=>20)
if respond_to?(:register_array_type)
register_array_type('int4range', :oid=>3905, :scalar_oid=>3904, :scalar_typecast=>:int4range)
register_array_type('numrange', :oid=>3907, :scalar_oid=>3906, :scalar_typecast=>:numrange)
register_array_type('tsrange', :oid=>3909, :scalar_oid=>3908, :scalar_typecast=>:tsrange)
register_array_type('tstzrange', :oid=>3911, :scalar_oid=>3910, :scalar_typecast=>:tstzrange)
register_array_type('daterange', :oid=>3913, :scalar_oid=>3912, :scalar_typecast=>:daterange)
register_array_type('int8range', :oid=>3927, :scalar_oid=>3926, :scalar_typecast=>:int8range)
end
[:int4range, :numrange, :tsrange, :tstzrange, :daterange, :int8range].each do |v|
@schema_type_classes[v] = PGRange
end
procs = conversion_procs
add_conversion_proc(3908, Parser.new("tsrange", procs[1114]))
add_conversion_proc(3910, Parser.new("tstzrange", procs[1184]))
if respond_to?(:register_array_type) && defined?(PGArray::Creator)
add_conversion_proc(3909, PGArray::Creator.new("tsrange", procs[3908]))
add_conversion_proc(3911, PGArray::Creator.new("tstzrange", procs[3910]))
end
end
end
# Handle Range and PGRange values in bound variables
def bound_variable_arg(arg, conn)
case arg
when PGRange
arg.unquoted_literal(schema_utility_dataset)
when Range
PGRange.from_range(arg).unquoted_literal(schema_utility_dataset)
else
super
end
end
# Freeze the pg range schema types to prevent adding new ones.
def freeze
@pg_range_schema_types.freeze
super
end
# Register a database specific range type. This can be used to support
# different range types per Database. Options:
#
# :converter :: A callable object (e.g. Proc), that is called with the start or end of the range
# (usually a string), and should return the appropriate typecasted object.
# :oid :: The PostgreSQL OID for the range type. This is used by the Sequel postgres adapter
# to set up automatic type conversion on retrieval from the database.
# :subtype_oid :: Should be the PostgreSQL OID for the range's subtype. If given,
# automatically sets the :converter option by looking up the scalar
# conversion proc for that OID.
#
# If a block is given, it is treated as the :converter option.
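#
# A minimal sketch of the block form (the type name and oid here are
# hypothetical):
#
# DB.register_range_type('floatrange', :oid=>range_oid){|v| Float(v)}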
def register_range_type(db_type, opts=OPTS, &block)
oid = opts[:oid]
soid = opts[:subtype_oid]
if has_converter = opts.has_key?(:converter)
raise Error, "can't provide both a block and :converter option to register_range_type" if block
converter = opts[:converter]
else
has_converter = true if block
converter = block
end
unless (soid || has_converter) && oid
range_oid, subtype_oid = from(:pg_range).join(:pg_type, :oid=>:rngtypid).where(:typname=>db_type.to_s).get([:rngtypid, :rngsubtype])
soid ||= subtype_oid unless has_converter
oid ||= range_oid
end
db_type = db_type.to_s.dup.freeze
if soid
raise Error, "can't provide both a converter and :subtype_oid option to register_range_type" if has_converter
raise Error, "no conversion proc for :subtype_oid=>#{soid.inspect} in conversion_procs" unless converter = conversion_procs[soid]
end
parser = Parser.new(db_type, converter)
add_conversion_proc(oid, parser)
@pg_range_schema_types[db_type] = db_type.to_sym
singleton_class.class_eval do
meth = :"typecast_value_#{db_type}"
define_method(meth){|v| typecast_value_pg_range(v, parser)}
private meth
end
@schema_type_classes[:"#{opts[:type_symbol] || db_type}"] = PGRange
nil
end
private
# Recognize the registered database range types.
def schema_column_type(db_type)
@pg_range_schema_types[db_type] || super
end
# Set the :ruby_default value if the default value is recognized as a range.
def schema_post_process(_)
super.each do |a|
h = a[1]
db_type = h[:db_type]
if @pg_range_schema_types[db_type] && h[:default] =~ /\A'([^']+)'::#{db_type}\z/
default = $1
if convertor = conversion_procs[h[:oid]]
h[:ruby_default] = convertor.call(default)
end
end
end
end
# Typecast value correctly to a PGRange. If already a
# PGRange instance with the same db_type, return as is.
# If a PGRange with a different subtype, return a new
# PGRange with the same values and the expected subtype.
# If a Range object, create a PGRange with the given
# db_type. If a string, assume it is in PostgreSQL
# output format and parse it using the parser.
def typecast_value_pg_range(value, parser)
case value
when PGRange
if value.db_type.to_s == parser.db_type
value
elsif value.empty?
PGRange.empty(parser.db_type)
else
PGRange.new(value.begin, value.end, :exclude_begin=>value.exclude_begin?, :exclude_end=>value.exclude_end?, :db_type=>parser.db_type)
end
when Range
PGRange.from_range(value, parser.db_type)
when String
parser.call(typecast_check_string_length(value, 100))
else
raise Sequel::InvalidValue, "invalid value for range type: #{value.inspect}"
end
end
end
module DatasetMethods
private
# Handle literalization of ruby Range objects, treating them as
# PostgreSQL ranges.
def literal_other_append(sql, v)
case v
when Range
super(sql, Sequel::Postgres::PGRange.from_range(v))
else
super
end
end
end
include Enumerable
# The beginning of the range. If nil, the range has an unbounded beginning.
attr_reader :begin
# The end of the range. If nil, the range has an unbounded ending.
attr_reader :end
# The PostgreSQL database type for the range (e.g. 'int4range').
attr_reader :db_type
# Create a new PGRange instance using the beginning and ending of the ruby Range,
# with the given db_type.
def self.from_range(range, db_type=nil)
new(range.begin, range.end, :exclude_end=>range.exclude_end?, :db_type=>db_type)
end
# Create an empty PGRange with the given database type.
def self.empty(db_type=nil)
new(nil, nil, :empty=>true, :db_type=>db_type)
end
# Initialize a new PGRange instance. Accepts the following options:
#
# :db_type :: The PostgreSQL database type for the range.
# :empty :: Whether the range is empty (has no points)
# :exclude_begin :: Whether the beginning element is excluded from the range.
# :exclude_end :: Whether the ending element is excluded from the range.
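#
# For example (a sketch):
#
# Sequel::Postgres::PGRange.new(1, 5, :exclude_end=>true, :db_type=>'int4range')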
def initialize(beg, en, opts=OPTS)
@begin = beg
@end = en
@empty = !!opts[:empty]
@exclude_begin = !!opts[:exclude_begin]
@exclude_end = !!opts[:exclude_end]
@db_type = opts[:db_type]
if @empty
raise(Error, 'cannot have an empty range with either a beginning or ending') unless @begin.nil? && @end.nil? && opts[:exclude_begin].nil? && opts[:exclude_end].nil?
end
end
# Delegate to the ruby range object so that the object mostly acts like a range.
range_methods = %w'each last first step'
range_methods.each do |m|
class_eval("def #{m}(*a, &block) to_range.#{m}(*a, &block) end", __FILE__, __LINE__)
end
# Return whether the value is inside the range.
def cover?(value)
return false if empty?
b = self.begin
return false if b && b.public_send(exclude_begin? ? :>= : :>, value)
e = self.end
return false if e && e.public_send(exclude_end? ? :<= : :<, value)
true
end
# Consider the receiver equal to other PGRange instances with the
# same beginning, ending, exclusions, and database type. Also consider
# it equal to Range instances if this PGRange can be converted to
# a Range and those ranges are equal.
def eql?(other)
case other
when PGRange
if db_type == other.db_type
if empty?
other.empty?
elsif other.empty?
false
else
[:@begin, :@end, :@exclude_begin, :@exclude_end].all?{|v| instance_variable_get(v) == other.instance_variable_get(v)}
end
else
false
end
when Range
if valid_ruby_range?
to_range.eql?(other)
else
false
end
else
false
end
end
alias == eql?
# Make sure equal ranges have the same hash.
def hash
if @empty
@db_type.hash
else
[@begin, @end, @exclude_begin, @exclude_end, @db_type].hash
end
end
# Allow PGRange values in case statements, where they return true if they
# are equal to each other using eql?, or if this PGRange can be converted
# to a Range, delegating to that range.
def ===(other)
if eql?(other)
true
else
if valid_ruby_range?
to_range === other
else
false
end
end
end
# Whether this range is empty (has no points). Note that for manually created ranges
# (ones not retrieved from the database), this will only be true if the range
# was created using the :empty option.
def empty?
@empty
end
# Whether the beginning element is excluded from the range.
def exclude_begin?
@exclude_begin
end
# Whether the ending element is excluded from the range.
def exclude_end?
@exclude_end
end
# Append a literalized version of the receiver to the sql.
def sql_literal_append(ds, sql)
if (s = @db_type) && !empty?
sql << s.to_s << "("
ds.literal_append(sql, self.begin)
sql << ','
ds.literal_append(sql, self.end)
sql << ','
ds.literal_append(sql, "#{exclude_begin? ? "(" : "["}#{exclude_end? ? ")" : "]"}")
sql << ")"
else
ds.literal_append(sql, unquoted_literal(ds))
if s
sql << '::' << s.to_s
end
end
end
ENDLESS_RANGE_NOT_SUPPORTED = RUBY_VERSION < '2.6'
STARTLESS_RANGE_NOT_SUPPORTED = RUBY_VERSION < '2.7'
# Return a ruby Range object for this instance, if one can be created.
def to_range
return @range if @range
raise(Error, "cannot create ruby range for an empty PostgreSQL range") if empty?
raise(Error, "cannot create ruby range when PostgreSQL range excludes beginning element") if exclude_begin?
# :nocov:
raise(Error, "cannot create ruby range when PostgreSQL range has unbounded beginning") if STARTLESS_RANGE_NOT_SUPPORTED && !self.begin
raise(Error, "cannot create ruby range when PostgreSQL range has unbounded ending") if ENDLESS_RANGE_NOT_SUPPORTED && !self.end
# :nocov:
@range = Range.new(self.begin, self.end, exclude_end?)
end
# Whether or not this PGRange is a valid ruby range. In order to be a valid ruby range,
# it must have a beginning and an ending (no unbounded ranges), and it cannot exclude
# the beginning element.
def valid_ruby_range?
!(empty? || exclude_begin? || (STARTLESS_RANGE_NOT_SUPPORTED && !self.begin) || (ENDLESS_RANGE_NOT_SUPPORTED && !self.end))
end
# Whether the beginning of the range is unbounded.
def unbounded_begin?
self.begin.nil? && !empty?
end
# Whether the end of the range is unbounded.
def unbounded_end?
self.end.nil? && !empty?
end
# Return a string containing the unescaped version of the range.
# Separated out for use by the bound argument code.
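#
# For example (a sketch, using a dataset for literalization):
#
# PGRange.new(1, 5, :exclude_end=>true).unquoted_literal(DB.dataset)
# # => "[1,5)"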
def unquoted_literal(ds)
if empty?
'empty'
else
"#{exclude_begin? ? "(" : "["}#{escape_value(self.begin, ds)},#{escape_value(self.end, ds)}#{exclude_end? ? ")" : "]"}"
end
end
# Allow automatic parameterization for ranges with types.
def sequel_auto_param_type(ds)
"::#{db_type}" if db_type
end
private
# Escape common range types. Instead of quoting, just backslash escape all
# special characters.
def escape_value(k, ds)
case k
when nil
''
when Date, Time
ds.literal(k)[1...-1]
when Integer, Float
k.to_s
when BigDecimal
k.to_s('F')
when LiteralString
k
when String
if k.empty?
'""'
else
k.gsub(/("|,|\\|\[|\]|\(|\))/, '\\\\\1')
end
else
ds.literal(k).gsub(/("|,|\\|\[|\]|\(|\))/, '\\\\\1')
end
end
end
end
module SQL::Builders
# Convert the object to a Postgres::PGRange.
def pg_range(v, db_type=nil)
case v
when Postgres::PGRange
if db_type.nil? || v.db_type == db_type
v
else
Postgres::PGRange.new(v.begin, v.end, :exclude_begin=>v.exclude_begin?, :exclude_end=>v.exclude_end?, :db_type=>db_type)
end
when Range
Postgres::PGRange.from_range(v, db_type)
else
# May not be defined unless the pg_range_ops extension is used
pg_range_op(v)
end
end
end
Database.register_extension(:pg_range, Postgres::PGRange::DatabaseMethods)
end
# :nocov:
if Sequel.core_extensions?
class Range
# Create a new PGRange using the receiver as the input range,
# with the given database type.
def pg_range(db_type=nil)
Sequel::Postgres::PGRange.from_range(self, db_type)
end
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Range do
def pg_range(db_type=nil)
Sequel::Postgres::PGRange.from_range(self, db_type)
end
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_range_ops.rb 0000664 0000000 0000000 00000014141 14342141206 0022334 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_range_ops extension adds support to Sequel's DSL to make
# it easier to call PostgreSQL range and multirange functions and operators.
#
# To load the extension:
#
# Sequel.extension :pg_range_ops
#
# The most common usage is passing an expression to Sequel.pg_range_op:
#
# r = Sequel.pg_range_op(:range)
#
# If you have also loaded the pg_range or pg_multirange extensions, you can use
# Sequel.pg_range or Sequel.pg_multirange as well:
#
# r = Sequel.pg_range(:range)
# r = Sequel.pg_multirange(:range)
#
# Also, on most Sequel expression objects, you can call the pg_range
# method:
#
# r = Sequel[:range].pg_range
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Symbol#pg_range:
#
# r = :range.pg_range
#
# This creates a Sequel::Postgres::RangeOp object that can be used
# for easier querying:
#
# r.contains(:other) # range @> other
# r.contained_by(:other) # range <@ other
# r.overlaps(:other) # range && other
# r.left_of(:other) # range << other
# r.right_of(:other) # range >> other
# r.starts_after(:other) # range &> other
# r.ends_before(:other) # range &< other
# r.adjacent_to(:other) # range -|- other
#
# r.lower # lower(range)
# r.upper # upper(range)
# r.isempty # isempty(range)
# r.lower_inc # lower_inc(range)
# r.upper_inc # upper_inc(range)
# r.lower_inf # lower_inf(range)
# r.upper_inf # upper_inf(range)
#
# All of the above methods work for both ranges and multiranges, as long
# as PostgreSQL supports the operation. The following methods are also
# supported:
#
# r.range_merge # range_merge(range)
# r.unnest # unnest(range)
# r.multirange # multirange(range)
#
# +range_merge+ and +unnest+ expect the receiver to represent a multirange
# value, while +multirange+ expects the receiver to represent a range value.
#
# See the PostgreSQL range and multirange function and operator documentation for more
# details on what these functions and operators do.
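#
# For example, to select rows whose range column contains a given value
# (a usage sketch; the table and column names are hypothetical):
#
# DB[:reservations].where(Sequel.pg_range_op(:during).contains(Date.today))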
#
# If you are also using the pg_range or pg_multirange extension, you should
# load them before loading this extension. Doing so will allow you to use
# PGRange#op and PGMultiRange#op to get a RangeOp, allowing you to perform
# range operations on range literals.
#
# Related module: Sequel::Postgres::RangeOp
#
module Sequel
module Postgres
# The RangeOp class is a simple container for a single object that
# defines methods that yield Sequel expression objects representing
# PostgreSQL range operators and functions.
#
# Most methods in this class are defined via metaprogramming, see
# the pg_range_ops extension documentation for details on the API.
class RangeOp < Sequel::SQL::Wrapper
OPERATORS = {
:contains => ["(".freeze, " @> ".freeze, ")".freeze].freeze,
:contained_by => ["(".freeze, " <@ ".freeze, ")".freeze].freeze,
:left_of => ["(".freeze, " << ".freeze, ")".freeze].freeze,
:right_of => ["(".freeze, " >> ".freeze, ")".freeze].freeze,
:ends_before => ["(".freeze, " &< ".freeze, ")".freeze].freeze,
:starts_after => ["(".freeze, " &> ".freeze, ")".freeze].freeze,
:adjacent_to => ["(".freeze, " -|- ".freeze, ")".freeze].freeze,
:overlaps => ["(".freeze, " && ".freeze, ")".freeze].freeze,
}.freeze
%w'lower upper isempty lower_inc upper_inc lower_inf upper_inf unnest'.each do |f|
class_eval("def #{f}; function(:#{f}) end", __FILE__, __LINE__)
end
%w'range_merge multirange'.each do |f|
class_eval("def #{f}; RangeOp.new(function(:#{f})) end", __FILE__, __LINE__)
end
OPERATORS.each_key do |f|
class_eval("def #{f}(v); operator(:#{f}, v) end", __FILE__, __LINE__)
end
# These operators are already supported by the wrapper, but for ranges they
# return ranges, so wrap the results in another RangeOp.
%w'+ * -'.each do |f|
class_eval("def #{f}(v); RangeOp.new(super) end", __FILE__, __LINE__)
end
# Return the receiver.
def pg_range
self
end
private
# Create a boolean expression for the given type and argument.
def operator(type, other)
Sequel::SQL::BooleanExpression.new(:NOOP, Sequel::SQL::PlaceholderLiteralString.new(OPERATORS[type], [value, other]))
end
# Return a function called with the receiver.
def function(name)
Sequel::SQL::Function.new(name, self)
end
end
module RangeOpMethods
# Wrap the receiver in a RangeOp so you can easily use the PostgreSQL
# range functions and operators with it.
def pg_range
RangeOp.new(self)
end
end
# :nocov:
if defined?(PGRange)
# :nocov:
class PGRange
# Wrap the PGRange instance in a RangeOp, allowing you to easily use
# the PostgreSQL range functions and operators with literal ranges.
def op
RangeOp.new(self)
end
end
end
# :nocov:
if defined?(PGMultiRange)
# :nocov:
class PGMultiRange
# Wrap the PGMultiRange instance in a RangeOp, allowing you to easily use
# the PostgreSQL range functions and operators with literal multiranges.
def op
RangeOp.new(self)
end
end
end
end
module SQL::Builders
# Return the expression wrapped in the Postgres::RangeOp.
def pg_range_op(v)
case v
when Postgres::RangeOp
v
else
Postgres::RangeOp.new(v)
end
end
end
class SQL::GenericExpression
include Sequel::Postgres::RangeOpMethods
end
class LiteralString
include Sequel::Postgres::RangeOpMethods
end
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::Postgres::RangeOpMethods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::Postgres::RangeOpMethods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_row.rb 0000664 0000000 0000000 00000050760 14342141206 0021175 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_row extension adds support for Sequel to handle
# PostgreSQL's row-valued/composite types.
#
# This extension integrates with Sequel's native postgres and jdbc/postgresql adapters, so
# that when composite fields are retrieved, they are parsed and returned
# as instances of Sequel::Postgres::PGRow::(HashRow|ArrayRow), or
# optionally a custom type. HashRow and ArrayRow are DelegateClasses of
# Hash and Array, so they mostly act like a hash or array, but not
# completely (is_a?(Hash) and is_a?(Array) are false). If you want the
# actual hash for a HashRow, call HashRow#to_hash, and if you want the
# actual array for an ArrayRow, call ArrayRow#to_a. This is done so
# that Sequel does not treat such values like an Array or Hash by default,
# which would cause issues.
#
# In addition to the parsers, this extension comes with literalizers
# for HashRow and ArrayRow using the standard Sequel literalization callbacks, so
# they work on all adapters.
#
# To use this extension, first load it into the Database instance:
#
# DB.extension :pg_row
#
# If you plan to use arrays of composite types, make sure you load the
# pg_array extension first:
#
# DB.extension :pg_array, :pg_row
#
# You can create an anonymous row type by calling the Sequel.pg_row with
# an array:
#
# Sequel.pg_row(array)
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Array#pg_row:
#
# array.pg_row
#
# However, in most cases you are going to want something beyond anonymous
# row types. This extension allows you to register row types on a per
# database basis, using Database#register_row_type:
#
# DB.register_row_type(:foo)
#
# When you register the row type, Sequel will query the PostgreSQL
# system tables to find the related metadata, and will setup
# a custom HashRow subclass for that type. This includes looking up
# conversion procs for each column in the type, so that when the composite
# type is returned from the database, the members of the type have
# the correct type. Additionally, if the composite type also has an
# array form, Sequel registers an array type for the composite type,
# so that array columns of the composite type are converted correctly.
#
# You can then create values of that type by using Database#row_type:
#
# DB.row_type(:address, ['123 Sesame St.', 'Some City', '12345'])
#
# Let's say table address has columns street, city, and zip. This would return
# something similar to:
#
# {:street=>'123 Sesame St.', :city=>'Some City', :zip=>'12345'}
#
# You can also use a hash:
#
# DB.row_type(:address, street: '123 Sesame St.', city: 'Some City', zip: '12345')
#
# So if you have a person table that has an address column, here's how you
# could insert into the column:
#
# DB[:table].insert(address: DB.row_type(:address, street: '123 Sesame St.', city: 'Some City', zip: '12345'))
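#
# When such a row is retrieved (with the type registered), the composite
# value acts mostly like a hash (a sketch):
#
# DB[:table].get(:address)[:street] # => '123 Sesame St.'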
#
# Note that registering row types without providing an explicit :converter option
# creates anonymous classes. This results in ruby being unable to Marshal such
# objects. You can work around this by assigning the anonymous class to a constant.
# To get a list of such anonymous classes, you can use the following code:
#
# DB.conversion_procs.select{|k,v| v.is_a?(Sequel::Postgres::PGRow::Parser) && \
# v.converter && (v.converter.name.nil? || v.converter.name == '') }.map{|k,v| v}
#
# See the {schema modification guide}[rdoc-ref:doc/schema_modification.rdoc]
# for details on using row type columns in CREATE/ALTER TABLE statements.
#
# This extension requires both the strscan and delegate libraries.
#
# Related module: Sequel::Postgres::PGRow
require 'delegate'
require 'strscan'
module Sequel
module Postgres
module PGRow
# Class for row-valued/composite types that are treated as arrays. By default,
# this is only used for generic PostgreSQL record types, as registered
# types use HashRow by default.
class ArrayRow < DelegateClass(Array)
include Sequel::SQL::AliasMethods
class << self
# The database type for this class. May be nil if this class
# does not have a specific database type.
attr_accessor :db_type
# Alias new to call, so that the class itself can be used
# directly as a converter.
alias call new
end
# Create a subclass associated with a specific database type.
# This is done so that instances of this subclass are
# automatically casted to the database type when literalizing.
def self.subclass(db_type)
Class.new(self) do
@db_type = db_type
end
end
# Sets the database type associated with this instance. This is
# used to override the class's default database type.
attr_writer :db_type
# Return the instance's database type, or the class's database
# type if the instance has not overridden it.
def db_type
@db_type || self.class.db_type
end
# Append SQL fragment related to this object to the sql.
def sql_literal_append(ds, sql)
sql << 'ROW'
ds.literal_append(sql, to_a)
if db_type
sql << '::'
ds.quote_schema_table_append(sql, db_type)
end
end
# Allow automatic parameterization if all values support it.
def sequel_auto_param_type(ds)
if db_type && all?{|v| nil == v || ds.send(:auto_param_type, v)}
s = String.new << "::"
ds.quote_schema_table_append(s, db_type)
s
end
end
end
# Class for row-valued/composite types that are treated as hashes.
# Types registered via Database#register_row_type will use this
# class by default.
class HashRow < DelegateClass(Hash)
include Sequel::SQL::AliasMethods
class << self
# The columns associated with this class.
attr_accessor :columns
# The database type for this class. May be nil if this class
# done not have a specific database type.
attr_accessor :db_type
# Alias new to call, so that the class itself can be used
# directly as a converter.
alias call new
end
# Create a new subclass of this class with the given database
# type and columns.
def self.subclass(db_type, columns)
Class.new(self) do
@db_type = db_type
@columns = columns
end
end
# Return the underlying hash for this delegate object.
alias to_hash __getobj__
# Sets the columns associated with this instance. This is
# used to override the class's default columns.
attr_writer :columns
# Sets the database type associated with this instance. This is
# used to override the class's default database type.
attr_writer :db_type
# Return the instance's columns, or the class's columns
# if the instance has not overridden it.
def columns
@columns || self.class.columns
end
# Return the instance's database type, or the class's database type
# if the instance has not overridden it.
def db_type
@db_type || self.class.db_type
end
# Check that the HashRow has valid columns. This should be used
# before all attempts to literalize the object, since literalization
# depends on the columns to get the column order.
def check_columns!
if columns.nil? || columns.empty?
raise Error, 'cannot literalize HashRow without columns'
end
end
# Append SQL fragment related to this object to the sql.
def sql_literal_append(ds, sql)
check_columns!
sql << 'ROW'
ds.literal_append(sql, values_at(*columns))
if db_type
sql << '::'
ds.quote_schema_table_append(sql, db_type)
end
end
# Allow automatic parameterization if all values support it.
def sequel_auto_param_type(ds)
if db_type && all?{|_,v| nil == v || ds.send(:auto_param_type, v)}
s = String.new << "::"
ds.quote_schema_table_append(s, db_type)
s
end
end
end
ROW_TYPE_CLASSES = [HashRow, ArrayRow].freeze
# This parser-like class splits the PostgreSQL
# row-valued/composite type output string format
# into an array of strings. Note this class makes
# no attempt to handle all input formats that PostgreSQL
# will accept, it only handles the output format that
# PostgreSQL uses.
class Splitter < StringScanner
# Split the stored string into an array of strings, handling
# the different types of quoting.
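#
# A sketch of the behavior:
#
# Splitter.new('(1,"a, b",)').parse
# # => ["1", "a, b", nil]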
def parse
values = []
skip(/\(/)
if skip(/\)/)
values << nil
else
# :nocov:
until eos?
# :nocov:
if skip(/"/)
values << scan(/(\\.|""|[^"])*/).gsub(/\\(.)|"(")/, '\1\2')
skip(/"[,)]/)
else
v = scan(/[^,)]*/)
values << (v unless v.empty?)
skip(/[,)]/)
end
end
end
values
end
end
# The Parser is responsible for taking the input string
# from PostgreSQL, and returning an appropriate ruby
# object that the type represents, such as an ArrayRow or
# HashRow.
class Parser
# The columns for the parser, if any. If the parser has
# no columns, it will treat the input as an array. If
# it has columns, it will treat the input as a hash.
# If present, should be an array of strings.
attr_reader :columns
# Converters for each member in the composite type. If
# not present, no conversion will be done, so values will
# remain strings. If present, should be an array of
# callable objects.
attr_reader :column_converters
# The OIDs for each member in the composite type. Not
# currently used, but made available for user code.
attr_reader :column_oids
# A converter for the object as a whole. Used to wrap
# the returned array/hash in another object, such as an
# ArrayRow or HashRow. If present, should be callable.
attr_reader :converter
# The oid for the composite type itself.
attr_reader :oid
# A callable object used for typecasting the object. This
# is similar to the converter, but it is called by the
# typecasting code, which has different assumptions than
# the converter. For instance, the converter should be
# called with all of the member values already typecast,
# but the typecaster may not be.
attr_reader :typecaster
# Sets each of the parser's attributes, using options with
# the same name (e.g. :columns sets the columns attribute).
def initialize(h=OPTS)
@columns = h[:columns]
@column_converters = h[:column_converters]
@column_oids = h[:column_oids]
@converter = h[:converter]
@typecaster = h[:typecaster]
@oid = h[:oid]
end
# Convert the PostgreSQL composite type input format into
# an appropriate ruby object.
def call(s)
convert(convert_format(convert_columns(Splitter.new(s).parse)))
end
# Typecast the given object to the appropriate type using the
# typecaster. Note that this does not do conversion for the members
# of the composite type, since those conversions expect strings and
# strings may not be provided.
def typecast(obj)
case obj
when Array
_typecast(convert_format(obj))
when Hash
unless @columns
raise Error, 'PGRow::Parser without columns cannot typecast from a hash'
end
_typecast(obj)
else
raise Error, 'PGRow::Parser can only typecast arrays and hashes'
end
end
private
# If the parser has a typecaster, call it with
# the object, otherwise return the object as is.
def _typecast(obj)
if t = @typecaster
t.call(obj)
else
obj
end
end
# If the parser has column converters, map the
# array of strings input to an array of appropriate
# ruby objects, one for each converter.
def convert_columns(arr)
if ccs = @column_converters
arr.zip(ccs).map{|v, pr| (v && pr) ? pr.call(v) : v}
else
arr
end
end
# If the parser has columns, return a hash assuming
# that the array is ordered by the columns.
def convert_format(arr)
if cs = @columns
h = {}
arr.zip(cs).each{|v, c| h[c] = v}
h
else
arr
end
end
# If the parser has a converter, call it with the object,
# otherwise return the object as is.
def convert(obj)
if c = @converter
c.call(obj)
else
obj
end
end
end
module DatabaseMethods
# A hash mapping row type keys (usually symbols), to option
# hashes. At the least, the values will contain the :parser
# option for the Parser instance that the type will use.
attr_reader :row_types
# Do some setup for the data structures the module uses.
def self.extended(db)
db.instance_exec do
@row_types = {}
@row_schema_types = {}
extend(@row_type_method_module = Module.new)
add_conversion_proc(2249, PGRow::Parser.new(:converter=>PGRow::ArrayRow))
if respond_to?(:register_array_type)
register_array_type('record', :oid=>2287, :scalar_oid=>2249)
end
end
end
# Handle ArrayRow and HashRow values in bound variables.
def bound_variable_arg(arg, conn)
case arg
when ArrayRow
"(#{arg.map{|v| bound_variable_array(v) if v}.join(',')})"
when HashRow
arg.check_columns!
"(#{arg.values_at(*arg.columns).map{|v| bound_variable_array(v) if v}.join(',')})"
else
super
end
end
# Freeze the row types and row schema types to prevent adding new ones.
def freeze
@row_types.freeze
@row_schema_types.freeze
@row_type_method_module.freeze
super
end
# Register a new row type for the Database instance. db_type should be the type
# symbol. This parses the PostgreSQL system tables to get information about the
# composite type, and by default has the type return instances of a subclass
# of HashRow.
#
# The following options are supported:
#
# :converter :: Use a custom converter for the parser.
# :typecaster :: Use a custom typecaster for the parser.
def register_row_type(db_type, opts=OPTS)
procs = @conversion_procs
rel_oid = nil
array_oid = nil
parser_opts = {}
# Try to handle schema-qualified types.
type_schema, type_name = schema_and_table(db_type)
schema_type_string = type_name.to_s
# Get basic oid information for the composite type.
ds = from(:pg_type).
select{[pg_type[:oid], :typrelid, :typarray]}.
where([[:typtype, 'c'], [:typname, type_name.to_s]])
if type_schema
ds = ds.join(:pg_namespace, [[:oid, :typnamespace], [:nspname, type_schema.to_s]])
schema_type_symbol = :"pg_row_#{type_schema}__#{type_name}"
else
schema_type_symbol = :"pg_row_#{type_name}"
end
unless row = ds.first
raise Error, "row type #{db_type.inspect} not found in database"
end
# Manually cast to integer using to_i, because adapter may not cast oid type
# correctly (e.g. swift)
parser_opts[:oid], rel_oid, array_oid = row.values_at(:oid, :typrelid, :typarray).map(&:to_i)
# Get column names and oids for each of the members of the composite type.
res = from(:pg_attribute).
join(:pg_type, :oid=>:atttypid).
where(:attrelid=>rel_oid).
where{attnum > 0}.
exclude(:attisdropped).
order(:attnum).
select_map{[:attname, Sequel.case({0=>:atttypid}, pg_type[:typbasetype], pg_type[:typbasetype]).as(:atttypid)]}
if res.empty?
raise Error, "no columns for row type #{db_type.inspect} in database"
end
parser_opts[:columns] = res.map{|r| r[0].to_sym}
parser_opts[:column_oids] = res.map{|r| r[1].to_i}
# Using the conversion_procs, lookup converters for each member of the composite type
parser_opts[:column_converters] = parser_opts[:column_oids].map do |oid|
procs[oid]
end
# Setup the converter and typecaster
parser_opts[:converter] = opts.fetch(:converter){HashRow.subclass(db_type, parser_opts[:columns])}
parser_opts[:typecaster] = opts.fetch(:typecaster, parser_opts[:converter])
parser = Parser.new(parser_opts)
add_conversion_proc(parser.oid, parser)
if respond_to?(:register_array_type) && array_oid && array_oid > 0
array_type_name = if type_schema
"#{type_schema}.#{type_name}"
else
type_name
end
register_array_type(array_type_name, :oid=>array_oid, :converter=>parser, :scalar_typecast=>schema_type_symbol)
end
@row_types[literal(db_type)] = opts.merge(:parser=>parser, :type=>db_type)
@row_schema_types[schema_type_string] = schema_type_symbol
@schema_type_classes[schema_type_symbol] = ROW_TYPE_CLASSES
@row_type_method_module.class_eval do
meth = :"typecast_value_#{schema_type_symbol}"
define_method(meth) do |v|
row_type(db_type, v)
end
private meth
alias_method(meth, meth)
end
nil
end
# Handle typecasting of the given object to the given database type.
# In general, the given database type should already be registered,
# but if obj is an array, this will handle unregistered types.
def row_type(db_type, obj)
(type_hash = @row_types[literal(db_type)]) &&
(parser = type_hash[:parser])
case obj
when ArrayRow, HashRow
obj
when Array
if parser
parser.typecast(obj)
else
obj = ArrayRow.new(obj)
obj.db_type = db_type
obj
end
when Hash
if parser
parser.typecast(obj)
else
raise InvalidValue, "Database#row_type requires the #{db_type.inspect} type have a registered parser and typecaster when called with a hash"
end
else
raise InvalidValue, "cannot convert #{obj.inspect} to row type #{db_type.inspect}"
end
end
private
# Make the column type detection handle registered row types.
def schema_column_type(db_type)
if type = @row_schema_types[db_type]
type
else
super
end
end
end
end
end
module SQL::Builders
# Wraps the expr array in an anonymous Postgres::PGRow::ArrayRow instance.
def pg_row(expr)
case expr
when Array
Postgres::PGRow::ArrayRow.new(expr)
else
# Will only work if pg_row_ops extension is loaded
pg_row_op(expr)
end
end
end
Database.register_extension(:pg_row, Postgres::PGRow::DatabaseMethods)
end
# :nocov:
if Sequel.core_extensions?
class Array
# Wraps the receiver in an anonymous Sequel::Postgres::PGRow::ArrayRow instance.
def pg_row
Sequel::Postgres::PGRow::ArrayRow.new(self)
end
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Array do
def pg_row
Sequel::Postgres::PGRow::ArrayRow.new(self)
end
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_row_ops.rb 0000664 0000000 0000000 00000014722 14342141206 0022054 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_row_ops extension adds support to Sequel's DSL to make
# it easier to deal with PostgreSQL row-valued/composite types.
#
# To load the extension:
#
# Sequel.extension :pg_row_ops
#
# The most common usage is passing an expression to Sequel.pg_row_op:
#
# r = Sequel.pg_row_op(:row_column)
#
# If you have also loaded the pg_row extension, you can use
# Sequel.pg_row as well:
#
# r = Sequel.pg_row(:row_column)
#
# Also, on most Sequel expression objects, you can call the pg_row
# method:
#
# r = Sequel[:row_column].pg_row
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Symbol#pg_row:
#
# r = :row_column.pg_row
#
# There's only fairly basic support currently. You can use the [] method to access
# a member of the composite type:
#
# r[:a] # (row_column).a
#
# This can be chained:
#
# r[:a][:b] # ((row_column).a).b
#
# If you've loaded the pg_array_ops extension, there is also support for composite
# types that include arrays, or arrays of composite types:
#
# r[1][:a] # (row_column[1]).a
# r[:a][1] # (row_column).a[1]
#
# The only other support is the splat method:
#
# r.splat # (row_column.*)
#
# The splat method is necessary if you are trying to reference a table's type when the
# table has the same name as one of its columns. For example:
#
# DB.create_table(:a){Integer :a; Integer :b}
#
# Let's say you want to reference the composite type for the table:
#
# a = Sequel.pg_row_op(:a)
# DB[:a].select(a[:b]) # SELECT (a).b FROM a
#
# Unfortunately, that doesn't work, as it references the integer column, not the table.
# The splat method works around this:
#
# DB[:a].select(a.splat[:b]) # SELECT (a.*).b FROM a
#
# Splat also takes an argument which is used for casting. This is necessary if you
# want to return the composite type itself, instead of the columns in the composite
# type. For example:
#
# DB[:a].select(a.splat).first # SELECT (a.*) FROM a
# # => {:a=>1, :b=>2}
#
# By casting the expression, you can get a composite type returned:
#
# DB[:a].select(a.splat(:a)).first # SELECT (a.*)::a FROM a
# # => {:a=>"(1,2)"} # or {:a=>{:a=>1, :b=>2}} if the "a" type has been registered
# # with the pg_row extension
#
# This feature is mostly useful for a different way to graph tables:
#
# DB[:a].join(:b, id: :b_id).select(Sequel.pg_row_op(:a).splat(:a),
# Sequel.pg_row_op(:b).splat(:b))
# # SELECT (a.*)::a, (b.*)::b FROM a INNER JOIN b ON (b.id = a.b_id)
# # => {:a=>{:id=>1, :b_id=>2}, :b=>{:id=>2}}
#
# Related module: Sequel::Postgres::PGRowOp
#
module Sequel
module Postgres
# This class represents a composite type expression reference.
class PGRowOp < SQL::PlaceholderLiteralString
ROW = ['(', '.*)'].freeze.each(&:freeze)
ROW_CAST = ['(', '.*)::'].freeze.each(&:freeze)
QUALIFY = ['(', ').'].freeze.each(&:freeze)
WRAP = [""].freeze.each(&:freeze)
# Wrap the expression in a PGRowOp, without changing the
# SQL it would use.
def self.wrap(expr)
PGRowOp.new(WRAP, [expr])
end
# Access a member of the composite type if given a
# symbol or an SQL::Identifier. For all other access,
# assume the pg_array_ops extension is loaded and
# that it represents an array access. In either
# case, return a PGRowOp so that access can be cascaded.
def [](member)
case member
when Symbol, SQL::Identifier
PGRowOp.new(QUALIFY, [self, member])
else
PGRowOp.wrap(Sequel.pg_array_op(self)[member])
end
end
# Use the (identifier).* syntax to reference the members
# of the composite type as separate columns. Generally
# used when you want to expand the columns of a composite
# type to be separate columns in the result set.
#
# Sequel.pg_row_op(:a).* # (a).*
# Sequel.pg_row_op(:a)[:b].* # ((a).b).*
def *(ce=(arg=false;nil))
if arg == false
Sequel::SQL::ColumnAll.new([self])
else
super(ce)
end
end
# Use the (identifier.*) syntax to indicate that this
# expression represents the composite type of one
# of the tables being referenced, if it has the same
# name as one of the columns. If the cast_to argument
# is given, also cast the expression to that type
# (which should be a symbol representing the composite type).
# This is used if you want to return whole table row as a
# composite type.
#
# Sequel.pg_row_op(:a).splat[:b] # (a.*).b
# Sequel.pg_row_op(:a).splat(:a) # (a.*)::a
def splat(cast_to=nil)
if args.length > 1
raise Error, 'cannot splat a PGRowOp with multiple arguments'
end
if cast_to
PGRowOp.new(ROW_CAST, args + [cast_to])
else
PGRowOp.new(ROW, args)
end
end
module ExpressionMethods
# Return a PGRowOp wrapping the receiver.
def pg_row
Sequel.pg_row_op(self)
end
end
end
# :nocov:
if defined?(PGRow::ArrayRow)
# :nocov:
class PGRow::ArrayRow
# Wrap the PGRow::ArrayRow instance in a PGRowOp, allowing you to easily use
# the PostgreSQL row functions and operators with literal rows.
def op
Sequel.pg_row_op(self)
end
end
end
# :nocov:
if defined?(PGRow::HashRow)
# :nocov:
class PGRow::HashRow
# Wrap the PGRow::HashRow instance in a PGRowOp, allowing you to easily use
# the PostgreSQL row functions and operators with literal rows.
def op
Sequel.pg_row_op(self)
end
end
end
end
module SQL::Builders
# Return a PGRowOp wrapping the given expression.
def pg_row_op(expr)
Postgres::PGRowOp.wrap(expr)
end
end
class SQL::GenericExpression
include Sequel::Postgres::PGRowOp::ExpressionMethods
end
class LiteralString
include Sequel::Postgres::PGRowOp::ExpressionMethods
end
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::Postgres::PGRowOp::ExpressionMethods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::Postgres::PGRowOp::ExpressionMethods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/pg_static_cache_updater.rb 0000664 0000000 0000000 00000013006 14342141206 0024514 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_static_cache_updater extension is designed to
# automatically update the caches in the models using the
# static_cache plugin when changes to the underlying tables
# are detected.
#
# Before using the extension in production, you have to add
# triggers to the tables for the classes where you want the
# caches updated automatically. You would generally do this
# during a migration:
#
# Sequel.migration do
# up do
# extension :pg_static_cache_updater
# create_static_cache_update_function
# create_static_cache_update_trigger(:table_1)
# create_static_cache_update_trigger(:table_2)
# end
# down do
# extension :pg_static_cache_updater
# drop_trigger(:table_2, default_static_cache_update_name)
# drop_trigger(:table_1, default_static_cache_update_name)
# drop_function(default_static_cache_update_name)
# end
# end
#
# After the triggers have been added, in your application process,
# after setting up your models, you need to listen for changes to
# the underlying tables:
#
# class Model1 < Sequel::Model(:table_1)
# plugin :static_cache
# end
# class Model2 < Sequel::Model(:table_2)
# plugin :static_cache
# end
#
# DB.extension :pg_static_cache_updater
# DB.listen_for_static_cache_updates([Model1, Model2])
#
# When an INSERT/UPDATE/DELETE happens on the underlying table,
# the trigger will send a notification with the table's OID.
# The application(s) listening on that channel will receive
# the notification, check the oid to see if it matches one
# for the model tables it is interested in, and tell that model
# to reload the cache if there is a match.
#
# Note that listen_for_static_cache_updates spawns a new thread
# which will reserve its own database connection. This thread
# runs until the application process is shutdown.
#
# Also note that PostgreSQL does not send notifications to
# channels until after the transaction including the changes
# is committed. Also, because a separate thread is used to
# listen for notifications, there may be a slight delay between
# when the transaction is committed and when the cache is
# reloaded.
#
# Requirements:
# * PostgreSQL 9.0+
# * Listening Database object must be using the postgres adapter
# with the pg driver (the model classes do not have to
# use the same Database).
# * Must be using a thread-safe connection pool (the default).
#
# Related module: Sequel::Postgres::StaticCacheUpdater
#
module Sequel
module Postgres
module StaticCacheUpdater
# Add the static cache update function to the PostgreSQL database.
# This must be added before any triggers using this function are
# added.
#
# Options:
# :channel_name :: Override the channel name to use.
# :function_name :: Override the function name to use.
def create_static_cache_update_function(opts=OPTS)
create_function(opts[:function_name]||default_static_cache_update_name, <<-SQL, :returns=>:trigger, :language=>:plpgsql)
BEGIN
PERFORM pg_notify(#{literal((opts[:channel_name]||default_static_cache_update_name).to_s)}, TG_RELID::text);
RETURN NULL;
END
SQL
end
# Add a trigger to the given table that calls the function
# which will notify about table changes.
#
# Options:
# :function_name :: Override the function name to use.
# :trigger_name :: Override the trigger name to use.
def create_static_cache_update_trigger(table, opts=OPTS)
create_trigger(table, opts[:trigger_name]||default_static_cache_update_name, opts[:function_name]||default_static_cache_update_name, :after=>true)
end
# The default name for the function, trigger, and notification channel
# for this extension.
def default_static_cache_update_name
:sequel_static_cache_update
end
# Listen on the notification channel for changes to any of tables for
# the models given in a new thread. If notified about a change to one of the tables,
# reload the cache for the related model. Options given are also
# passed to Database#listen.
#
# Note that this implementation does not currently support multiple
# models that use the same underlying table.
#
# Options:
# :channel_name :: Override the channel name to use.
# :before_thread_exit :: An object that responds to +call+ that is called before the
# created thread exits.
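#
# A usage sketch (the LOGGER constant here is hypothetical):
#
# DB.listen_for_static_cache_updates([Model1, Model2], before_thread_exit: ->{ LOGGER.info("cache listener exiting") })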
def listen_for_static_cache_updates(models, opts=OPTS)
raise Error, "this database object does not respond to listen, use the postgres adapter with the pg driver" unless respond_to?(:listen)
models = [models] unless models.is_a?(Array)
raise Error, "array of models to listen for changes cannot be empty" if models.empty?
oid_map = {}
models.each do |model|
raise Error, "#{model.inspect} does not use the static_cache plugin" unless model.respond_to?(:load_cache)
oid_map[get(regclass_oid(model.dataset.first_source_table))] = model
end
Thread.new do
begin
listen(opts[:channel_name]||default_static_cache_update_name, {:loop=>true}.merge!(opts)) do |_, _, oid|
if model = oid_map[oid.to_i]
model.load_cache
end
end
ensure
opts[:before_thread_exit].call if opts[:before_thread_exit]
end
end
end
end
end
Database.register_extension(:pg_static_cache_updater, Postgres::StaticCacheUpdater)
end
sequel-5.63.0/lib/sequel/extensions/pg_timestamptz.rb 0000664 0000000 0000000 00000001353 14342141206 0022741 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pg_timestamptz extension changes the default timestamp
# type for the database to be +timestamptz+ (+timestamp with time zone+)
# instead of +timestamp+ (+timestamp without time zone+). This is
# recommended if you are dealing with multiple timezones in your application.
#
# To load the extension into the database:
#
# DB.extension :pg_timestamptz
#
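# Once loaded, generic timestamp columns are created as timestamptz
# (a sketch):
#
# DB.create_table(:events){DateTime :at} # "at" timestamptz
#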
# Related module: Sequel::Postgres::Timestamptz
#
module Sequel
module Postgres
module Timestamptz
private
# Use timestamptz by default for generic timestamp values.
def type_literal_generic_datetime(column)
:timestamptz
end
end
end
Database.register_extension(:pg_timestamptz, Postgres::Timestamptz)
end
sequel-5.63.0/lib/sequel/extensions/pretty_table.rb 0000664 0000000 0000000 00000001672 14342141206 0022374 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The pretty_table extension adds Sequel::Dataset#print and the
# Sequel::PrettyTable class for creating nice-looking plain-text
# tables. Example:
#
# +--+-------+
# |id|name |
# |--+-------|
# |1 |fasdfas|
# |2 |test |
# +--+-------+
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:pretty_table)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:pretty_table)
#
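# Once loaded, you can print a dataset, optionally limiting the columns
# (a usage sketch):
#
# DB[:table].print # print all columns
# DB[:table].print(:id, :name) # print only id and name
#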
# Related module: Sequel::DatasetPrinter
#
module Sequel
extension :_pretty_table
module DatasetPrinter
# Pretty prints the records in the dataset as plain-text table.
def print(*cols)
ds = naked
rows = ds.all
Sequel::PrettyTable.print(rows, cols.empty? ? ds.columns : cols)
end
end
Dataset.register_extension(:pretty_table, DatasetPrinter)
end
sequel-5.63.0/lib/sequel/extensions/query.rb 0000664 0000000 0000000 00000004453 14342141206 0021043 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The query extension adds a query method which allows
# a different way to construct queries instead of the usual
# method chaining:
#
# dataset = DB[:items].query do
# select :x, :y, :z
# where{(x > 1) & (y > 2)}
# reverse :z
# end
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:query)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:query)
#
# Related modules: Sequel::DatabaseQuery, Sequel::DatasetQuery,
# Sequel::Dataset::Query
#
module Sequel
module DatabaseQuery
def self.extended(db)
db.extend_datasets(DatasetQuery)
end
# Return a dataset modified by the query block
def query(&block)
dataset.query(&block)
end
end
module DatasetQuery
# Translates a query block into a dataset. Query blocks are an
# alternative to Sequel's usual method chaining, by using
# instance_exec with a proxy object:
#
# dataset = DB[:items].query do
# select :x, :y, :z
# where{(x > 1) & (y > 2)}
# reverse :z
# end
#
# Which is the same as:
#
# dataset = DB[:items].select(:x, :y, :z).where{(x > 1) & (y > 2)}.reverse(:z)
def query(&block)
query = Dataset::Query.new(self)
query.instance_exec(&block)
query.dataset
end
end
class Dataset
# Proxy object used by Dataset#query.
class Query < Sequel::BasicObject
# The current dataset in the query. This changes on each method call.
attr_reader :dataset
def initialize(dataset)
@dataset = dataset
end
# Replace the query's dataset with dataset returned by the method call.
def method_missing(method, *args, &block)
# Allow calling private methods, so things like raise work
@dataset = @dataset.send(method, *args, &block)
raise(Sequel::Error, "method #{method.inspect} did not return a dataset") unless @dataset.is_a?(Dataset)
self
end
# :nocov:
ruby2_keywords(:method_missing) if respond_to?(:ruby2_keywords, true)
# :nocov:
end
end
Dataset.register_extension(:query, DatasetQuery)
Database.register_extension(:query, DatabaseQuery)
end
sequel-5.63.0/lib/sequel/extensions/round_timestamps.rb 0000664 0000000 0000000 00000002743 14342141206 0023273 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The round_timestamps extension will automatically round timestamp
# values to the database's supported level of precision before literalizing
# them.
#
# For example, if the database supports millisecond precision, and you give
# it a Time value with microsecond precision, it will round it appropriately:
#
# Time.at(1405341161.917999982833862)
# # default: 2014-07-14 14:32:41.917999
# # with extension: 2014-07-14 14:32:41.918000
#
# The round_timestamps extension correctly deals with databases that support
# millisecond or second precision. In addition to handling Time values, it
# also handles DateTime values and Sequel::SQLTime values (for the TIME type).
#
# To round timestamps for a single dataset:
#
# ds = ds.extension(:round_timestamps)
#
# To round timestamps for all datasets on a single database:
#
# DB.extension(:round_timestamps)
#
# Related module: Sequel::Dataset::RoundTimestamps
module Sequel
class Dataset
module RoundTimestamps
# Round DateTime values before literalizing
def literal_datetime(v)
super(v + Rational(5, 10**timestamp_precision)/864000)
end
# Round Sequel::SQLTime values before literalizing
def literal_sqltime(v)
super(v.round(timestamp_precision))
end
# Round Time values before literalizing
def literal_time(v)
super(v.round(timestamp_precision))
end
end
register_extension(:round_timestamps, RoundTimestamps)
end
end
sequel-5.63.0/lib/sequel/extensions/run_transaction_hooks.rb 0000664 0000000 0000000 00000003765 14342141206 0024317 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The run_transaction_hooks extension allows for running after_commit or
# after_rollback hooks before commit or rollback. It then removes
# the hooks after running them, so they will not be run twice.
#
# This extension should only be used in transactional tests where the
# transaction always rolls back, to test the behavior of the after_commit
# and after_rollback hooks. Any other usage is probably a bad idea.
#
# Example:
#
# DB.extension :run_transaction_hooks
# x = 1
# DB.transaction(rollback: :always) do
# DB.after_rollback{x = 3}
# DB.after_commit{x = 2}
#
# x # => 1
# DB.run_after_rollback_hooks
# x # => 3
# DB.run_after_commit_hooks
# x # => 2
# end
# x # => 2
#
class Sequel::Database
module RunTransactionHooks
# Run all savepoint and transaction after_commit hooks for the current transaction,
# and remove the hooks after running them.
# Options:
# :server :: The server/shard to use.
def run_after_commit_hooks(opts=OPTS)
_run_transaction_hooks(:after_commit, opts)
end
# Run all savepoint and transaction after_rollback hooks for the current transaction,
# and remove the hooks after running them.
# Options:
# :server :: The server/shard to use.
def run_after_rollback_hooks(opts=OPTS)
_run_transaction_hooks(:after_rollback, opts)
end
private
def _run_transaction_hooks(type, opts)
synchronize(opts[:server]) do |conn|
unless h = _trans(conn)
raise Sequel::Error, "Cannot call run_#{type}_hooks outside of a transaction"
end
if hooks = h[type]
hooks.each(&:call)
hooks.clear
end
if (savepoints = h[:savepoints])
savepoints.each do |savepoint|
if hooks = savepoint[type]
hooks.each(&:call)
hooks.clear
end
end
end
end
end
end
register_extension(:run_transaction_hooks, RunTransactionHooks)
end
sequel-5.63.0/lib/sequel/extensions/s.rb 0000664 0000000 0000000 00000002610 14342141206 0020131 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The s extension adds Sequel::S, a module containing a private #S
# method that calls Sequel.expr. It's designed as a shortcut so
# that instead of:
#
# Sequel.expr(:column) + 1
# # or
# Sequel.expr{column + 1}
#
# you can just write:
#
# S(:column) + 1
# # or
# S{column + 1}
#
# To load the extension:
#
# Sequel.extension :s
#
# Then you can include the Sequel::S module into whatever classes or
# objects you care about:
#
# Sequel::Model.send(:include, Sequel::S) # available in model instance methods
# Sequel::Model.extend(Sequel::S) # available in model class methods
# Sequel::Dataset.send(:include, Sequel::S) # available in dataset methods
#
# or just into Object if you want it available everywhere:
#
# Object.send(:include, Sequel::S)
#
# If you are using Ruby 2+, and you would like to use refinements, you
# can use Sequel::S as a refinement, in which case the private #S method
# will be available on all objects while the refinement is active.
#
# using Sequel::S
#
# S(:column) + 1
#
# Related module: Sequel::S
#
module Sequel::S
private
# Delegate to Sequel.expr
def S(*a, &block)
Sequel.expr(*a, &block)
end
# :nocov:
if RUBY_VERSION >= '2.0.0'
include_meth = RUBY_VERSION >= '3.1' ? :import_methods : :include
# :nocov:
refine Object do
send include_meth, Sequel::S
end
end
end
sequel-5.63.0/lib/sequel/extensions/schema_caching.rb 0000664 0000000 0000000 00000005561 14342141206 0022613 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The schema_caching extension adds a few methods to Sequel::Database
# that make it easy to dump the parsed schema information to a file,
# and load it from that file. Loading the schema information from a
# dumped file is faster than parsing it from the database, so this
# can save bootup time for applications with large numbers of models.
#
# Basic usage in application code:
#
# DB = Sequel.connect('...')
# DB.extension :schema_caching
# DB.load_schema_cache('/path/to/schema.dump')
#
# # load model files
#
# Then, whenever the database schema is modified, write a new cached
# file. You can do that with bin/sequel's -S option:
#
# bin/sequel -S /path/to/schema.dump postgres://...
#
# Alternatively, if you don't want to dump the schema information for
# all tables, and you are not worried about race conditions, you can
# choose to use the following in your application code:
#
# DB = Sequel.connect('...')
# DB.extension :schema_caching
# DB.load_schema_cache?('/path/to/schema.dump')
#
# # load model files
#
# DB.dump_schema_cache?('/path/to/schema.dump')
#
# With this method, you just have to delete the schema dump file if
# the schema is modified, and the application will recreate it for you
# using just the tables that your models use.
#
# Note that it is up to the application to ensure that the dumped
# cached schema reflects the current state of the database. Sequel
# does no checking to ensure this, as checking would take time and the
# purpose of this code is to take a shortcut.
#
# The cached schema is dumped in Marshal format, since it is the fastest
# and it handles all ruby objects used in the schema hash. Because of this,
# you should not attempt to load the schema from an untrusted file.
#
# Related module: Sequel::SchemaCaching
#
module Sequel
module SchemaCaching
# Dump the cached schema to the filename given in Marshal format.
def dump_schema_cache(file)
sch = {}
@schemas.each do |k,v|
sch[k] = v.map do |c, h|
h = Hash[h]
h.delete(:callable_default)
[c, h]
end
end
File.open(file, 'wb'){|f| f.write(Marshal.dump(sch))}
nil
end
# Dump the cached schema to the filename given unless the file
# already exists.
def dump_schema_cache?(file)
dump_schema_cache(file) unless File.exist?(file)
end
# Replace the schema cache with the data from the given file, which
# should be in Marshal format.
def load_schema_cache(file)
@schemas = Marshal.load(File.read(file))
@schemas.each_value{|v| schema_post_process(v)}
nil
end
# Replace the schema cache with the data from the given file if the
# file exists.
def load_schema_cache?(file)
load_schema_cache(file) if File.exist?(file)
end
end
Database.register_extension(:schema_caching, SchemaCaching)
end
sequel-5.63.0/lib/sequel/extensions/schema_dumper.rb 0000664 0000000 0000000 00000051147 14342141206 0022514 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The schema_dumper extension supports dumping tables and indexes
# in a Sequel::Migration format, so they can be restored on another
# database (which can be the same type or a different type than
# the current database). The main interface is through
# Sequel::Database#dump_schema_migration.
#
# The schema_dumper extension is quite limited in what types of
# database objects it supports. In general, it only supports
# dumping tables, columns, primary key and foreign key constraints,
# and some indexes. It does not support most table options, CHECK
# constraints, partial indexes, database functions, triggers,
# security grants/revokes, and a wide variety of other useful
# database properties. Be aware of the limitations when using the
# schema_dumper extension. If you are dumping the schema to restore
# to the same database type, it is recommended to use your database's
# dump and restore programs instead of the schema_dumper extension.
#
# To load the extension:
#
# DB.extension :schema_dumper
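#
# Example usage (a sketch; the actual output depends on your schema):
#
#   puts DB.dump_schema_migration
#   # Sequel.migration do
#   #   change do
#   #     create_table(:albums) do
#   #       primary_key :id
#   #       String :name, :null=>false
#   #     end
#   #   end
#   # end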
#
# Related module: Sequel::SchemaDumper
Sequel.extension :eval_inspect
module Sequel
module SchemaDumper
# Convert the column schema information to a hash of column options, one of which must
# be :type. The other options added should modify that type (e.g. :size). If a
# database type is not recognized, return it as a String type.
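#
# For example (illustrative input):
#
#   column_schema_to_ruby_type(:db_type=>'varchar(255)')
#   # => {:type=>String, :size=>255}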
def column_schema_to_ruby_type(schema)
type = schema[:db_type].downcase
if database_type == :oracle
type = type.sub(/ not null\z/, '')
end
case type
when /\A(medium|small)?int(?:eger)?(?:\((\d+)\))?( unsigned)?\z/
if !$1 && $2 && $2.to_i >= 10 && $3
# Unsigned integer type with 10 digits can potentially contain values which
# don't fit signed integer type, so use bigint type in target database.
{:type=>:Bignum}
else
{:type=>Integer}
end
when /\Atinyint(?:\((\d+)\))?(?: unsigned)?\z/
{:type=>schema[:type] == :boolean ? TrueClass : Integer}
when /\Abigint(?:\((?:\d+)\))?(?: unsigned)?\z/
{:type=>:Bignum}
when /\A(?:real|float|double(?: precision)?|double\(\d+,\d+\))(?: unsigned)?\z/
{:type=>Float}
when 'boolean', 'bit', 'bool'
{:type=>TrueClass}
when /\A(?:(?:tiny|medium|long|n)?text|clob)\z/
{:type=>String, :text=>true}
when 'date'
{:type=>Date}
when /\A(?:small)?datetime\z/
{:type=>DateTime}
when /\Atimestamp(?:\((\d+)\))?(?: with(?:out)? time zone)?\z/
{:type=>DateTime, :size=>($1.to_i if $1)}
when /\Atime(?: with(?:out)? time zone)?\z/
{:type=>Time, :only_time=>true}
when /\An?char(?:acter)?(?:\((\d+)\))?\z/
{:type=>String, :size=>($1.to_i if $1), :fixed=>true}
when /\A(?:n?varchar2?|character varying|bpchar|string)(?:\((\d+)\))?\z/
{:type=>String, :size=>($1.to_i if $1)}
when /\A(?:small)?money\z/
{:type=>BigDecimal, :size=>[19,2]}
when /\A(?:decimal|numeric|number)(?:\((\d+)(?:,\s*(\d+))?\))?(?: unsigned)?\z/
s = [($1.to_i if $1), ($2.to_i if $2)].compact
{:type=>BigDecimal, :size=>(s.empty? ? nil : s)}
when /\A(?:bytea|(?:tiny|medium|long)?blob|(?:var)?binary)(?:\((\d+)\))?\z/
{:type=>File, :size=>($1.to_i if $1)}
when /\A(?:year|(?:int )?identity)\z/
{:type=>Integer}
else
{:type=>String}
end
end
# Dump foreign key constraints for all tables as a migration. This complements
# the foreign_keys: false option to dump_schema_migration. This only dumps
# the constraints (not the columns) using alter_table/add_foreign_key with an
# array of columns.
#
# Note that the migration this produces does not have a down
# block, so you cannot reverse it.
def dump_foreign_key_migration(options=OPTS)
      ts = tables(options)
      <<END_MIG
Sequel.migration do
  change do
#{ts.sort.map{|t| dump_table_foreign_keys(t)}.reject{|x| x == ''}.join("\n\n").gsub(/^/, '    ')}
  end
end
END_MIG
    end
    # Return a string with a create_table block that will recreate the given
    # table's schema. Takes the same options as dump_schema_migration.
    def dump_table_schema(table, options=OPTS)
      gen = dump_table_generator(table, options)
      commands = [gen.dump_columns, gen.dump_constraints, gen.dump_indexes].reject{|x| x == ''}.join("\n\n")
      "create_table(#{table.inspect.gsub('"', '\'')}#{', :ignore_index_errors=>true' if !options[:same_db] && options[:indexes] != false && !gen.indexes.empty?}) do\n#{commands.gsub(/^/, '  ')}\nend"
    end
private
# If a database default exists and can't be converted, and we are dumping with :same_db,
# return a string with the inspect method modified so that a literal string is created if the code is evaled.
def column_schema_to_ruby_default_fallback(default, options)
if default.is_a?(String) && options[:same_db] && use_column_schema_to_ruby_default_fallback?
default = default.dup
def default.inspect
"Sequel::LiteralString.new(#{super})"
end
default
end
end
# Recreate the column in the passed Schema::CreateTableGenerator from the given name and parsed database schema.
def recreate_column(name, schema, gen, options)
if options[:single_pk] && schema_autoincrementing_primary_key?(schema)
type_hash = options[:same_db] ? {:type=>schema[:db_type]} : column_schema_to_ruby_type(schema)
[:table, :key, :on_delete, :on_update, :deferrable].each{|f| type_hash[f] = schema[f] if schema[f]}
if type_hash == {:type=>Integer} || type_hash == {:type=>"integer"} || type_hash == {:type=>"INTEGER"}
type_hash.delete(:type)
elsif options[:same_db] && type_hash == {:type=>type_literal_generic_bignum_symbol(type_hash).to_s}
type_hash[:type] = :Bignum
end
unless gen.columns.empty?
type_hash[:keep_order] = true
end
if type_hash.empty?
gen.primary_key(name)
else
gen.primary_key(name, type_hash)
end
else
col_opts = if options[:same_db]
h = {:type=>schema[:db_type]}
if database_type == :mysql && h[:type] =~ /\Atimestamp/
h[:null] = true
end
h
else
column_schema_to_ruby_type(schema)
end
type = col_opts.delete(:type)
col_opts.delete(:size) if col_opts[:size].nil?
if schema[:generated]
if options[:same_db] && database_type == :postgres
col_opts[:generated_always_as] = column_schema_to_ruby_default_fallback(schema[:default], options)
end
else
col_opts[:default] = if schema[:ruby_default].nil?
column_schema_to_ruby_default_fallback(schema[:default], options)
else
schema[:ruby_default]
end
col_opts.delete(:default) if col_opts[:default].nil?
end
col_opts[:null] = false if schema[:allow_null] == false
if table = schema[:table]
[:key, :on_delete, :on_update, :deferrable].each{|f| col_opts[f] = schema[f] if schema[f]}
col_opts[:type] = type unless type == Integer || type == 'integer' || type == 'INTEGER'
gen.foreign_key(name, table, col_opts)
else
gen.column(name, type, col_opts)
if [Integer, :Bignum, Float, BigDecimal].include?(type) && schema[:db_type] =~ / unsigned\z/io
gen.check(Sequel::SQL::Identifier.new(name) >= 0)
end
end
end
end
# For the table and foreign key metadata array, return an alter_table
# string that would add the foreign keys if run in a migration.
def dump_add_fk_constraints(table, fks)
sfks = String.new
sfks << "alter_table(#{table.inspect}) do\n"
sfks << create_table_generator do
fks.sort_by{|fk| fk[:columns]}.each do |fk|
foreign_key fk[:columns], fk
end
end.dump_constraints.gsub(/^foreign_key /, ' add_foreign_key ')
sfks << "\nend"
end
# For the table given, get the list of foreign keys and return an alter_table
# string that would add the foreign keys if run in a migration.
def dump_table_foreign_keys(table, options=OPTS)
if supports_foreign_key_parsing?
fks = foreign_key_list(table, options).sort_by{|fk| fk[:columns]}
end
if fks.nil? || fks.empty?
''
else
dump_add_fk_constraints(table, fks)
end
end
# Return a Schema::CreateTableGenerator object that will recreate the
# table's schema. Takes the same options as dump_schema_migration.
def dump_table_generator(table, options=OPTS)
s = schema(table, options).dup
pks = s.find_all{|x| x.last[:primary_key] == true}.map(&:first)
options = options.merge(:single_pk=>true) if pks.length == 1
m = method(:recreate_column)
im = method(:index_to_generator_opts)
if options[:indexes] != false && supports_index_parsing?
indexes = indexes(table).sort
end
if options[:foreign_keys] != false && supports_foreign_key_parsing?
fk_list = foreign_key_list(table)
if (sfk = options[:skipped_foreign_keys]) && (sfkt = sfk[table])
fk_list.delete_if{|fk| sfkt.has_key?(fk[:columns])}
end
composite_fks, single_fks = fk_list.partition{|h| h[:columns].length > 1}
fk_hash = {}
single_fks.each do |fk|
column = fk.delete(:columns).first
fk.delete(:name)
fk_hash[column] = fk
end
s = s.map do |name, info|
if fk_info = fk_hash[name]
[name, fk_info.merge(info)]
else
[name, info]
end
end
end
create_table_generator do
s.each{|name, info| m.call(name, info, self, options)}
primary_key(pks) if !@primary_key && pks.length > 0
indexes.each{|iname, iopts| send(:index, iopts[:columns], im.call(table, iname, iopts, options))} if indexes
composite_fks.each{|fk| send(:foreign_key, fk[:columns], fk)} if composite_fks
end
end
# Return a string containing add_index/drop_index method calls for
# creating the index migration.
def dump_table_indexes(table, meth, options=OPTS)
if supports_index_parsing?
indexes = indexes(table).sort
else
return ''
end
im = method(:index_to_generator_opts)
gen = create_table_generator do
indexes.each{|iname, iopts| send(:index, iopts[:columns], im.call(table, iname, iopts, options))}
end
gen.dump_indexes(meth=>table, :ignore_errors=>!options[:same_db])
end
# Convert the parsed index information into options to the CreateTableGenerator's index method.
def index_to_generator_opts(table, name, index_opts, options=OPTS)
h = {}
if options[:index_names] != false && default_index_name(table, index_opts[:columns]) != name.to_s
if options[:index_names] == :namespace && !global_index_namespace?
h[:name] = "#{table}_#{name}".to_sym
else
h[:name] = name
end
end
h[:unique] = true if index_opts[:unique]
h[:deferrable] = true if index_opts[:deferrable]
h
end
# Sort the tables so that referenced tables are created before tables that
# reference them, and then by name. If foreign keys are disabled, just sort by name.
def sort_dumped_tables(tables, options=OPTS)
if options[:foreign_keys] != false && supports_foreign_key_parsing?
table_fks = {}
tables.each{|t| table_fks[t] = foreign_key_list(t)}
# Remove self referential foreign keys, not important when sorting.
table_fks.each{|t, fks| fks.delete_if{|fk| fk[:table] == t}}
tables, skipped_foreign_keys = sort_dumped_tables_topologically(table_fks, [])
options[:skipped_foreign_keys] = skipped_foreign_keys
tables
else
tables.sort
end
end
# Do a topological sort of tables, so that referenced tables
# come before referencing tables. Returns an array of sorted
# tables and a hash of skipped foreign keys. The hash will be
# empty unless there are circular dependencies.
def sort_dumped_tables_topologically(table_fks, sorted_tables)
skipped_foreign_keys = {}
until table_fks.empty?
this_loop = []
table_fks.each do |table, fks|
fks.delete_if{|fk| !table_fks.has_key?(fk[:table])}
this_loop << table if fks.empty?
end
if this_loop.empty?
# No tables were changed this round, there must be a circular dependency.
# Break circular dependency by picking the table with the least number of
# outstanding foreign keys and skipping those foreign keys.
# The skipped foreign keys will be added at the end of the
# migration.
skip_table, skip_fks = table_fks.sort_by{|table, fks| [fks.length, table]}.first
skip_fks_hash = skipped_foreign_keys[skip_table] = {}
skip_fks.each{|fk| skip_fks_hash[fk[:columns]] = fk}
this_loop << skip_table
end
# Add sorted tables from this loop to the final list
sorted_tables.concat(this_loop.sort)
# Remove tables that were handled this loop
this_loop.each{|t| table_fks.delete(t)}
end
[sorted_tables, skipped_foreign_keys]
end
# Don't use a literal string fallback on MySQL, since the defaults it uses aren't
# valid literal SQL values.
def use_column_schema_to_ruby_default_fallback?
database_type != :mysql
end
end
module Schema
class CreateTableGenerator
# Dump this generator's columns to a string that could be evaled inside
# another instance to represent the same columns
def dump_columns
strings = []
cols = columns.dup
cols.each do |x|
x.delete(:on_delete) if x[:on_delete] == :no_action
x.delete(:on_update) if x[:on_update] == :no_action
end
if (pkn = primary_key_name) && !@primary_key[:keep_order]
cols.delete_if{|x| x[:name] == pkn}
pk = @primary_key.dup
pkname = pk.delete(:name)
@db.serial_primary_key_options.each{|k,v| pk.delete(k) if v == pk[k]}
strings << "primary_key #{pkname.inspect}#{opts_inspect(pk)}"
end
cols.each do |c|
c = c.dup
name = c.delete(:name)
strings << if table = c.delete(:table)
c.delete(:type) if c[:type] == Integer || c[:type] == 'integer'
"foreign_key #{name.inspect}, #{table.inspect}#{opts_inspect(c)}"
elsif pkn == name
@db.serial_primary_key_options.each{|k,v| c.delete(k) if v == c[k]}
"primary_key #{name.inspect}#{opts_inspect(c)}"
else
type = c.delete(:type)
opts = opts_inspect(c)
case type
when Class
"#{type.name} #{name.inspect}#{opts}"
when :Bignum
"Bignum #{name.inspect}#{opts}"
else
"column #{name.inspect}, #{type.inspect}#{opts}"
end
end
end
strings.join("\n")
end
# Dump this generator's constraints to a string that could be evaled inside
# another instance to represent the same constraints
def dump_constraints
cs = constraints.map do |c|
c = c.dup
type = c.delete(:type)
case type
when :check
raise(Error, "can't dump check/constraint specified with Proc") if c[:check].is_a?(Proc)
name = c.delete(:name)
if !name and c[:check].length == 1 and c[:check].first.is_a?(Hash)
"check #{c[:check].first.inspect[1...-1]}"
else
"#{name ? "constraint #{name.inspect}," : 'check'} #{c[:check].map(&:inspect).join(', ')}"
end
when :foreign_key
c.delete(:on_delete) if c[:on_delete] == :no_action
c.delete(:on_update) if c[:on_update] == :no_action
c.delete(:deferrable) unless c[:deferrable]
cols = c.delete(:columns)
table = c.delete(:table)
"#{type} #{cols.inspect}, #{table.inspect}#{opts_inspect(c)}"
else
cols = c.delete(:columns)
"#{type} #{cols.inspect}#{opts_inspect(c)}"
end
end
cs.join("\n")
end
# Dump this generator's indexes to a string that could be evaled inside
# another instance to represent the same indexes. Options:
# :add_index :: Use add_index instead of index, so the methods
# can be called outside of a generator but inside a migration.
# The value of this option should be the table name to use.
# :drop_index :: Same as add_index, but create drop_index statements.
# :ignore_errors :: Add the ignore_errors option to the outputted indexes
def dump_indexes(options=OPTS)
is = indexes.map do |c|
c = c.dup
cols = c.delete(:columns)
if table = options[:add_index] || options[:drop_index]
"#{options[:drop_index] ? 'drop' : 'add'}_index #{table.inspect}, #{cols.inspect}#{', :ignore_errors=>true' if options[:ignore_errors]}#{opts_inspect(c)}"
else
"index #{cols.inspect}#{opts_inspect(c)}"
end
end
is = is.reverse if options[:drop_index]
is.join("\n")
end
private
# Return a string that converts the given options into one
# suitable for literal ruby code, handling default values
# that don't default to a literal interpretation.
def opts_inspect(opts)
if opts[:default]
opts = opts.dup
de = Sequel.eval_inspect(opts.delete(:default))
", :default=>#{de}#{", #{opts.inspect[1...-1]}" if opts.length > 0}"
else
", #{opts.inspect[1...-1]}" if opts.length > 0
end
end
end
end
Database.register_extension(:schema_dumper, SchemaDumper)
end
sequel-5.63.0/lib/sequel/extensions/select_remove.rb 0000664 0000000 0000000 00000004337 14342141206 0022533 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The select_remove extension adds select_remove for removing existing selected
# columns from a dataset. It's not part of Sequel core as it is rarely needed and has
# some corner cases where it can't work correctly.
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:select_remove)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:select_remove)
#
# Related module: Sequel::SelectRemove
#
module Sequel
module SelectRemove
# Remove columns from the list of selected columns. If any of the currently selected
# columns use expressions/aliases, this will remove selected columns with the given
# aliases. It will also remove entries from the selection that match exactly:
#
# # Assume columns a, b, and c in items table
# DB[:items] # SELECT * FROM items
# DB[:items].select_remove(:c) # SELECT a, b FROM items
# DB[:items].select(:a, Sequel[:b].as(:c), Sequel[:c].as(:b)).select_remove(:c) # SELECT a, c AS b FROM items
# DB[:items].select(:a, Sequel[:b].as(:c), Sequel[:c].as(:b)).select_remove(Sequel[:c].as(:b)) # SELECT a, b AS c FROM items
#
# Note that there are a few cases where this method may not work correctly:
#
# * This dataset joins multiple tables and does not have an existing explicit selection.
# In this case, the code will currently use unqualified column names for all columns
# the dataset returns, except for the columns given.
# * This dataset has an existing explicit selection containing an item that returns
# multiple database columns (e.g. Sequel[:table].*, Sequel.lit('column1, column2')). In this case,
# the behavior is undefined and this method should not be used.
#
# There may be other cases where this method does not work correctly, use it with caution.
def select_remove(*cols)
if (sel = @opts[:select]) && !sel.empty?
select(*(columns.zip(sel).reject{|c, s| cols.include?(c)}.map{|c, s| s} - cols))
else
select(*(columns - cols))
end
end
end
Dataset.register_extension(:select_remove, SelectRemove)
end
sequel-5.63.0/lib/sequel/extensions/sequel_4_dataset_methods.rb 0000664 0000000 0000000 00000005363 14342141206 0024650 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# This adds the following dataset methods:
#
# and :: alias for where
# exclude_where :: alias for exclude
# interval :: Returns max - min, using a single query
# range :: Returns min..max, using a single query
#
# It is only recommended to use this for backwards compatibility.
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:sequel_4_dataset_methods)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:sequel_4_dataset_methods)
#
# Related module: Sequel::Sequel4DatasetMethods
#
module Sequel
module Sequel4DatasetMethods
# Alias for where.
def and(*cond, &block)
where(*cond, &block)
end
# Alias for exclude.
def exclude_where(*cond, &block)
exclude(*cond, &block)
end
# Returns the interval between minimum and maximum values for the given
# column/expression. Uses a virtual row block if no argument is given.
#
# DB[:table].interval(:id) # SELECT (max(id) - min(id)) FROM table LIMIT 1
# # => 6
# DB[:table].interval{function(column)} # SELECT (max(function(column)) - min(function(column))) FROM table LIMIT 1
# # => 7
def interval(column=(no_arg = true), &block)
column = Sequel.virtual_row(&block) if no_arg
if loader = cached_placeholder_literalizer(:_interval_loader) do |pl|
arg = pl.arg
aggregate_dataset.limit(1).select((SQL::Function.new(:max, arg) - SQL::Function.new(:min, arg)).as(:interval))
end
loader.get(column)
else
aggregate_dataset.get{(max(column) - min(column)).as(:interval)}
end
end
# Returns a +Range+ instance made from the minimum and maximum values for the
# given column/expression. Uses a virtual row block if no argument is given.
#
# DB[:table].range(:id) # SELECT min(id) AS v1, max(id) AS v2 FROM table LIMIT 1
# # => 1..10
# DB[:table].range{function(column)} # SELECT min(function(column)) AS v1, max(function(column)) AS v2 FROM table LIMIT 1
# # => 0..7
def range(column=(no_arg = true), &block)
column = Sequel.virtual_row(&block) if no_arg
r = if loader = cached_placeholder_literalizer(:_range_loader) do |pl|
arg = pl.arg
aggregate_dataset.limit(1).select(SQL::Function.new(:min, arg).as(:v1), SQL::Function.new(:max, arg).as(:v2))
end
loader.first(column)
else
aggregate_dataset.select{[min(column).as(v1), max(column).as(v2)]}.first
end
if r
(r[:v1]..r[:v2])
end
end
end
Dataset.register_extension(:sequel_4_dataset_methods, Sequel4DatasetMethods)
end
sequel-5.63.0/lib/sequel/extensions/server_block.rb 0000664 0000000 0000000 00000012771 14342141206 0022360 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The server_block extension adds the Database#with_server method, which takes a shard
# argument and a block, and makes it so that access inside the block will use the
# specified shard by default.
#
# First, you need to enable it on the database object:
#
# DB.extension :server_block
#
# Then you can call with_server:
#
# DB.with_server(:shard1) do
# DB[:a].all # Uses shard1
# DB[:a].server(:shard2).all # Uses shard2
# end
# DB[:a].all # Uses default
#
# You can nest calls to with_server:
#
# DB.with_server(:shard1) do
# DB[:a].all # Uses shard1
# DB.with_server(:shard2) do
# DB[:a].all # Uses shard2
# end
# DB[:a].all # Uses shard1
# end
# DB[:a].all # Uses default
#
# Note that if you pass nil, :default, or :read_only as the server/shard
# name to Dataset#server inside a with_server block, it will be
# ignored and the server/shard given to with_server will be used:
#
# DB.with_server(:shard1) do
# DB[:a].all # Uses shard1
# DB[:a].server(:shard2).all # Uses shard2
# DB[:a].server(nil).all # Uses shard1
# DB[:a].server(:default).all # Uses shard1
# DB[:a].server(:read_only).all # Uses shard1
# end
#
# If you pass two separate shards to with_server, the second shard will
# be used instead of the :read_only shard, and the first shard will be
# used instead of the :default shard:
#
# DB.with_server(:shard1, :shard2) do
# DB[:a].all # Uses shard2
# DB[:a].delete # Uses shard1
# DB[:a].server(:shard3).all # Uses shard3
# DB[:a].server(:shard3).delete # Uses shard3
# DB[:a].server(:default).all # Uses shard1
# DB[:a].server(:read_only).delete # Uses shard2
# end
#
# If you use an invalid server when calling with_server, it will be
# treated the same way as if you called Dataset#server with an invalid
# server. By default, the default server will be used in such cases.
# If you would like a different server to be used, or an exception to
# be raised, then use the :servers_hash Database option.
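#
# For example, one possible way to raise for unconfigured shards (a sketch):
#
#   DB = Sequel.connect('...',
#     servers: {shard1: {}, shard2: {}},
#     servers_hash: Hash.new{|h, k| raise Sequel::Error, "unknown shard: #{k}"})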
#
# Related modules: Sequel::ServerBlock, Sequel::UnthreadedServerBlock,
# Sequel::ThreadedServerBlock
#
module Sequel
module ServerBlock
# Enable the server block on the connection pool, choosing the correct
# extension depending on whether the connection pool is threaded or not.
# Also defines the with_server method on the receiver for easy use.
def self.extended(db)
pool = db.pool
if defined?(ShardedThreadedConnectionPool) && pool.is_a?(ShardedThreadedConnectionPool)
pool.extend(ThreadedServerBlock)
pool.instance_variable_set(:@default_servers, {})
else
pool.extend(UnthreadedServerBlock)
pool.instance_variable_set(:@default_servers, [])
end
end
# Delegate to the connection pool
def with_server(default_server, read_only_server=default_server, &block)
pool.with_server(default_server, read_only_server, &block)
end
end
# Adds with_server support for the sharded single connection pool.
module UnthreadedServerBlock
# Set a default server/shard to use inside the block.
def with_server(default_server, read_only_server=default_server)
set_default_server(default_server, read_only_server)
yield
ensure
clear_default_server
end
private
# Make the given server the new default server.
def set_default_server(default_server, read_only_server=default_server)
@default_servers << [default_server, read_only_server]
end
# Remove the current default server, restoring the
# previous default server.
def clear_default_server
@default_servers.pop
end
# Use the server given to with_server if appropriate.
def pick_server(server)
if @default_servers.empty?
super
else
case server
when :default, nil
@servers[@default_servers[-1][0]]
when :read_only
@servers[@default_servers[-1][1]]
else
super
end
end
end
end
# Adds with_server support for the sharded threaded connection pool.
module ThreadedServerBlock
# Set a default server/shard to use inside the block for the current
# thread.
def with_server(default_server, read_only_server=default_server)
set_default_server(default_server, read_only_server)
yield
ensure
clear_default_server
end
private
# Make the given server the new default server for the current thread.
def set_default_server(default_server, read_only_server=default_server)
sync{(@default_servers[Sequel.current] ||= [])} << [default_server, read_only_server]
end
# Remove the current default server for the current thread, restoring the
# previous default server.
def clear_default_server
t = Sequel.current
a = sync{@default_servers[t]}
a.pop
sync{@default_servers.delete(t)} if a.empty?
end
# Use the server given to with_server for the given thread, if appropriate.
def pick_server(server)
a = sync{@default_servers[Sequel.current]}
if !a || a.empty?
super
else
# Hash handling required to work when loaded after arbitrary servers plugin.
case server
when :default, nil
v = a[-1][0]
v = @servers[v] unless v.is_a?(Hash)
v
when :read_only
v = a[-1][1]
v = @servers[v] unless v.is_a?(Hash)
v
else
super
end
end
end
end
Database.register_extension(:server_block, ServerBlock)
end
sequel-5.63.0/lib/sequel/extensions/server_logging.rb 0000664 0000000 0000000 00000003544 14342141206 0022712 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The server_logging extension makes the logger include the server/shard
# the query was issued on. This makes it easier to use the logs when
# using sharding.
#
# Example:
#
# DB.opts[:servers]
# # {:read_only=>{}, :b=>{}}
# DB.extension :server_logging
# DB[:a].all
# # (0.000005s) (conn: 1014942550, server: read_only) SELECT * FROM a
# DB[:a].server(:b).all
# # (0.000004s) (conn: 997304100, server: b) SELECT * FROM a
# DB[:a].insert
# # (0.000004s) (conn: 1014374750, server: default) INSERT INTO a DEFAULT VALUES
#
# In order for the server/shard to be correct for all connections, you need to
# use this before connections to the database are made, or you need to call
# Database#disconnect after loading this extension.
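#
# For example (a sketch):
#
#   DB = Sequel.connect('...', servers: {read_only: {}, b: {}})
#   DB.extension :server_logging # load before issuing queries
#   # or, if connections may already have been made:
#   DB.disconnect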
#
# Related module: Sequel::ServerLogging
#
module Sequel
module ServerLogging
# Initialize the hash mapping connections to shards, and turn on logging
# of connection info unless it has specifically been turned off.
def self.extended(db)
db.instance_exec do
@server_connection_map ||= {}
self.log_connection_info = true if log_connection_info.nil?
end
end
# When setting up a new connection, associate the connection with the
# shard.
def connect(server)
conn = super
Sequel.synchronize{@server_connection_map[conn] = server}
conn
end
# When disconnecting a connection, remove the related connection from the mapping.
def disconnect_connection(conn)
super
ensure
Sequel.synchronize{@server_connection_map.delete(conn)}
end
private
# Include the server with the connection's id.
def connection_info(conn)
"(conn: #{conn.__id__}, server: #{Sequel.synchronize{@server_connection_map[conn]}}) "
end
end
Database.register_extension(:server_logging, ServerLogging)
end
sequel-5.63.0/lib/sequel/extensions/split_array_nil.rb 0000664 0000000 0000000 00000004252 14342141206 0023066 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The split_array_nil extension overrides Sequel's default handling of
# IN/NOT IN with arrays of values to do specific nil checking. For example,
#
# ds = DB[:table].where(column: [1, nil])
#
# By default, that produces the following SQL:
#
# SELECT * FROM table WHERE (column IN (1, NULL))
#
# However, because NULL = NULL is not true in SQL (it is NULL), this
# will not return rows in the table where the column is NULL. This
# extension allows for an alternative behavior more similar to ruby,
# which will return rows in the table where the column is NULL, using
# a query like:
#
# SELECT * FROM table WHERE ((column IN (1)) OR (column IS NULL))
#
# Similarly, for NOT IN queries:
#
# ds = DB[:table].exclude(column: [1, nil])
# # Default:
# # SELECT * FROM table WHERE (column NOT IN (1, NULL))
# # with split_array_nil extension:
# # SELECT * FROM table WHERE ((column NOT IN (1)) AND (column IS NOT NULL))
#
# To use this extension with a single dataset:
#
# ds = ds.extension(:split_array_nil)
#
# To use this extension for all of a database's datasets:
#
# DB.extension(:split_array_nil)
#
# Related module: Sequel::Dataset::SplitArrayNil
#
module Sequel
class Dataset
module SplitArrayNil
# Override the IN/NOT IN handling with an array of values where one of the
# values in the array is nil, by removing nils from the array of values,
# and using a separate OR IS NULL clause for IN or AND IS NOT NULL clause
# for NOT IN.
def complex_expression_sql_append(sql, op, args)
case op
when :IN, :"NOT IN"
vals = args[1]
if vals.is_a?(Array) && vals.any?(&:nil?)
cols = args[0]
vals = vals.compact
c = Sequel::SQL::BooleanExpression
if op == :IN
literal_append(sql, c.new(:OR, c.new(:IN, cols, vals), c.new(:IS, cols, nil)))
else
literal_append(sql, c.new(:AND, c.new(:"NOT IN", cols, vals), c.new(:"IS NOT", cols, nil)))
end
else
super
end
else
super
end
end
end
end
Dataset.register_extension(:split_array_nil, Dataset::SplitArrayNil)
end
sequel-5.63.0/lib/sequel/extensions/sql_comments.rb 0000664 0000000 0000000 00000014507 14342141206 0022403 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The sql_comments extension adds Dataset#comment to the datasets,
# allowing you to set SQL comments in the resulting query. These
# comments are appended to the end of the SQL query:
#
# ds = DB[:table].comment("Some Comment").all
# # SELECT * FROM table -- Some Comment
# #
#
# As you can see, this uses single line SQL comments (--) suffixed
# by a newline. This extension transforms all consecutive whitespace
# in the comment to a single space:
#
# ds = DB[:table].comment("Some\r\nComment Here").all
# # SELECT * FROM table -- Some Comment Here
# #
#
# The reason the comment is suffixed by a newline is so that it
# works correctly when used in subqueries:
#
# ds = DB[:table].comment("Some\r\nComment Here")
# ds.where(id: ds).all
# # SELECT * FROM table WHERE (id IN (SELECT * FROM table -- Some Comment Here
# # )) -- Some Comment Here
# #
#
# In addition to working on SELECT queries, it also works when
# inserting, updating, and deleting.
#
# Due to the use of single line SQL comments and converting all
# whitespace to spaces, this should correctly handle even
# malicious input. However, it would be unwise to rely on that,
# you should ensure that the argument given
# to Dataset#comment is not derived from user input.
#
# You can load this extension into specific datasets:
#
# ds = DB[:table]
# ds = ds.extension(:sql_comments)
#
# Or you can load it into all of a database's datasets, which
# is probably the desired behavior if you are using this extension:
#
# DB.extension(:sql_comments)
#
# Loading the sql_comments extension into the database also adds
# support for block-level comment support via Database#with_comments.
# You call #with_comments with a hash. Queries inside the hash will
# include a comment based on the hash (assuming they are inside the
# same thread):
#
# DB.with_comments(model: Album, action: :all) do
# DB[:albums].all
# # SELECT * FROM albums -- model:Album,action:all
# end
#
# You can nest calls to #with_comments, which will combine the
# entries from both calls:
#
# DB.with_comments(application: App, path: :scrubbed_path) do
# DB.with_comments(model: Album, action: :all) do
# ds = DB[:albums].all
# # SELECT * FROM albums
# # -- application:App,path:scrubbed_path,model:Album,action:all
# end
# end
#
# You can override comment entries specified in earlier blocks, or
# remove entries specified earlier using a nil value:
#
# DB.with_comments(application: App, path: :scrubbed_path) do
# DB.with_comments(application: Foo, path: nil) do
# ds = DB[:albums].all
# # SELECT * FROM albums -- application:Foo
# end
# end
#
# You can combine block-level comments with dataset-specific
# comments:
#
# DB.with_comments(model: Album, action: :all) do
# DB[:table].comment("Some Comment").all
# # SELECT * FROM albums -- model:Album,action:all -- Some Comment
# end
#
# Note that Microsoft Access does not support inline comments,
# and attempting to use comments on it will result in SQL syntax
# errors.
#
# Related modules: Sequel::SQLComments, Sequel::Database::SQLComments
#
module Sequel
module SQLComments
# Return a modified copy of the dataset that will use the given comment.
# To uncomment a commented dataset, pass nil as the argument.
def comment(comment)
clone(:comment=>(format_sql_comment(comment) if comment))
end
%w'select insert update delete'.each do |type|
define_method(:"#{type}_sql") do |*a|
sql = super(*a)
if comment = _sql_comment
# This assumes that the comment stored in the dataset has
# already been formatted. If not, this could result in SQL
# injection.
#
# Additionally, due to the use of an SQL comment, if any
# SQL is appened to the query after the comment is added,
# it will become part of the comment unless it is preceded
# by a newline.
if sql.frozen?
sql += comment
sql.freeze
elsif @opts[:append_sql] || @opts[:placeholder_literalizer]
sql << comment
else
sql += comment
end
end
sql
end
end
private
# The comment to include in the SQL query, if any.
def _sql_comment
@opts[:comment]
end
# Format the comment. For maximum compatibility, this uses a
# single line SQL comment, and converts all consecutive whitespace
# in the comment to a single space.
def format_sql_comment(comment)
" -- #{comment.to_s.gsub(/\s+/, ' ')}\n"
end
end
module Database::SQLComments
def self.extended(db)
db.instance_variable_set(:@comment_hashes, {})
db.extend_datasets DatasetSQLComments
end
# A map of threads to comment hashes, used for correctly setting
# comments for all queries inside #with_comments blocks.
attr_reader :comment_hashes
# Store the comment hash and use it to create comments inside the block
def with_comments(comment_hash)
hashes = @comment_hashes
t = Sequel.current
new_hash = if hash = Sequel.synchronize{hashes[t]}
hash.merge(comment_hash)
else
comment_hash.dup
end
yield Sequel.synchronize{hashes[t] = new_hash}
ensure
if hash
Sequel.synchronize{hashes[t] = hash}
else
t && Sequel.synchronize{hashes.delete(t)}
end
end
module DatasetSQLComments
include Sequel::SQLComments
private
# Include comments added via Database#with_comments in the output SQL.
def _sql_comment
specific_comment = super
return specific_comment if @opts[:append_sql]
t = Sequel.current
hashes = db.comment_hashes
block_comment = if comment_hash = Sequel.synchronize{hashes[t]}
comment_array = comment_hash.map{|k,v| "#{k}:#{v}" unless v.nil?}
comment_array.compact!
comment_array.join(",")
end
if block_comment
if specific_comment
format_sql_comment(block_comment + specific_comment)
else
format_sql_comment(block_comment)
end
else
specific_comment
end
end
end
end
Dataset.register_extension(:sql_comments, SQLComments)
Database.register_extension(:sql_comments, Database::SQLComments)
end
sequel-5.63.0/lib/sequel/extensions/sql_expr.rb 0000664 0000000 0000000 00000001067 14342141206 0021531 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The sql_expr extension adds the sql_expr method to every object, which
# returns a wrapped object that works nicely with Sequel's DSL by calling
# Sequel.expr:
#
# 1.sql_expr < :a # 1 < a
# false.sql_expr & :a # FALSE AND a
# true.sql_expr | :a # TRUE OR a
# ~nil.sql_expr # NOT NULL
# "a".sql_expr + "b" # 'a' || 'b'
#
# To load the extension:
#
# Sequel.extension :sql_expr
#
class Object
# Return the object wrapped in an appropriate Sequel expression object.
def sql_expr
Sequel[self]
end
end
sequel-5.63.0/lib/sequel/extensions/sql_log_normalizer.rb 0000664 0000000 0000000 00000006716 14342141206 0023604 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The sql_log_normalizer extension normalizes the SQL that is logged,
# removing the literal strings and numbers in the SQL, and removing the
# logging of any bound variables:
#
# ds = DB[:table].first(a: 1, b: 'something')
# # Without sql_log_normalizer extension
# # SELECT * FROM "table" WHERE (("a" = 1) AND ("b" = 'something')) LIMIT 1
#
# # With sql_log_normalizer_extension
# # SELECT * FROM "table" WHERE (("a" = ?) AND ("b" = ?)) LIMIT ?
#
# The normalization is done by scanning the SQL string being executed
# for literal strings and numbers, and replacing them with question
# marks. While this should work for all or almost all production queries,
# there are pathological queries that will not be handled correctly, such as
# the use of apostrophes in identifiers:
#
# DB[:"asf'bar"].where(a: 1, b: 'something').first
# # Logged as:
# # SELECT * FROM "asf?something')) LIMIT ?
#
# The expected use case for this extension is when you want to normalize
# logs to group similar queries, or when you want to protect sensitive
# data from being stored in the logs.
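#
# To load the extension:
#
#   DB.extension :sql_log_normalizer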
#
# Related module: Sequel::SQLLogNormalizer
#
module Sequel
module SQLLogNormalizer
def self.extended(db)
type = case db.literal("'")
when "''''"
:standard
when "'\\''"
:backslash
when "N''''"
:n_standard
else
raise Error, "SQL log normalization is not supported on this database (' literalized as #{db.literal("'").inspect})"
end
db.instance_variable_set(:@sql_string_escape_type, type)
end
# Normalize the SQL before calling super.
def log_connection_yield(sql, conn, args=nil)
unless skip_logging?
sql = normalize_logged_sql(sql)
args = nil
end
super
end
# Replace literal strings and numbers in SQL with question mark placeholders.
def normalize_logged_sql(sql)
sql = sql.dup
sql.force_encoding('BINARY')
start_index = 0
check_n = @sql_string_escape_type == :n_standard
outside_string = true
if @sql_string_escape_type == :backslash
search_char = /[\\']/
escape_char_offset = 0
escape_char_value = 92 # backslash
else
search_char = "'"
escape_char_offset = 1
escape_char_value = 39 # apostrophe
end
# The approach used here goes against Sequel's philosophy of never attempting
# to parse SQL. However, parsing the SQL is basically the only way to implement
# this support with Sequel's design, and it's better to be pragmatic and accept
# this than not be able to support this.
# Replace literal strings
while outside_string && (index = start_index = sql.index("'", start_index))
if check_n && index != 0 && sql.getbyte(index-1) == 78 # N' start
start_index -= 1
end
index += 1
outside_string = false
while (index = sql.index(search_char, index)) && (sql.getbyte(index + escape_char_offset) == escape_char_value)
# skip escaped characters inside string literal
index += 2
end
if index
# Found end of string
sql[start_index..index] = '?'
start_index += 1
outside_string = true
end
end
# Replace integer and decimal floating point numbers
sql.gsub!(/\b-?\d+(?:\.\d+)?\b/, '?')
sql
end
end
Database.register_extension(:sql_log_normalizer, SQLLogNormalizer)
end
sequel-5.63.0/lib/sequel/extensions/sqlite_json_ops.rb 0000664 0000000 0000000 00000021162 14342141206 0023105 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The sqlite_json_ops extension adds support to Sequel's DSL to make
# it easier to call SQLite JSON functions and operators (added
# first in SQLite 3.38.0).
#
# To load the extension:
#
# Sequel.extension :sqlite_json_ops
#
# This extension works by calling methods on Sequel::SQLite::JSONOp objects,
# which you can create via Sequel.sqlite_json_op:
#
# j = Sequel.sqlite_json_op(:json_column)
#
# Also, on most Sequel expression objects, you can call the sqlite_json_op method
# to create a Sequel::SQLite::JSONOp object:
#
# j = Sequel[:json_column].sqlite_json_op
#
# If you have loaded the {core_extensions extension}[rdoc-ref:doc/core_extensions.rdoc],
# or you have loaded the core_refinements extension
# and have activated refinements for the file, you can also use Symbol#sqlite_json_op:
#
# j = :json_column.sqlite_json_op
#
# The following methods are available for Sequel::SQLite::JSONOp instances:
#
# j[1] # (json_column ->> 1)
# j.get(1) # (json_column ->> 1)
# j.get_json(1) # (json_column -> 1)
# j.extract('$.a') # json_extract(json_column, '$.a')
#
# j.array_length # json_array_length(json_column)
# j.type # json_type(json_column)
# j.valid # json_valid(json_column)
# j.json # json(json_column)
#
# j.insert('$.a', 1) # json_insert(json_column, '$.a', 1)
# j.set('$.a', 1) # json_set(json_column, '$.a', 1)
# j.replace('$.a', 1) # json_replace(json_column, '$.a', 1)
# j.remove('$.a') # json_remove(json_column, '$.a')
# j.patch('{"a":2}') # json_patch(json_column, '{"a":2}')
#
# j.each # json_each(json_column)
# j.tree # json_tree(json_column)
#
# Related modules: Sequel::SQLite::JSONOp
#
module Sequel
module SQLite
# The JSONOp class is a simple container for a single object that
# defines methods that yield Sequel expression objects representing
# SQLite json operators and functions.
#
# In the method documentation examples, assume that:
#
# json_op = Sequel.sqlite_json_op(:json)
class JSONOp < Sequel::SQL::Wrapper
GET = ["(".freeze, " ->> ".freeze, ")".freeze].freeze
private_constant :GET
GET_JSON = ["(".freeze, " -> ".freeze, ")".freeze].freeze
private_constant :GET_JSON
# Returns an expression for getting the JSON array element or object field
# at the specified path as a SQLite value.
#
# json_op[1] # (json ->> 1)
# json_op['a'] # (json ->> 'a')
# json_op['$.a.b'] # (json ->> '$.a.b')
# json_op['$[1][2]'] # (json ->> '$[1][2]')
def [](key)
json_op(GET, key)
end
alias get []
# Returns an expression for the length of the JSON array, or the JSON array at
# the given path.
#
# json_op.array_length # json_array_length(json)
# json_op.array_length('$[1]') # json_array_length(json, '$[1]')
def array_length(*args)
Sequel::SQL::NumericExpression.new(:NOOP, function(:array_length, *args))
end
# Returns an expression for a set of information extracted from the top-level
# members of the JSON array or object, or the top-level members of the JSON array
# or object at the given path.
#
# json_op.each # json_each(json)
# json_op.each('$.a') # json_each(json, '$.a')
def each(*args)
function(:each, *args)
end
# Returns an expression for the JSON array element or object field at the specified
# path as a SQLite value, but only accept paths as arguments, and allow the use of
# multiple paths.
#
# json_op.extract('$.a') # json_extract(json, '$.a')
# json_op.extract('$.a', '$.b') # json_extract(json, '$.a', '$.b')
def extract(*a)
function(:extract, *a)
end
# Returns an expression for getting the JSON array element or object field at the
# specified path as a JSON value.
#
# json_op.get_json(1) # (json -> 1)
# json_op.get_json('a') # (json -> 'a')
# json_op.get_json('$.a.b') # (json -> '$.a.b')
# json_op.get_json('$[1][2]') # (json -> '$[1][2]')
def get_json(key)
self.class.new(json_op(GET_JSON, key))
end
# Returns an expression for creating new entries at the given paths in the JSON array
# or object, but not overwriting existing entries.
#
# json_op.insert('$.a', 1) # json_insert(json, '$.a', 1)
# json_op.insert('$.a', 1, '$.b', 2) # json_insert(json, '$.a', 1, '$.b', 2)
def insert(path, value, *args)
wrapped_function(:insert, path, value, *args)
end
# Returns an expression for a minified version of the JSON.
#
# json_op.json # json(json)
def json
self.class.new(SQL::Function.new(:json, self))
end
alias minify json
# Returns an expression for updating the JSON object using the RFC 7396 MergePatch algorithm
#
# json_op.patch('{"a": 1, "b": null}') # json_patch(json, '{"a": 1, "b": null}')
def patch(json_patch)
wrapped_function(:patch, json_patch)
end
# Returns an expression for removing entries at the given paths from the JSON array or object.
#
# json_op.remove('$.a') # json_remove(json, '$.a')
# json_op.remove('$.a', '$.b') # json_remove(json, '$.a', '$.b')
def remove(path, *paths)
wrapped_function(:remove, path, *paths)
end
# Returns an expression for replacing entries at the given paths in the JSON array or object,
# but not creating new entries.
#
# json_op.replace('$.a', 1) # json_replace(json, '$.a', 1)
# json_op.replace('$.a', 1, '$.b', 2) # json_replace(json, '$.a', 1, '$.b', 2)
def replace(path, value, *args)
wrapped_function(:replace, path, value, *args)
end
# Returns an expression for creating or replacing entries at the given paths in the
# JSON array or object.
#
# json_op.set('$.a', 1) # json_set(json, '$.a', 1)
# json_op.set('$.a', 1, '$.b', 2) # json_set(json, '$.a', 1, '$.b', 2)
def set(path, value, *args)
wrapped_function(:set, path, value, *args)
end
# Returns an expression for a set of information extracted from the JSON array or object, or
# the JSON array or object at the given path.
#
# json_op.tree # json_tree(json)
# json_op.tree('$.a') # json_tree(json, '$.a')
def tree(*args)
function(:tree, *args)
end
# Returns an expression for the type of the JSON value or the JSON value at the given path.
#
# json_op.type # json_type(json)
# json_op.type('$[1]') # json_type(json, '$[1]')
def type(*args)
Sequel::SQL::StringExpression.new(:NOOP, function(:type, *args))
end
alias typeof type
# Returns a boolean expression for whether the JSON is valid or not.
def valid
Sequel::SQL::BooleanExpression.new(:NOOP, function(:valid))
end
private
# Internals of the [], get, get_json methods, using a placeholder literal string.
def json_op(str, args)
self.class.new(Sequel::SQL::PlaceholderLiteralString.new(str, [self, args]))
end
# Internals of the methods that return functions prefixed with +json_+.
def function(name, *args)
SQL::Function.new("json_#{name}", self, *args)
end
# Internals of the methods that return functions prefixed with +json_+, that
# return JSON values.
def wrapped_function(*args)
self.class.new(function(*args))
end
end
module JSONOpMethods
# Wrap the receiver in an JSONOp so you can easily use the SQLite
# json functions and operators with it.
def sqlite_json_op
JSONOp.new(self)
end
end
end
module SQL::Builders
# Return the object wrapped in an SQLite::JSONOp.
def sqlite_json_op(v)
case v
when SQLite::JSONOp
v
else
SQLite::JSONOp.new(v)
end
end
end
class SQL::GenericExpression
include Sequel::SQLite::JSONOpMethods
end
class LiteralString
include Sequel::SQLite::JSONOpMethods
end
end
# :nocov:
if Sequel.core_extensions?
class Symbol
include Sequel::SQLite::JSONOpMethods
end
end
if defined?(Sequel::CoreRefinements)
module Sequel::CoreRefinements
refine Symbol do
send INCLUDE_METH, Sequel::SQLite::JSONOpMethods
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/string_agg.rb 0000664 0000000 0000000 00000012104 14342141206 0022012 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The string_agg extension adds the ability to perform database-independent
# aggregate string concatenation. For example, with a table like:
#
# c1 | c2
# ---+---
# a | 1
# a | 2
# a | 3
# b | 4
#
# You can return a result set like:
#
# c1 | c2s
# ---+---
# a | 1,2,3
# b | 4
#
# First, you need to load the extension into the database:
#
# DB.extension :string_agg
#
# Then you can use the Sequel.string_agg method to return a Sequel
# expression:
#
# sa = Sequel.string_agg(:column_name)
# # or:
# sa = Sequel.string_agg(:column_name, '-') # custom separator
#
# You can specify the order in which the concatenation happens by
# calling +order+ on the expression:
#
# sa = Sequel.string_agg(:column_name).order(:other_column)
#
# Additionally, if you want to have the concatenation only operate
# on distinct values, you can call distinct:
#
# sa = Sequel.string_agg(:column_name).order(:other_column).distinct
#
# These expressions can be used in your datasets, or anywhere else that
# Sequel expressions are allowed:
#
# DB[:table].
# select_group(:c1).
# select_append(Sequel.string_agg(:c2))
#
# This extension currently supports the following databases:
#
# * PostgreSQL 9+
# * SQLAnywhere 12+
# * Oracle 11g+ (except distinct)
# * DB2 9.7+ (except distinct)
# * MySQL
# * HSQLDB
# * H2
#
# Related module: Sequel::SQL::StringAgg
#
module Sequel
module SQL
module Builders
# Return a StringAgg expression for an aggregate string concatenation.
def string_agg(*a)
StringAgg.new(*a)
end
end
# The StringAgg class represents an aggregate string concatenation.
class StringAgg < GenericExpression
include StringMethods
include StringConcatenationMethods
include InequalityMethods
include AliasMethods
include CastMethods
include OrderMethods
include PatternMatchMethods
include SubscriptMethods
# These methods are added to datasets using the string_agg
# extension, for the purposes of correctly literalizing StringAgg
# expressions for the appropriate database type.
module DatasetMethods
# Append the SQL fragment for the StringAgg expression to the SQL query.
def string_agg_sql_append(sql, sa)
if defined?(super)
return super
end
expr = sa.expr
separator = sa.separator || ","
order = sa.order_expr
distinct = sa.is_distinct?
case db_type = db.database_type
when :postgres, :sqlanywhere
f = Function.new(db_type == :postgres ? :string_agg : :list, expr, separator)
if order
f = f.order(*order)
end
if distinct
f = f.distinct
end
literal_append(sql, f)
when :mysql, :hsqldb, :h2
sql << "GROUP_CONCAT("
if distinct
sql << "DISTINCT "
end
literal_append(sql, expr)
if order
sql << " ORDER BY "
expression_list_append(sql, order)
end
sql << " SEPARATOR "
literal_append(sql, separator)
sql << ")"
when :oracle, :db2
if distinct
raise Error, "string_agg with distinct is not implemented on #{db.database_type}"
end
literal_append(sql, Function.new(:listagg, expr, separator))
if order
sql << " WITHIN GROUP (ORDER BY "
expression_list_append(sql, order)
sql << ")"
else
sql << " WITHIN GROUP (ORDER BY 1)"
end
else
raise Error, "string_agg is not implemented on #{db.database_type}"
end
end
end
# The string expression for each row that will be concatenated to the output.
attr_reader :expr
# The separator between each string expression.
attr_reader :separator
# The expression that the aggregation is ordered by.
attr_reader :order_expr
# Set the expression and separator
def initialize(expr, separator=nil)
@expr = expr
@separator = separator
yield self if defined?(yield)
freeze
end
# Whether the current expression uses distinct expressions
def is_distinct?
@distinct == true
end
# Return a modified StringAgg that uses distinct expressions
def distinct
self.class.new(@expr, @separator) do |sa|
sa.instance_variable_set(:@order_expr, @order_expr) if @order_expr
sa.instance_variable_set(:@distinct, true)
end
end
# Return a modified StringAgg with the given order
def order(*o)
self.class.new(@expr, @separator) do |sa|
sa.instance_variable_set(:@distinct, @distinct) if @distinct
sa.instance_variable_set(:@order_expr, o.empty? ? nil : o.freeze)
end
end
to_s_method :string_agg_sql
end
end
Dataset.register_extension(:string_agg, SQL::StringAgg::DatasetMethods)
end
sequel-5.63.0/lib/sequel/extensions/string_date_time.rb 0000664 0000000 0000000 00000002655 14342141206 0023221 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The string_date_time extension provides String instance methods
# for converting strings to dates and times (e.g. String#to_date), allowing
# for backwards compatibility with legacy Sequel code.
#
# These methods call +parse+ on the related class, and as such, can
# result in denial of service in older versions of Ruby for large
# untrusted input, and raise exceptions in newer versions of Ruby.
#
# To load the extension:
#
# Sequel.extension :string_date_time
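#
# Example (return values abbreviated):
#
#   "2009-07-01".to_date              # => Date instance
#   "2009-07-01 10:20:30".to_time     # => Time instance
#   "2009-07-01 10:20:30".to_datetime # => DateTime instance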
class String
# Converts a string into a Date object.
def to_date
Date.parse(self, Sequel.convert_two_digit_years)
rescue => e
raise Sequel.convert_exception_class(e, Sequel::InvalidValue)
end
# Converts a string into a DateTime object.
def to_datetime
DateTime.parse(self, Sequel.convert_two_digit_years)
rescue => e
raise Sequel.convert_exception_class(e, Sequel::InvalidValue)
end
# Converts a string into a Time or DateTime object, depending on the
# value of Sequel.datetime_class
def to_sequel_time
if Sequel.datetime_class == DateTime
DateTime.parse(self, Sequel.convert_two_digit_years)
else
Sequel.datetime_class.parse(self)
end
rescue => e
raise Sequel.convert_exception_class(e, Sequel::InvalidValue)
end
# Converts a string into a Time object.
def to_time
Time.parse(self)
rescue => e
raise Sequel.convert_exception_class(e, Sequel::InvalidValue)
end
end
sequel-5.63.0/lib/sequel/extensions/symbol_aref.rb 0000664 0000000 0000000 00000002434 14342141206 0022175 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The symbol_aref extension makes Symbol#[] support Symbol,
# Sequel::SQL::Identifier, and Sequel::SQL::QualifiedIdentifier instances,
# returning appropriate Sequel::SQL::QualifiedIdentifier instances. It's
# designed as a shortcut so that instead of:
#
# Sequel[:table][:column] # table.column
#
# you can just write:
#
# :table[:column] # table.column
#
# To load the extension:
#
# Sequel.extension :symbol_aref
#
# If you are using Ruby 2+, and you would like to use refinements, there
# is a refinement version of this in the symbol_aref_refinement extension.
#
# Related module: Sequel::SymbolAref
if RUBY_VERSION >= '2.0'
module Sequel::SymbolAref
def [](v)
case v
when Symbol, Sequel::SQL::Identifier, Sequel::SQL::QualifiedIdentifier
Sequel::SQL::QualifiedIdentifier.new(self, v)
else
super
end
end
end
class Symbol
prepend Sequel::SymbolAref
end
# :nocov:
else
class Symbol
if method_defined?(:[])
alias_method :aref_before_sequel, :[]
end
def [](v)
case v
when Symbol, Sequel::SQL::Identifier, Sequel::SQL::QualifiedIdentifier
Sequel::SQL::QualifiedIdentifier.new(self, v)
else
aref_before_sequel(v)
end
end
end
end
# :nocov:
sequel-5.63.0/lib/sequel/extensions/symbol_aref_refinement.rb 0000664 0000000 0000000 00000002144 14342141206 0024407 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The symbol_aref_refinement extension adds a refinement that makes
# Symbol#[] support Symbol, Sequel::SQL::Identifier, and
# Sequel::SQL::QualifiedIdentifier instances, returning appropriate
# Sequel::SQL::QualifiedIdentifier instances. It's designed as a
# shortcut so that instead of:
#
# Sequel[:table][:column] # table.column
#
# you can just write:
#
# :table[:column] # table.column
#
# To load the extension:
#
# Sequel.extension :symbol_aref_refinement
#
# To enable the refinement for the current file:
#
# using Sequel::SymbolAref
#
# If you would like this extension to be enabled globally instead
# of as a refinement, use the symbol_aref extension.
#
# Related module: Sequel::SymbolAref
# :nocov:
raise(Sequel::Error, "Refinements require ruby 2.0.0 or greater") unless RUBY_VERSION >= '2.0.0'
# :nocov:
module Sequel::SymbolAref
refine Symbol do
def [](v)
case v
when Symbol, Sequel::SQL::Identifier, Sequel::SQL::QualifiedIdentifier
Sequel::SQL::QualifiedIdentifier.new(self, v)
else
super
end
end
end
end
sequel-5.63.0/lib/sequel/extensions/symbol_as.rb 0000664 0000000 0000000 00000001017 14342141206 0021657 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The symbol_as extension adds Symbol#as, for creating
# Sequel::SQL::AliasedExpression objects. It's
# designed as a shortcut so that instead of:
#
# Sequel[:column].as(:alias)
#
# you can just write:
#
# :column.as(:alias)
#
# To load the extension:
#
# Sequel.extension :symbol_as
#
# If you are using Ruby 2+, and you would like to use refinements, there
# is a refinement version of this in the symbol_as_refinement extension.
#
class Symbol
include Sequel::SQL::AliasMethods
end
sequel-5.63.0/lib/sequel/extensions/symbol_as_refinement.rb 0000664 0000000 0000000 00000001622 14342141206 0024075 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The symbol_as_refinement extension adds a refinement that makes
# Symbol#as return Sequel::SQL::AliasedExpression instances. It's
# designed as a shortcut so that instead of:
#
# Sequel[:column].as(:alias) # column AS alias
#
# you can just write:
#
# :column.as(:alias) # column AS alias
#
# To load the extension:
#
# Sequel.extension :symbol_as_refinement
#
# To enable the refinement for the current file:
#
# using Sequel::SymbolAs
#
# If you would like this extension to be enabled globally instead
# of as a refinement, use the symbol_as extension.
#
# Related module: Sequel::SymbolAs
# :nocov:
raise(Sequel::Error, "Refinements require ruby 2.0.0 or greater") unless RUBY_VERSION >= '2.0.0'
# :nocov:
module Sequel::SymbolAs
refine Symbol do
def as(aliaz, columns=nil)
Sequel::SQL::AliasedExpression.new(self, aliaz, columns)
end
end
end
sequel-5.63.0/lib/sequel/extensions/synchronize_sql.rb 0000664 0000000 0000000 00000003402 14342141206 0023121 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The synchronize_sql extension checks out a connection from the pool while
# generating an SQL string. In cases where a connection is necessary
# in order to properly escape input, and multiple inputs in the query need
# escaping, this can result in fewer connection checkouts and better
# overall performance. In other cases this results in a performance decrease
# because a connection is checked out and either not used or kept checked out
# longer than necessary.
#
# The adapters where this extension may improve performance include amalgalite,
# mysql2, postgres, jdbc/postgresql, and tinytds. In these adapters, escaping
# strings requires a connection object, as proper escaping requires calling
# an escaping method on the connection object.
#
# This extension is most helpful when dealing with queries with lots of
# strings that need escaping (e.g. IN queries with long lists). By default,
# a connection will be checked out and back in for each string to be escaped,
# which under high contention can cause the query to spend longer generating
# the SQL string than the actual pool timeout (since every individual checkout
# will take less than the timeout, but the sum of all of them can be greater).
#
# This extension is unnecessary and will decrease performance if the single
# threaded connection pool is used.
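#
# To load the extension into all of a database's datasets (a single
# dataset can also be extended):
#
#   DB.extension(:synchronize_sql)
#   # or
#   ds = DB[:table].extension(:synchronize_sql)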
#
module Sequel
class Dataset
module SynchronizeSQL
%w'insert select update delete'.each do |type|
define_method(:"#{type}_sql") do |*args|
if @opts[:sql].is_a?(String)
return super(*args)
end
db.synchronize(@opts[:server]) do
super(*args)
end
end
end
end
register_extension(:synchronize_sql, SynchronizeSQL)
end
end
sequel-5.63.0/lib/sequel/extensions/thread_local_timezones.rb 0000664 0000000 0000000 00000004164 14342141206 0024413 0 ustar 00root root 0000000 0000000 # frozen-string-literal: true
#
# The thread_local_timezones extension allows you to set a per-thread timezone that
# will override the default global timezone while the thread is executing. The
# main use case is for web applications that execute each request in its own thread,
# and want to set the timezones based on the request.
#
# To load the extension:
#
# Sequel.extension :thread_local_timezones
#
# The most common example is having the database always store time in
# UTC, but have the application deal with the timezone of the current
# user. That can be done with:
#
# Sequel.database_timezone = :utc
# # In each thread:
# Sequel.thread_application_timezone = current_user.timezone
#
# This extension is designed to work with the named_timezones extension.
#
# This extension adds the thread_application_timezone=, thread_database_timezone=,
# and thread_typecast_timezone= methods to the Sequel module. It overrides
# the application_timezone, database_timezone, and typecast_timezone
# methods to check the related thread local timezone first, and use it if present.
# If the related thread local timezone is not present, it falls back to the
# default global timezone.
#
# There is one special case of note. If you have a default global timezone
# and you want to have a nil thread local timezone, you have to set the thread
# local value to :nil instead of nil:
#
# Sequel.application_timezone = :utc
# Sequel.thread_application_timezone = nil
# Sequel.application_timezone # => :utc
# Sequel.thread_application_timezone = :nil
# Sequel.application_timezone # => nil
#
# Related module: Sequel::ThreadLocalTimezones
#
module Sequel
module ThreadLocalTimezones
%w'application database typecast'.each do |t|
class_eval("def thread_#{t}_timezone=(tz); Thread.current[:#{t}_timezone] = convert_timezone_setter_arg(tz); end", __FILE__, __LINE__)
      class_eval(<<END, __FILE__, __LINE__ + 1)
        def #{t}_timezone
          if tz = Thread.current[:#{t}_timezone]
            tz unless tz == :nil
          else
            @#{t}_timezone
          end
        end
END
    end
  end

  extend ThreadLocalTimezones
end