pax_global_header00006660000000000000000000000064143331607420014515gustar00rootroot0000000000000052 comment=be2429722bb88517bae6473b64164890c7ab8392 moneta-1.5.2/000077500000000000000000000000001433316074200130055ustar00rootroot00000000000000moneta-1.5.2/.github/000077500000000000000000000000001433316074200143455ustar00rootroot00000000000000moneta-1.5.2/.github/workflows/000077500000000000000000000000001433316074200164025ustar00rootroot00000000000000moneta-1.5.2/.github/workflows/ruby.yml000066400000000000000000000232411433316074200201100ustar00rootroot00000000000000name: Ruby CI on: push: branches: [ main ] pull_request: branches: [ main ] jobs: adapters: runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} strategy: fail-fast: false matrix: ruby-version: ['3.0', '2.7', '2.6', '2.5', '2.4', jruby] adapter: - Client - Cookie - Daybreak #- dbm - File - Fog - GDBM - HashFile #- HBase - KyotoCabinet - LevelDB - LMDB - LocalMemCache - LRUHash - Memory - 'Null' - PStore - RestClient #- riak - SDBM - TDB - TokyoCabinet - TokyoTyrant - YAML env: BUNDLE_WITH: ${{ matrix.adapter }} snappy steps: - uses: actions/checkout@v2 - name: Apt update run: sudo apt-get update - name: Apt install dependencies run: sudo apt-get install -y libkyotocabinet-dev libleveldb-dev libtdb-dev libtokyocabinet-dev tokyotyrant libgdbm-dev - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t ~unstable -t adapter:${{ matrix.adapter }} -- spec/moneta couch: name: "Couch adapter" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} services: couch: image: couchdb env: COUCHDB_USER: admin COUCHDB_PASSWORD: password options: >- --health-cmd "curl -s http://127.0.0.1:5984/" --health-interval 10s --health-timeout 5s --health-retries 5 ports: - 5984:5984 strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] 
env: BUNDLE_WITH: RestClient steps: - uses: actions/checkout@v2 - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t adapter:Couch -- spec/moneta cassandra: name: "Cassandra adapter" runs-on: ubuntu-latest services: cassandra: image: cassandra options: >- --health-cmd "cqlsh -e \"SELECT cql_version FROM system.local\"" --health-interval 10s --health-timeout 10s --health-retries 10 ports: - 9042:9042 strategy: matrix: ruby-version: ['2.7', '2.4', jruby] env: BUNDLE_WITH: Cassandra steps: - uses: actions/checkout@v2 - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t adapter:Cassandra -- spec/moneta mysql-adapters: name: "MySQL adapters" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} services: mysql: image: mysql env: MYSQL_ROOT_PASSWORD: moneta options: >- --health-cmd "mysqladmin ping -h 127.0.0.1 --silent" --health-interval 10s --health-timeout 5s --health-retries 5 ports: - 3306:3306 strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] env: BUNDLE_WITH: ActiveRecord Sequel DataMapper mysql MYSQL_HOST: 127.0.0.1 MONETA_MYSQL_PASSWORD: moneta steps: - uses: actions/checkout@v2 - name: Apt update run: sudo apt-get update - name: Apt install mysql packages run: sudo apt-get install -y libmysqlclient-dev - name: Create Database moneta run: mysqladmin -h 127.0.0.1 -u root create moneta env: MYSQL_PWD: moneta - name: Create Database moneta2 run: mysqladmin -h 127.0.0.1 -u root create moneta2 env: MYSQL_PWD: moneta - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t mysql -- spec/moneta postgres-adapters: name: 
"Postgres adapters" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} services: postgres: image: postgres:13.6 env: POSTGRES_PASSWORD: moneta options: >- --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 ports: - 5432:5432 strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] env: BUNDLE_WITH: ActiveRecord Sequel DataMapper postgresql PGHOST: localhost PGUSER: postgres PGPASSWORD: moneta steps: - uses: actions/checkout@v2 - name: Create Database moneta1 run: createdb moneta1 - name: Create Database moneta2 run: createdb moneta2 - name: Add hstore support to moneta1 run: psql -c 'create extension hstore;' moneta1 - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t postgres -- spec/moneta sqlite-adapters: name: "SQLite adapters" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] env: BUNDLE_WITH: ActiveRecord Sequel DataMapper sqlite steps: - uses: actions/checkout@v2 - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t sqlite -- spec/moneta redis: name: "Redis adapters" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} services: redis: image: redis options: >- --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 ports: - 6379:6379 strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] env: BUNDLE_WITH: Redis steps: - uses: actions/checkout@v2 - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t adapter:Redis -t redis -- 
spec/moneta memcached: name: "Memcached adapters" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] env: BUNDLE_WITH: Memcached steps: - uses: actions/checkout@v2 - name: Apt update run: sudo apt-get update - name: Apt install memcached run: sudo apt-get install -y memcached libsasl2-dev - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t ~unstable -t adapter:Memcached -t memcached -- spec/moneta mongo: name: "Mongo adapter" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} services: mongo: image: mongo:4.4 options: >- --health-cmd "mongo --eval db" --health-interval 10s --health-timeout 5s --health-retries 5 ports: - 27017:27017 strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] env: BUNDLE_WITH: Mongo steps: - uses: actions/checkout@v2 - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t adapter:Mongo -- spec/moneta residual-specs: name: "Proxies and other specs" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} strategy: matrix: ruby-version: ['3.0', '2.7', '2.4', jruby] env: BUNDLE_WITH: transformers GDBM Fog steps: - uses: actions/checkout@v2 - name: Apt update run: sudo apt-get update - name: Apt install dependencies run: sudo apt-get install -y libgdbm-dev liblzo2-dev - name: Set up Ruby ${{ matrix.ruby-version }} uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler: latest bundler-cache: true - name: Rspec run: bundle exec rspec -t ~unstable --exclude-pattern "./spec/{moneta/adapters/**/*,active_support/**/*}" - name: Minitest run: bundle exec ruby test/action_dispatch/session_moneta_store_test.rb 
rubocop: name: "Rubocop" runs-on: ubuntu-latest continue-on-error: ${{ matrix.ruby-version == '3.0' }} steps: - uses: actions/checkout@v2 - name: Set up Ruby uses: ruby/setup-ruby@v1 with: ruby-version: '3.0' bundler: latest bundler-cache: true - name: Rubocop run: bundle exec rubocop lib moneta-1.5.2/.gitignore000066400000000000000000000002031433316074200147700ustar00rootroot00000000000000.yardoc doc attic spec/tmp *~ *.swp *.rdb .#* script/benchmarks.tmp Gemfile.lock logs secure .bundle .ruby-version .byebug_history moneta-1.5.2/.rspec000066400000000000000000000000721433316074200141210ustar00rootroot00000000000000--color --require helper --tag ~unsupported --tag ~broken moneta-1.5.2/.rubocop.yml000066400000000000000000000055551433316074200152710ustar00rootroot00000000000000AllCops: TargetRubyVersion: 2.3.0 UseCache: true Exclude: - hbase/**/* - script/**/* - vendor/**/* Layout/ArrayAlignment: Exclude: - lib/moneta/transformer/config.rb Layout/DotPosition: EnforcedStyle: leading Layout/EmptyLineAfterGuardClause: Enabled: false Layout/ExtraSpacing: Exclude: - lib/moneta.rb - lib/moneta/transformer/config.rb Layout/HashAlignment: Exclude: - lib/moneta/transformer/config.rb Layout/LineLength: Max: 160 Layout/MultilineMethodCallIndentation: EnforcedStyle: indented Layout/MultilineOperationIndentation: EnforcedStyle: indented Layout/SpaceInsideArrayLiteralBrackets: Exclude: - lib/moneta/transformer/config.rb Lint/AssignmentInCondition: Enabled: false Lint/RaiseException: Enabled: true Lint/ShadowedException: Enabled: false Lint/ShadowingOuterLocalVariable: Enabled: false Lint/StructNewOverride: Enabled: true Lint/UnusedMethodArgument: Enabled: false Lint/Void: Exclude: - spec/**/* Metrics/AbcSize: Enabled: false Metrics/BlockNesting: Enabled: false Metrics/ClassLength: Enabled: false Metrics/CyclomaticComplexity: Enabled: false Metrics/BlockLength: Exclude: - spec/**/* Metrics/MethodLength: Enabled: false Metrics/ModuleLength: Enabled: false Metrics/ParameterLists: 
Enabled: false Metrics/PerceivedComplexity: Enabled: false Naming/ConstantName: Exclude: - lib/rack/cache/moneta.rb Naming/RescuedExceptionsVariableName: Enabled: false Naming/MemoizedInstanceVariableName: Enabled: false Security/MarshalLoad: Enabled: false Style/AndOr: Enabled: false Style/CaseEquality: Enabled: false Style/CharacterLiteral: Enabled: false Style/DoubleNegation: Enabled: false Style/GlobalVars: Exclude: - spec/moneta/adapters/cassandra/helper.rb Style/FrozenStringLiteralComment: Enabled: false Style/FormatString: Enabled: false Style/GuardClause: Enabled: false Style/HashEachMethods: Enabled: true Style/HashTransformKeys: Enabled: true Style/HashTransformValues: Enabled: true Style/IfUnlessModifier: Enabled: false Style/Lambda: EnforcedStyle: lambda Style/ModuleFunction: Enabled: false Style/MultilineIfModifier: Enabled: false Style/MutableConstant: Exclude: - lib/rack/cache/moneta.rb Style/NilComparison: EnforcedStyle: comparison Style/NumericLiterals: Enabled: false Style/NumericPredicate: EnforcedStyle: comparison Style/NonNilCheck: Enabled: false Style/ParallelAssignment: Enabled: false Style/PercentLiteralDelimiters: Enabled: false Style/PreferredHashMethods: Enabled: false Style/RescueStandardError: EnforcedStyle: implicit Style/RescueModifier: Enabled: false Style/SafeNavigation: Enabled: false Style/StringLiterals: Enabled: false Style/SymbolArray: Enabled: false Style/SymbolProc: Enabled: false Style/YodaCondition: Enabled: false moneta-1.5.2/.yardopts000066400000000000000000000000611433316074200146500ustar00rootroot00000000000000- README.md SPEC.md CHANGES LICENSE CONTRIBUTORS moneta-1.5.2/CHANGES000066400000000000000000000215231433316074200140030ustar00rootroot000000000000001.5.2 * Proxy - handle returning config when the adapter does not use config * Avoid calling `supports?(:expires)` when expiry isn't needed * Adapters::Mongo - fix `merge!` behaviour when no values are inserted 1.5.1 * Adapters::File - fix an implicit hash issue in 
Ruby 3 (#222) 1.5.0 * Adapters - internally, most (all?) adapters now inherit from a base Adapter class * Adapters - adapter configuration is now accessed via a `config` method on each adapter - see the code for examples. This shouldn't affect users unless they were directly modifying adapters' attributes. * Redis - fix deprecation warnings for Redis 4.6+ multi/pipelined handling (#215) * Mongo - slight improvement to error handling 1.4.2 * Pool - fix busy-loop issue (#197) 1.4.1 * Adapters::Mongo - deprecate :db option, document :database option (#194) * Adapters::Mongo - add retries to increment operation 1.4.0 * Adapters::Mongo - drop support for moped gem (#182) * Adapters::Redis - use #exists? where available (#189) * Some reorganisation of code into more separate files (#177) 1.3.0 * Transformer - add :each_key support (#170) * Server - add :each_key support, use non-blocking IO (#165) * Builder - dup options before passing to adapter/proxy (#174) * Adapter::Couch - add HTTP basic auth support * Support MRI 2.7.0 (#172) * Minimum required MRI version is now 2.3.0 (#172) 1.2.1 * Transformer - fix :escape transformer deserialize implementation (#168) 1.2.0 * Adapters::Sequel - fix for compatibility with new version of JDBC SQLite * Adapters::Couch - refactor of error handling, #clear, #merge!, #slice, rev caching * Fallback - add fallback proxy (#162) * Pool - rewrite to enable limiting of size, gradual shrinking * Enumerable - add proxy providing Enumerable API (using #each_key) * Adapters::Couch, Adapters::RestClient - add Faraday :adapter option * Adapters::Couch - add :full_commit and :batch options to some operations * Adapters::LRUHash - rewrite to take advantage of ordered hashes * Adapters::ActiveRecord - recover from deadlock during increment 1.1.1 * Adapters::Sequel - use prepared statements * Adapters::Sqlite - use upsert for increment where supported 1.1.0 * Adapters::ActiveRecord - rewrite to use Arel directly; support for Rails 5 * Moneta::Server 
- close all connections when stopping * Moneta::Shared - recover from socket errors * Transformer - add :urlsafe_base64; use this by default for keys with Couch adapter * Adapters::MongoMoped - recover from failed increment * Moneta::Pool - fix race condition in #pop (#144) * Moneta::Client - raise EOFError if a read fails * Moneta::Expires - use Rational objects to get much more accurate time resolution * Moneta::Lock/Moneta::Pool - allow wrapped methods to call other wrapped methods * Adapters::Sequel - add optimisations for MySQL, PostgreSQL and SQLite * Adapters::Sequel - add Postgres+HStore backend * Add Adapters::ActiveSupportCache * Adapters::Sqlite - add :journal_mode option * Add table creation options to Sequel and ActiveRecord adapters * Adapters::ActiveRecord - support for forking (#159) * Adapters::Cassandra - rewrite to use cassandra-driver gem (#81) * Adapters::Couch - add a LRUHash to cache document revs * Adapters::KyotoCabinet - implement atomic increment * Add :each_key feature and implemented on many adapters; add Moneta::WeakEachKey (#143; see feature matrix) * Add bulk read/write methods to the spec; added default implementation to Defaults and fast versions in many adapters (#116; see feature matrix) * First class support for latest JRuby (#160) * Minimum required MRI version is now 2.2.2 (#135) 1.0.0 * Adapters::Sequel - allow usage of Sequel extensions and connection validation * ActiveSupport::Cache::MonetaStore - dup options before mutating them * ActiveSupport::Cache::MonetaStore - allow writing raw values 0.8.1 * Adapters::TokyoTyrant - more consistent error handling * Adapters::MongoMoped - support for moped gem v2.0 * Adapters::MongoOfficial - support for mongo gem versions 2-4 * Adapters::File - fix a bug in #load (#74) * Adapters::LRUHash - allow to disable the limits by passing nil for max_size, max_count * Transformer - don't use OpenSSL::Digest::Digest namespace * Adapters::Sequel - fix issue with unknown "blob" type * 
Rack::Cache - fix deprecated require paths * Adapters::MemcachedNative - properly close connections * Transformer - support bson gem versions 2-4 * Transformer - switch to rbzip2 gem for bzip2 support * Adapters::MemcachedDalli - #create returns a boolean 0.8.0 * Rename Moneta::Adapters::Mongo to Moneta::Adapters::MongoOfficial * Add Moneta::Adapters::MongoMoped * Drop Ruby 1.8 support 0.7.20 * Adapters::LRUHash: add option :max_value * Moneta.new(:Couch, :Riak, :RestClient): use urlencode instead of base64 for key encoding) * Transformer: Add :hex encoder * Transformer: Don't wrap object in array for JSON serialization * Transformer: Add :php serializer * Moneta.new(:Sequel) - Don't encode blob data using base64 * Moneta::Adapters::LMDB added (Symas Lightning Memory-Mapped Database) * Moneta::Adapters::Sequel - Fix for https://github.com/jeremyevans/sequel/issues/715 0.7.19 * ActionDispatch::Session::MonetaStore fixed for Rails 4 * Moneta::Server: Tries now to remove stale unix socket * Moneta::Server: More robust and better performance 0.7.18 * Adapters::File#increment and #create fixed on JRuby * Adapters::Couch and Adapters::Mongo can store hashes directly as documents. It is not necessary to serialize values as strings anymore. 
* Adapters::Couch#create added * Pool thread safety improved * Transformer: Add CityHash 0.7.17 * Transformer: LZ4 compression added 0.7.16 * Better builder validation * Adapters::Sequel: check for correct exceptions 0.7.15 * CONTRIBUTORS file added * Adapters::File#increment fixed 0.7.14 * Adapters::ActiveRecord, Adapters::Sequel: store values as blobs * Adapters::ActiveRecord fixed and improved 0.7.13 * Adapters::ActiveRecord: Use connection_pool * Adapters::File: Race condition in #increment fixed 0.7.12 * Concurrency tests added * Bugfixes for File, Sqlite, Sequel and Datamapper, ActiveRecord 0.7.11 * Logger: Add option :file * Adapters::TokyoTyrant supports both native (ruby-tokyotyrant) and pure-ruby tokyotyrant gems * Adapters::Couch use Faraday directly instead of buggy CouchRest * Adapters::RestClient use Faraday * Transformer: add quoted printable encoding (:qp) 0.7.10 * Adapters::TokyoTyrant added * Add attr_reader :backend and option :backend to some adapters * Cache rename #backend to #adapter 0.7.9 * Adapters::KyotoCabinet added * Feature detection methods #features and #supports? 
added * Validity checks added which check features 0.7.8 * Adapters::Memcached: switched to Dalli by default * Adapters::Daybreak: add option :sync to load and store * Adapters::LRUHash: add option :max_count * Adapters::Mongo: add options :user and :password * Adapters::Mongo: Correctly close connection * Adapters::Redis: Correctly close connection * Transformer: add inspect key transformer * Added #create method to atomically create entries * Added WeakCreate and WeakIncrement proxies * Added Mutex and Semaphore synchronization primitives for shared/distributed database locks * Rename unix socket options from :file to :socket 0.7.6 * Adapters::Daybreak: api changed * Adapters::File: flock fix for jruby * Transformer: add to_s key transformer 0.7.5 * OptionsSupport#with: Add support to insert additional proxies * Builder#adapter: Accepts Moneta store instance now 0.7.4 * Transformer: fix truncate * Adapters::RestClient: raise error if store fails * Adapters::TDB added * Adapters::Daybreak added * Adapters::Mongo - Expiration and increment support added * Pool proxy added * Mixin ExpiresSupport added * Expiration value handling unified * 0 and false are interpreted as persist value * Adapters::RestClient uses net/http now 0.7.3 * Added Adapters::RestClient * Added Rack::MonetaRest * Added Rack::MonetaStore 0.7.2 * Renamed WithOptions to OptionSupport * Refactored Base in Defaults mixin * Removed Transformer option :quiet * Transformer might raise an exception if an invalid value is transformed * Expires middleware only wraps Arrays and nils in an Array if no expiration time is given (backward compatible change) * Moneta middlewares are not allowed to modify option hash given to functions like #load and #store 0.7.1 * Memcached: Use binary protocol and no base64 encoding of the keys * Transformer: Remove newlines from base64 encodes values * Server: Add method #run which will block and #running? 
to allow forking * SDBM: #store might raise errors (Don't use SDBM, it is unstable!) * Add #decrement method * Fix #fetch to handle false correctly * Fix Expires middleware to handle boolean and nil values correctly * Base64 encode Riak keys since Riak needs valid UTF-8 for the REST interface 0.7.0 * Major rewrite by Daniel Mendler 0.6.0 * First public release by Yehuda Katz moneta-1.5.2/CONTRIBUTORS000066400000000000000000000030301433316074200146610ustar00rootroot00000000000000Adrian Madrid Alastair Pharo Alejandro Crosa Alessio Signorini Anthony Eden Antoine Beaupré Antonio Terceiro Atoxhybrid AtoxIO Ben Schwarz Benjamin Yu Corey Smith Daniel Mendler Denis Defreyne Derek Kastner Dylan Egan Hampton Catlin Hannes Georg Hendrik Beskow Jari Bakken Jay Mitchell Jeremy Voorhis Jon Crosby Jonathan Gnagy lakshan Mal McKay Marek Skrobacki Mauro Asprea Nathaniel Bibler Olle Jonsson Orien Madgwick <_@orien.io> Patrik Rak Piotr Murach Potapov Sergey Quin Hoxie Ryan T. Hosford Scott Wadden Sven Riedel Thomas R. 
Koll Timo Goebel Tom Meier Tony Han Xavier Shay Yehuda Katz moneta-1.5.2/Gemfile000066400000000000000000000102241433316074200142770ustar00rootroot00000000000000source 'https://rubygems.org' gemspec group :transformers, optional: true do group :tnet, optional: true do gem 'tnetstring' end group :bencode, optional: true do gem 'bencode' end group :ox, optional: true do gem 'ox', platforms: :ruby end group :bert, optional: true do gem 'bert', platforms: :ruby end group :php, optional: true do gem 'php-serialize' end group :bson, optional: true do gem 'bson', '>= 4.0.0' end group :msgpack, optional: true do gem 'msgpack', platforms: :ruby gem 'msgpack-jruby', platforms: :jruby end # Compressors used by Transformer group :bzip2, optional: true do gem 'rbzip2', '>= 0.3.0' end group :lz4, optional: true do gem 'lz4-ruby', platforms: :ruby end group :lzma, optional: true do gem 'ruby-lzma', platforms: :ruby end group :lzo, optional: true do gem 'lzoruby', platforms: :ruby end group :snappy, optional: true do gem 'snappy', platforms: :ruby end # Hash transformer library group :city, optional: true do gem 'cityhash', platforms: :ruby end end # Backends group :Daybreak, optional: true do gem 'daybreak' end group :ActiveRecord, optional: true do gem 'activerecord', '~> 5.2' end group :Redis, optional: true do gem 'redis', '~> 4.2' end group :Mongo, optional: true do gem 'mongo', '>= 2' end group :Sequel, optional: true do gem 'sequel', '5.52.0' end group :Memcached, optional: true do group :MemcachedDalli, optional: true do gem 'dalli', '~> 2.7.11' end group :MemcachedNative, optional: true do gem 'jruby-memcached', platforms: :jruby gem 'memcached', platforms: :ruby end end group :Riak, optional: true do gem 'riak-client' end group :Cassandra, optional: true do install_if lambda { RUBY_ENGINE != 'ruby' || Gem::Version.new(RUBY_VERSION) < Gem::Version.new('3.0.0') } do gem 'cassandra-driver' end end group :TokyoTyrant, optional: true do gem 'tokyotyrant' end group :HBase, 
optional: true do gem 'hbaserb' end group :LocalMemCache, optional: true do gem 'localmemcache', platforms: :ruby end group :TDB, optional: true do gem 'tdb', platforms: :ruby end group :LevelDB, optional: true do gem 'leveldb-ruby', platforms: :ruby end group :LMDB, optional: true do gem 'lmdb', platforms: :mri end group :TokyoCabinet, optional: true do gem 'tokyocabinet', platforms: :ruby end group :KyotoCabinet, optional: true do install_if lambda { Gem::Version.new(RUBY_VERSION) < Gem::Version.new('2.7.0') } do gem 'kyotocabinet-ruby-reanimated', platforms: :ruby end end group :H2, optional: true do gem 'activerecord-jdbch2-adapter', platforms: :jruby, github: 'jruby/activerecord-jdbc-adapter', glob: 'activerecord-jdbch2-adapter/*.gemspec', branch: '52-stable' end group :GDBM, optional: true do gem 'ffi-gdbm', platforms: :jruby end group :RestClient do gem 'faraday' gem 'webrick' end group :DataMapper, optional: true do install_if lambda { RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) < Gem::Version.new('3.0.0') } do gem 'dm-core' gem 'dm-migrations' gem 'dm-mysql-adapter' end end group :Fog, optional: true do gem 'fog-aws', '>= 1.11.1' gem 'mime-types' end group :mysql, optional: true do gem 'activerecord-jdbcmysql-adapter', platforms: :jruby gem 'mysql2', platforms: :ruby end group :sqlite, optional: true do gem 'activerecord-jdbcsqlite3-adapter', platforms: :jruby gem 'sqlite3', '~> 1.5.3', platforms: :ruby end group :postgresql, optional: true do gem 'activerecord-jdbcpostgresql-adapter', platforms: :jruby gem 'pg', platforms: :ruby end group :SDBM, optional: true do install_if lambda { Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0') } do gem 'sdbm', platforms: :ruby end end # Rack integration testing group :rack do gem 'rack' gem 'rack-cache' end # Rails integration testing group :rails do gem 'actionpack', '~> 5.2.0' gem 'minitest', '~> 5.0' end # Used for generating the feature matrix group :doc, optional: true do gem 'kramdown', 
'~> 2.3.0' gem 'yard', '~> 0.9.20' end # Used for running a dev console group :console, optional: true do gem 'irb' gem 'rdoc' end moneta-1.5.2/LICENSE000066400000000000000000000021061433316074200140110ustar00rootroot00000000000000Copyright (c) 2009 - 2019 Daniel Mendler, Yehuda Katz, Alastair Pharo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
moneta-1.5.2/README.md000066400000000000000000001645521433316074200143010ustar00rootroot00000000000000# Moneta: A unified interface for key/value stores [![Gem Version](https://badge.fury.io/rb/moneta.svg)](http://rubygems.org/gems/moneta) [![Build Status](https://github.com/moneta-rb/moneta/actions/workflows/ruby.yml/badge.svg)](https://github.com/moneta-rb/moneta/actions/workflows/ruby.yml) [![Code Climate](https://codeclimate.com/github/moneta-rb/moneta.svg)](https://codeclimate.com/github/moneta-rb/moneta) [![Flattr this git repo](http://api.flattr.com/button/flattr-badge-large.png)](https://flattr.com/submit/auto?user_id=min4d&url=https://github.com/moneta-rb/moneta&title=Moneta&language=&tags=github&category=software) Moneta provides a standard interface for interacting with various kinds of key/value stores. Moneta supports the well-known NoSQL and document based stores. A short overview of the features: * Supports a lot of backends with consistent behaviour (See below) * Allows a full configuration of the serialization -> compression -> adapter stack using proxies (Similar to [Rack middlewares](http://rack.github.com/)) * Configurable serialization via `Moneta::Transformer` proxy (Marshal/JSON/YAML and many more) * Configurable value compression via `Moneta::Transformer` proxy (Zlib, Snappy, LZMA, ...) 
* Configurable key transformation via `Moneta::Transformer` proxy * Expiration for all stores (Added via proxy `Moneta::Expires` if not supported natively) * Atomic operations * Atomic incrementation and decrementation for most stores (Method `#increment` and `#decrement`) * Atomic creation of entries (Method `#create`) * Shared/distributed database-wide synchronization primitives `Moneta::Mutex` and `Moneta::Semaphore` * Includes a simple pure-ruby key/value server (`Moneta::Server`) and client (`Moneta::Adapters::Client`) * Integration with [Rails](http://rubyonrails.org/), [Rack](http://rack.github.com/)/[Rack-Cache](https://github.com/rtomayko/rack-cache), [Sinatra](http://sinatrarb.com/), [Padrino](http://padrinorb.com) and [Ramaze](http://ramaze.net/). If you are not yet convinced, you might ask why? What are the goals of the project? * Get people started quickly with key/value stores! Therefore all the adapters are included in the gem and you are ready to go. [Tilt](https://github.com/rtomayko/tilt) does the same for template languages. * Make it easy to compare different key/value stores and benchmark them * To hide a lot of different and maybe complex APIs behind one well-designed and simple Moneta API * Give people a starting point or example code to start working with their favourite key/value store. Feel free to copy code, please mention Moneta then :) * Create a reusable piece of code, since similar things are solved over and over again ([Rails](http://rubyonrails.org/) brings its own cache stores, and many frameworks do the same...) Moneta is tested thoroughly using [GitHub Actions](https://github.com/moneta-rb/moneta/actions). 
------ ## Getting started Install Moneta via Rubygems ~~~ $ gem install moneta ~~~ or add it to your Gemfile ~~~ ruby gem 'moneta' ~~~ Now you are ready to go: ~~~ ruby require 'moneta' # Create a simple file store store = Moneta.new(:File, dir: 'moneta') # Store some entries store['key'] = 'value' # Read entry store.key?('key') # returns true store['key'] # returns 'value' store.close ~~~ ------ ## Links * Source: * Bugs: * Tests and benchmarks: * API documentation: * Latest Gem: * GitHub main: * Changelog: In case you are wondering, Moneta uses [Semantic Versioning](https://semver.org/) since v1.0.0. ------ ## Supported backends Out of the box, it supports the following backends. Use the backend name symbol in the Moneta constructor (e.g. `Moneta.new(:Memory)`). * Memory: * In-memory store (`:Memory`) * LRU hash - prefer this over :Memory! (`:LRUHash`) * [LocalMemCache](http://localmemcache.rubyforge.org/) (`:LocalMemCache`) * [Memcached](http://memcached.org/) store (`:Memcached`, `:MemcachedNative` and `:MemcachedDalli`) * Relational Databases: * [DataMapper](http://datamapper.org/) (`:DataMapper`) * [ActiveRecord](https://rubygems.org/gems/activerecord) (`:ActiveRecord`) * [Sequel](http://sequel.rubyforge.org/) (`:Sequel`) * [Sqlite3](http://sqlite.org/) (`:Sqlite`) * Filesystem: * [PStore](http://ruby-doc.org/stdlib/libdoc/pstore/rdoc/PStore.html) (`:PStore`) * [YAML](http://www.ruby-doc.org/stdlib/libdoc/yaml/rdoc/YAML/Store.html) store (`:YAML`) * Filesystem directory store (`:File`) * Filesystem directory store which spreads files in subdirectories using md5 hash (`:HashFile`) * Key/value databases: * [Berkeley DB using DBM interface or NDBM (Depends on Ruby environment)](http://www.ruby-doc.org/stdlib/libdoc/dbm/rdoc/DBM.html) (`:DBM`) * [Cassandra](http://cassandra.apache.org/) (`:Cassandra`) * [Daybreak](https://propublica.github.io/daybreak/) (`:Daybreak`) * [GDBM](http://www.ruby-doc.org/stdlib/libdoc/gdbm/rdoc/GDBM.html) (`:GDBM`) * 
[HBase](http://hbase.apache.org/) (`:HBase`) * [LevelDB](http://code.google.com/p/leveldb/) (`:LevelDB`) * [LMDB](http://symas.com/lmdb) (`:LMDB`) * [Redis](http://redis.io/) (`:Redis`) * [Riak](http://docs.basho.com/) (`:Riak`) * [SDBM](http://www.ruby-doc.org/stdlib/libdoc/sdbm/rdoc/SDBM.html) (`:SDBM`) * [KyotoCabinet](http://fallabs.com/kyotocabinet/) (`:KyotoCabinet`) * [TokyoCabinet](http://fallabs.com/tokyocabinet/) (`:TokyoCabinet`) * [TokyoTyrant](http://fallabs.com/tokyotyrant/) (`:TokyoTyrant`) * [Simple Samba database TDB](http://tdb.samba.org/) (`:TDB`) * Document databases: * [CouchDB](http://couchdb.apache.org/) (`:Couch`) * [MongoDB](http://www.mongodb.org/) (`:Mongo`) * Moneta network protocols: * Moneta key/value client (`:Client` works with `Moneta::Server`) * Moneta HTTP/REST client (`:RestClient` works with `Rack::MonetaRest`) * Other * [Fog](http://fog.io/) cloud storage which supports Amazon S3, Rackspace, etc. (`:Fog`) * Storage which doesn't store anything (`:Null`) Some of the backends are not exactly based on key/value stores, e.g. the relational ones. These are useful if you already use the corresponding backend in your application. You get a key/value store for free then without installing any additional services and you still have the possibility to upgrade to a real key/value store. ### Backend feature matrix __NOTE:__ The backend matrix is much more readable on rubydoc.info than on github. [Go there!](http://rubydoc.info/github/moneta-rb/moneta/main/file/README.md#backend-matrix)
AdapterRequired gemsMRI support1JRuby support1Multi-thread safe2Multi-process safe3Atomic increment4Atomic create5Native expires6PersistentKey TraversalBulk read7Bulk write8Description
Persistent stores
MongomongoMongoDB database
RedisredisRedis database
ActiveRecordactiverecordActiveRecord ORM
File-File store
LMDBlmdbSymas Lightning Memory-Mapped Database (LMDB)
SequelsequelSequel ORM
TokyoTyranttokyotyrant or ruby-tokyotyrantTokyoTyrant database
PStore-9PStore store
YAML-9YAML store
Sqlitesqlite3?9Sqlite3 database
DaybreakdaybreakIncredibly fast pure-ruby key/value store Daybreak
DBM-Berkeley DB using DBM interface or NDBM (Depends on Ruby environment)
GDBMffi-gdbm on JRubyGDBM database
LevelDBleveldbLevelDB database
SDBM-SDBM database
TDBtdbTDB database
KyotoCabinetkyotocabinet-ruby or kyotocabinet-ruby-reanimatedKyotoCabinet database
TokyoCabinettokyocabinetTokyoCabinet database
DataMapperdm-core, dm-migrationsDataMapper ORM
Couchfaraday, multi_jsonCouchDB database
HBasehbaserb?HBase database
Cassandracassandra?Cassandra distributed database
LocalMemCachelocalmemcacheLocalMemCache database
Fogfog?Fog cloud store
Riakriak-clientRiak database
Non-persistent stores
MemcachedDallidalli10Memcached database with Dalli library
Memcacheddalli or memcached?11?1110?11?11Memcached database
MemcachedNativememcached10Memcached database with native library
Cookie-12Cookie in memory store
LRUHash-12LRU memory store
Memory-12Memory store
Null-No database
Network clients
Client-?13?13?13?13?13Moneta client adapter
RestClient-?13Moneta REST client adapter
1. Indicates that the adapter is expected to work on this platform. Most adapters will at least work on MRI, but some are currently considered unstable, in which case they are not supported on any platform. 2. Make adapters thread-safe by using `Moneta::Lock` or by passing the option `threadsafe: true` to `Moneta#new`. There is also `Moneta::Pool` which can be used to share a store between multiple threads if the store is multi-process safe. I recommend adding the option `:threadsafe` to ensure thread-safety since for example under JRuby and Rubinius even the basic datastructures are not thread safe due to the lack of a global interpreter lock (GIL). This differs from MRI where some adapters might appear thread safe already but only due to the GIL. 3. Share a Moneta store between multiple processes using `Moneta::Shared` (See below). 4. If a store provides atomic increment it can be used with `Moneta::Semaphore`. You can add weak `#increment` support using the `Moneta::WeakIncrement` proxy. 5. If a store provides atomic creation it can be used with `Moneta::Mutex`. You can add weak `#create` support using the `Moneta::WeakCreate` proxy. 6. Add expiration support by using `Moneta::Expires` or by passing the option `expires: true` to `Moneta#new`. 7. This indicates that there is some performance gain when fetching multiple values at once using `#values_at`/`#fetch_values` or `#slice`. For instance, the `MGET` instruction in Redis, or the ability to retrieve several rows in one query in SQL. 8. This indicates that there is some performance gain when storing multiple key/value pairs at once using `#merge!`/`#update`. 9. Sqlite/YAML/PStore are multiprocess safe, but the performance suffers badly since the whole database file must be locked for writing. Use a key/value server if you want multiprocess concurrency! 10. There are some servers which use the memcached protocol but which are persistent (e.g. 
[MemcacheDB](http://memcachedb.org/), [Kai](http://sourceforge.net/apps/mediawiki/kai), [IronCache](http://dev.iron.io/cache/reference/memcache/), [Roma](https://github.com/roma/roma/tree), [Flare](http://labs.gree.jp/Top/OpenSource/Flare-en.html) and [Kumofs](https://github.com/etolabo/kumofs)) 11. This feature is only available if the dalli backend is selected 12. Store is multi-process safe because it is an in-memory store, values are not shared between multiple processes 13. Depends on server ------ ## Proxies In addition it supports proxies (Similar to [Rack middlewares](http://rack.github.com/)) which add additional features to storage backends: * `Moneta::Proxy` and `Moneta::Wrapper` are the proxy base classes. * `Moneta::Cache` combine two stores, one as backend and one as cache (e.g. `Moneta::Adapters::File` + `Moneta::Adapters::LRUHash`). Add it in the builder using `use(:Cache) {}`. * `Moneta::Expires` to add expiration support to stores which don't support it natively. Add it in the builder using `use :Expires`. * `Moneta::Fallback` use a store as a fallback when exceptions occur (by default the `:Null` adapter is used so that an error results in a no-op). Add it to the builder using `use(:Fallback, rescue: IOError)` * `Moneta::Lock` to make store thread safe. Add it in the builder using `use :Lock`. * `Moneta::Logger` to log database accesses. Add it in the builder using `use :Logger`. * `Moneta::Pool` to create a pool of stores as a means of making the store thread safe. Add it in the builder using `use(:Pool, min: 2, max: 4, ttl: 60, timeout: 5) {}`. * `Moneta::Shared` to share a store between multiple processes. Add it in the builder using `use(:Shared) {}`. * `Moneta::Stack` to stack multiple stores (Read returns result from first where the key is found, writes go to all stores). Add it in the builder using `use(:Stack) {}`. * `Moneta::Transformer` transforms keys and values (Marshal, YAML, JSON, Base64, MD5, ...). 
Add it in the builder using `use :Transformer`. * `Moneta::WeakIncrement` and `Moneta::WeakCreate` to add `#create` and `#increment` support without atomicity (weak) to stores which don't support it. * `Moneta::WeakEachKey` to add key traversal to stores that don't support it, with the important caveat that only those keys previously seen by this proxy will be traversed. Check the YARD documentation for more information and examples. ### Serializers and compressors (`Moneta::Transformer`) Supported serializers: * BEncode (`:bencode`) * BERT (`:bert`) * BSON (`:bson`) * JSON (`:json`) * Marshal (`:marshal`) * MessagePack (`:msgpack`) * Ox (`:ox`) * PHP (`:php`) * TNetStrings (`:tnet`) * YAML (`:yaml`) Supported value compressors: * Bzip2 (`:bzip2`) * LZ4 (`:lz4`) * LZMA (`:lzma`) * LZO (`:lzo`) * Snappy (`:snappy`) * QuickLZ (`:quicklz`) * Zlib (`:zlib`) Supported encoders: * Base64 (RFC 2045; `:base64`) * URL-safe Base64 (RFC 4648; `:urlsafe_base64`) * Url escape (`:escape`) * Hexadecimal (`:hex`) * QP (`:qp`) * UUEncode (`:uuencode`) Special transformers: * Digests (MD5, Shas, CityHash, ...) * Add prefix to keys (`:prefix`) * HMAC to verify values (`:hmac`, useful for `Rack::MonetaCookies`) ------ ## Moneta API The Moneta API is purposely extremely similar to the Hash API with a few minor additions. Every method also takes an optional options hash. In order to support an identical API across stores, Moneta does not support partial matches. ~~~ #initialize(options) options differs per-store, and is used to set up the store. #[](key) retrieve a key. If the key is not available, return nil. #load(key, options = {}) retrieve a key. If the key is not available, return nil. #fetch(key, options = {}, &block) retrieve a key. If the key is not available, execute the block and return its return value. #fetch(key, value, options = {}) retrieve a key. If the key is not available, return the value. #[]=(key, value) set a value for a key. If the key is already used, clobber it. 
keys set using []= will never expire. #store(key, value, options = {}) same as []=, but you can supply options. #delete(key, options = {}) delete the key from the store and return the current value. #key?(key, options = {}) true if the key exists, false if it does not. #increment(key, amount = 1, options = {}) increment numeric value. This is an atomic operation which is not supported by all stores. Returns current value. #decrement(key, amount = 1, options = {}) decrement numeric value. This is an atomic operation which is not supported by all stores. Returns current value. This is just syntactic sugar for incrementing with a negative value. #create(key, value, options = {}) create entry. This is an atomic operation which is not supported by all stores. Returns true if the value was created. #values_at(*keys, **options) retrieve multiple keys. Returns an array of equal length to the keys. Each entry in the array is either the value corresponding to the key in the same position, or nil if the key is not available. #fetch_values(*keys, **options, &block) retrieve multiple keys. Return is identical to values_at, except that when a block is given it will be called once for each key that is not available, and the return value of the block will be used in place of nil in the array. #slice(*keys, **options) retrieve multiple keys. Returns an enumerable of key-value pairs, one for each of the supplied keys that is present in the store. #merge!(pairs, options = {}) set values for multiple keys. "pairs" must be an enumerable of key-value pairs to be stored. Any existing keys will be clobbered. #merge!(pairs, options = {}, &block) set values for multiple keys. For each existing key, execute the block passing the key, existing value and new value, and store the return value. #update(pairs, options = {}, &block) same as merge! #each_key return an enumerable which will yield all keys in the store, one at a time. 
This method is present if and only if the store supports the :each_key feature. #each_key(&block) yield all keys in the store to the block, one at a time. Again, this method is present if and only if the store supports the :each_key feature. #clear(options = {}) clear all keys in this store. #close close database connection. #features return array of features, e.g. [:create, :expires, :increment] #supports?(feature) returns true if store supports a given feature ~~~ ### Creating a Store There is a simple interface to create a store using `Moneta.new`. You will get automatic key and value serialization which is provided by `Moneta::Transformer`. This allows you to store arbitrary Ruby objects. You can tune some options when you call `Moneta.new`. However for very fine tuning use `Moneta.build`. ~~~ ruby store = Moneta.new(:Memcached, server: 'localhost:11211') store['key'] = 'value' store['hash_key'] = {a: 1, b: 2} store['object_key'] = MarshallableRubyObject.new ~~~ If you want to have control over the proxies, you have to use `Moneta.build`: ~~~ ruby store = Moneta.build do # Adds expires proxy use :Expires # Transform key using Marshal and Base64 and value using Marshal use :Transformer, key: [:marshal, :base64], value: :marshal # IMPORTANT: adapter must be defined last for the builder to function properly. # Memory backend adapter :Memory end ~~~ You can also directly access the underlying adapters if you don't want to use the Moneta stack. ~~~ ruby db = Moneta::Adapters::File.new(dir: 'directory') db['key'] = {a: 1, b: 2} # This will fail since you can only store Strings # However for Mongo and Couch this works # The hash will be mapped directly to a Mongo/Couch document. db = Moneta::Adapters::Couch.new db['key'] = {a: 1, b: 2} db = Moneta::Adapters::Mongo.new db['key'] = {a: 1, b: 2} ~~~ ### Expiration The Cassandra, Memcached, Redis and Mongo backends support expiration natively. ~~~ ruby cache = Moneta::Adapters::Memcached.new # Or using the builder... 
cache = Moneta.build do adapter :Memcached end # Expires in 60 seconds cache.store(key, value, expires: 60) # Never expire cache.store(key, value, expires: 0) cache.store(key, value, expires: false) # Update expires time if value is found cache.load(key, expires: 30) cache.key?(key, expires: 30) # Or remove the expiration if found cache.load(key, expires: false) cache.key?(key, expires: 0) ~~~ You can add the expires feature to other backends using the `Moneta::Expires` proxy. But be aware that expired values are not deleted automatically if they are not looked up. ~~~ ruby # Using the :expires option cache = Moneta.new(:File, dir: '...', expires: true) # or manually by using the proxy... cache = Moneta::Expires.new(Moneta::Adapters::File.new(dir: '...')) # or using the builder... cache = Moneta.build do use :Expires adapter :File, dir: '...' end ~~~ ### Key traversal Where supported by the store's backend, it is possible to traverse the keys in the store using the `#each_key` method. Support for this can be tested by calling `store.supports?(:each_key)`, or checking for the presence of `:each_key` in `store.features`. ~~~ ruby store.each_key # returns an Enumerable store.each_key do |key| store.load(key) # read operations are supported within the block store[key] = "x" # behaviour of write operations is undefined end ~~~ ### Atomic operations #### Atomic incrementation and raw access The stores support the `#increment` which allows atomic increments of unsigned integer values. If you increment a non existing value, it will be created. If you increment a non integer value an exception will be raised. 
~~~ ruby store.increment('counter') # returns 1, counter created store.increment('counter') # returns 2 store.increment('counter', -1) # returns 1 store.increment('counter', 13) # returns 14 store.increment('counter', 0) # returns 14 store.decrement('counter') # returns 13 store['name'] = 'Moneta' store.increment('name') # raises an Exception ~~~ If you want to access the counter value you have to use raw access to the datastore. This is only important if you have a `Moneta::Transformer` somewhere in your proxy stack which transforms the values e.g. with `Marshal`. ~~~ ruby store.increment('counter') # returns 1, counter created store.load('counter', raw: true) # returns 1 store.store('counter', '10', raw: true) store.increment('counter') # returns 11 ~~~ Fortunately there is a nicer way to do this using some syntactic sugar! ~~~ ruby store.increment('counter') # returns 1, counter created store.raw['counter'] # returns 1 store.raw.load('counter') # returns 1 store.raw['counter'] = '10' store.increment('counter') # returns 11 ~~~ You can also keep the `raw` store in a variable and use it like this: ~~~ ruby counters = store.raw counters.increment('counter') # returns 1, counter created counters['counter'] # returns 1 counters.load('counter') # returns 1 counters['counter'] = '10' counters.increment('counter') # returns 11 ~~~ #### Atomic create The stores support the `#create` which allows atomic creation of entries. `#create` returns true if the value was created. ~~~ ruby store.create('key', 'value') # returns true store.create('key', 'other value') # returns false ~~~ #### Atomic bulk operations All stores support storage and retrieval of multiple keys using `#values_at`/`#fetch_values`/`#slice` and `#merge!`/`#update`. Wherever possible, these operations are performed atomically. When this is not possible, the `#load` and `#store` methods are called once for each key. 
~~~ ruby store.merge!('key1' => 'value1', 'key2' => 'value2') # stores two keys store.values_at('key1', 'key2', 'key3') # returns ['value1', 'value2', nil] store.fetch_values('key1', 'key3') { |k| k + ' missing' } # returns ['value1', 'key3 missing'] store.slice('key1', 'key2', 'key3') # returns enumerable of ['key1', 'value1'], ['key2', 'value2'] store.merge!('key2' => 'new value2', 'key3' => 'value3') do |key, value, new_value| [value, new_value].join('+') end # stores "value3" and "value2+new value2" ~~~ #### Shared/distributed synchronization primitives Moneta provides shared/distributed synchronization primitives which are shared database-wide between all clients. `Moneta::Mutex` allows a single thread to enter a critical section. ~~~ ruby mutex = Moneta::Mutex.new(store, 'mutex_key') mutex.synchronize do mutex.locked? # returns true # Synchronized access to counter store['counter'] += 1 end begin mutex.lock mutex.locked? # returns true # ... ensure mutex.unlock end ~~~ `Moneta::Semaphore` allows `max_concurrent` threads to enter a critical section. ~~~ ruby semaphore = Moneta::Semaphore.new(store, 'semaphore_counter', max_concurrent) semaphore.synchronize do semaphore.locked? # returns true # ... end begin semaphore.enter semaphore.locked? # returns true # ... ensure semaphore.leave end ~~~ #### Weak atomic operations If an underlying adapter doesn't provide atomic `#create` or `#increment` and `#decrement` you can use the proxies `Moneta::WeakIncrement` and `Moneta::WeakCreate` to add support without atomicity. But then you have to ensure that the store is not shared by multiple processes and thread-safety is provided by `Moneta::Lock`. ### Syntactic sugar and option merger For raw data access as described before the class `Moneta::OptionMerger` is used. 
It works like this: ~~~ ruby # All methods after 'with' get the options passed store.with(raw: true).load('key') # You can also specify the methods store.with(raw: true, only: :load).load('key') store.with(raw: true, except: [:key?, :increment]).load('key') # Syntactic sugar for raw access store.raw.load('key') # Access substore where all keys get a prefix substore = store.prefix('sub') substore['key'] = 'value' store['key'] # returns nil store['subkey'] # returns 'value' # Set expiration time for all keys short_lived_store = store.expires(60) short_lived_store['key'] = 'value' ~~~ ### Add proxies to existing store You can add proxies to an existing store. This is useful if you want to compress only a few values for example. ~~~ ruby compressed_store = store.with(prefix: 'compressed') do use :Transformer, value: :zlib end store['key'] = 'this value will not be compressed' compressed_store['key'] = 'value will be compressed' ~~~ ------ ## Framework Integration Inspired by [redis-store](https://github.com/jodosha/redis-store) there exist integration classes for [Rails](http://rubyonrails.org/) and [Rack](http://rack.github.com/)/[Rack-Cache](https://github.com/rtomayko/rack-cache). You can also use all the Rack middlewares together with Rails and the [Sinatra](http://sinatrarb.com/) framework. 
There exist the following integration classes: * [Rack](http://rack.github.com/), [Rails](http://rubyonrails.org/) and [Sinatra](http://sinatrarb.com/) * `Rack::Session::Moneta` is a Rack middleware to use Moneta for storing sessions * `Rack::MonetaStore` is a Rack middleware which places a Moneta store in the environment and enables per-request caching * `Rack::MonetaCookies` is a Rack middleware which uses Moneta to store cookies * `Rack::MonetaRest` is a Rack application which exposes a Moneta store via REST/HTTP * `Rack::Cache::Moneta` provides meta and entity stores for Rack-Cache * [Rails](http://rubyonrails.org/) * `ActionDispatch::Session::MonetaStore` is a Rails middleware to use Moneta for storing sessions * `ActiveSupport::Cache::MonetaStore` is a Rails cache implementation which uses a Moneta store as backend * [Ramaze](http://ramaze.net/) * `Ramaze::Cache::Moneta` is integrated into the Ramaze project and allows Ramaze to use Moneta as caching store * [Padrino](http://padrinorb.com) adopted Moneta to replace their cache stores in padrino-cache. ### Rack #### Session store You can use Moneta as a [Rack](http://rack.github.com/) session store. Use it in your `config.ru` like this: ~~~ ruby require 'rack/session/moneta' # Use only the adapter name use Rack::Session::Moneta, store: :Redis # Use Moneta.new use Rack::Session::Moneta, store: Moneta.new(:Memory, expires: true) # Set rack options use Rack::Session::Moneta, key: 'rack.session', domain: 'foo.com', path: '/', expire_after: 2592000, store: Moneta.new(:Memory, expires: true) # Use the Moneta builder use Rack::Session::Moneta do use :Expires adapter :Memory end ~~~ #### Moneta middleware There is a simple middleware which places a Moneta store in the Rack environment at `env['rack.moneta_store']`. It supports per-request caching if you add the option `cache: true`. 
Use it in your `config.ru` like this: ~~~ ruby require 'rack/moneta_store' # Add Rack::MonetaStore somewhere in your rack stack use Rack::MonetaStore, :Memory, cache: true run lambda { |env| env['rack.moneta_store'] # is a Moneta store with per-request caching } # Pass it a block like the one passed to Moneta.build use Rack::MonetaStore do use :Transformer, value: :zlib adapter :Cookie end run lambda { |env| env['rack.moneta_store'] # is a Moneta store without caching } ~~~ #### REST server If you want to expose your Moneta key/value store via HTTP, you can use the Rack/Moneta REST service. Use it in your `config.ru` like this: ~~~ ruby require 'rack/moneta_rest' map '/moneta' do run Rack::MonetaRest.new(:Memory) end # Or pass it a block like the one passed to Moneta.build run Rack::MonetaRest.new do use :Transformer, value: :zlib adapter :Memory end ~~~ #### Rack-Cache You can use Moneta as a [Rack-Cache](https://github.com/rtomayko/rack-cache) store. Use it in your `config.ru` like this: ~~~ ruby require 'rack/cache/moneta' use Rack::Cache, metastore: 'moneta://Memory?expires=true', entitystore: 'moneta://Memory?expires=true' # Or used named Moneta stores Rack::Cache::Moneta['named_metastore'] = Moneta.build do use :Expires adapter :Memory end use Rack::Cache, metastore: 'moneta://named_metastore', entity_store: 'moneta://named_entitystore' ~~~ #### Cookies Use Moneta to store cookies in [Rack](http://rack.github.com/). It uses the `Moneta::Adapters::Cookie`. You might wonder what the purpose of this store or Rack middleware is: It makes it possible to use all the transformers on the cookies (e.g. `:prefix`, `:marshal` and `:hmac` for value verification). ~~~ ruby require 'rack/moneta_cookies' use Rack::MonetaCookies, domain: 'example.com', path: '/path' run lambda { |env| req = Rack::Request.new(env) req.cookies #=> is now a Moneta store! env['rack.request.cookie_hash'] #=> is now a Moneta store! 
req.cookies['key'] #=> retrieves 'key' req.cookies['key'] = 'value' #=> sets 'key' req.cookies.delete('key') #=> removes 'key' [200, {}, []] } ~~~ ### Rails #### Session store Add the session store in your application configuration `config/environments/*.rb`. ~~~ ruby require 'action_dispatch/middleware/session/moneta_store' # Only by adapter name config.session_store :moneta_store, store: :Memory # Use Moneta.new config.session_store :moneta_store, store: Moneta.new(:Memory) # Use the Moneta builder config.session_store :moneta_store, store: Moneta.build do use :Expires adapter :Memory end ~~~ #### Cache store Add the cache store in your application configuration `config/environments/*.rb`. Unfortunately the Moneta cache store doesn't support matchers. If you need these features use a different server-specific implementation. ~~~ ruby require 'active_support/cache/moneta_store' # Only by adapter name config.cache_store :moneta_store, store: :Memory # Use Moneta.new config.cache_store :moneta_store, store: Moneta.new(:Memory) # Use the Moneta builder config.cache_store :moneta_store, store: Moneta.build do use :Expires adapter :Memory end ~~~ ### Padrino [Padrino](http://padrinorb.com/) adopted Moneta to replace their cache stores in padrino-cache. You use it like this ~~~ ruby # Global Padrino caching # Don't forget the expires: [true, Integer] if you want expiration support! Padrino.cache = Moneta.new(:Memory, expires: true) # Application caching # Don't forget the expires: [true, Integer] if you want expiration support! set :cache, Moneta.new(:Memory, expires: true) ~~~ ## Advanced ### Build your own key value server You can use Moneta to build your own key/value server which is shared between multiple processes. If you run the following code in two different processes, they will share the same data which will also be persisted in the database `shared.db`. 
~~~ ruby require 'moneta' store = Moneta.build do use :Transformer, key: :marshal, value: :marshal use :Shared do use :Cache do cache do adapter :LRUHash end backend do adapter :GDBM, file: 'shared.db' end end end end ~~~ If you want to go further, you might want to take a look at `Moneta::Server` and `Moneta::Adapters::Client` which are used by `Moneta::Shared` and provide the networking communication. But be aware that they are experimental and subject to change. They provide an acceptable performance (for being ruby only), but don't have a stable protocol yet. You might wonder why I didn't use [DRb](http://www.ruby-doc.org/stdlib-1.9.3/libdoc/drb/rdoc/DRb.html) to implement server and client - in fact my first versions used it, but with much worse performance and it was real fun to implement the networking directly :) There is still much room for improvement and experiments, try [EventMachine](http://eventmachine.rubyforge.org/), try [Kgio](http://bogomips.org/kgio/), ... ### ToyStore ORM If you want something more advanced to handle your objects and relations, use John Nunemaker's [ToyStore](https://github.com/jnunemaker/toystore) which works together with Moneta. Assuming that `Person` is a `ToyStore::Object` you can add persistence using Moneta as follows: ~~~ ruby # Use the Moneta Redis backend Person.adapter :memory, Moneta.new(:Redis) ~~~ ------ ## Testing and Benchmarks Testing is done using [GitHub Actions](https://github.com/moneta-rb/moneta/actions). Currently we support MRI Ruby >= 2.4.0 (but not yet 3.x) and JRuby >= 9.2.9.0. MRI 2.3.0 should mostly still work, but is no longer tested in CI. ~~Benchmarks for each store are done on [Travis-CI](http://travis-ci.org/moneta-rb/moneta) for each build.~~ At the time of writing, benchmarks still need to be migrated from Travis to GitHub Actions. Take a look there to compare the speed of the different key value stores for different key/value sizes and size distributions. 
Feel free to add your own configurations! The impact of Moneta should be minimal since it is only a thin layer on top of the different stores. ------ ## How to contribute? Always feel free to open an issue on https://github.com/moneta-rb/moneta/issues if something doesn't work as you expect it to work. Feedback is also very welcome! My only request about patches is that you please try to test them before submitting. ### Contribute an adapter If you want support for another adapter you can first add it to the list of missing adapters at https://github.com/moneta-rb/moneta/issues/16 If you choose to implement an adapter please also add tests. Please check also if anything in `.github/workflows` needs changes, for example if you need to start additional services. Check if the default settings in Moneta#new are appropriate for your adapter. If not specify a better one. Don't forget to edit the README.md and the CHANGES. ------ ## Alternatives * [Horcrux](https://github.com/technoweenie/horcrux): Used at github, supports batch operations but only Memcached backend * [ActiveSupport::Cache::Store](http://api.rubyonrails.org/classes/ActiveSupport/Cache/Store.html): The Rails cache store abstraction * [ToyStore](https://github.com/jnunemaker/toystore): ORM mapper for key/value stores * [ToyStore Adapter](https://github.com/jnunemaker/adapter): Adapter to key/value stores used by ToyStore, Moneta can be used directly with the ToyStore Memory adapter * [Cache](https://github.com/seamusabshere/cache): Rubygem cache wraps Memcached and Redis * [Ramaze::Cache](http://ramaze.net/documentation/Innate/Cache.html): Cache stores of the Ramaze framework with support for LocalMemCache, Memcached, Sequel, Redis, ... 
------ ## Authors * [Daniel Mendler](https://github.com/minad) * [Hannes Georg](https://github.com/hannesg) * [Alastair Pharo](https://github.com/asppsa) * Originally by [Yehuda Katz](https://github.com/wycats) and contributors moneta-1.5.2/SPEC.md000066400000000000000000000220661433316074200140670ustar00rootroot00000000000000# Moneta Specification (See RFC 2119 for use of MUST, SHOULD, MAY, MUST NOT, and SHOULD NOT) The purpose of the moneta specification is to create a general-purpose API for interacting with key-value stores. In general, libraries that need to interact with key-value stores should be able to specify that they can use any "moneta-compliant store". Moneta ships with a set of executable specs which you can use to verify spec-compliance with your moneta adapter. ## Class Methods ### new(options[Hash] => {}) => Object Return an instance of the moneta adapter, with the instance methods listed below. The options hash is a required parameter, and the adapter may specify whatever additional requirements it needs to properly instantiate it. ## Instance Methods ### \[\](key[Object]) => Object Return the value stored in the key-value-store under the provided key. Adapters MUST return a duplicate of the original value, and consumers should expect that adapters might serialize and deserialize the key and value. As a result, both the key and value MUST be objects that can be serialized using Ruby's Marshal system. ### \[\]=(key[Object], value[Object]) => Object(value) Store the value in the key-value-store under the provided key. Adapters MAY serialize the value using Ruby's Marshal system, and MUST NOT store a reference to the original value in the store, unless Ruby disallows duplication of the original value. Adapters SHOULD NOT simply call dup on the value, unless the value stores no references to other Object. For example, an adapter MAY store a dup of a String, but SHOULD NOT store a dup of ["hello", "world"]. 
### fetch(key[Object], options[Hash] => {}, &block) => Object Return the value stored in the key-value-store under the provided key. If no value is stored under the provided key, the adapter MUST yield to the block, and return the value. The adapter MUST NOT store the value returned from the block in the key-value-store. ### fetch(key[Object], value[Object], options[Hash] => {}) => Object Return the value stored in the key-value-store under the provided key. If no value is stored under the provided key, the adapter MUST return the default value provided. The adapter MUST NOT store the default value in the key-value-store. ### delete(key[Object], options[Hash] => {}) => Object Delete the value stored in the key-value-store for the key provided, and return the value previously stored there. After this operation, the key-value-store MUST behave as though no value was stored for the provided key. ### key?(key[Object], options[Hash] => {}) => [TrueClass, FalseClass] Determine whether a value exists in the key-value-store for the key provided. If a value exists, the adapter MUST return true. Otherwise, the adapter MUST return false. ### store(key[Object], value[Object], options[Hash] => {}) => Object(value) Behaves the same as []=, but allows the client to send additional options which can be specified by the adapter (and which may be specified by extensions to this specification). ### increment(key[Object], amount[Integer] = 1, options[Hash] => {}) => Integer(value) Increments a value atomically. This method is not supported by all stores and might raise a NotImplementedError. This method MUST accept negative amounts, but the result MUST be unsigned. ### decrement(key[Object], amount[Integer] = 1, options[Hash] => {}) => Integer(value) Decrements a value atomically. This method is not supported by all stores and might raise a NotImplementedError. This method MUST accept negative amounts, but the result MUST be unsigned. 
### create(key[Object], value[Object], options[Hash] => {}) => [TrueClass, FalseClass] Creates a value atomically. This method is not supported by all stores and might raise a NotImplementedError. It MUST return true if the value was created. ### clear(options[Hash] => {}) Completely empty all keys and values from the key-value-store. Adapters MAY allow a namespace during initialization, which can scope this operation to a particular subset of keys. After calling clear, a [] operation MUST return nil for every possible key, and a key? query MUST return false for every possible key. ### close Closes the store ### features => Array<Symbol> and supports?(Symbol) => [TrueClass, FalseClass] Feature detection. Adapters MUST return :create and :increment if these methods are supported. ### `each_key => Enumerator` and `each_key(&block) => Object` Enumerates over the keys in the store. This method is not supported by all stores. When not supported, this method MUST raise a `NotImplementedError`, regardless of whether a block is supplied. When supported, this method allows traversal of all keys in the store. The method behaves differently depending on whether a block is supplied. In either case, for each key, `k` in the traversal, `key?(k)` MUST return `true`; and for each key, `k` for which `key?(k)` returns `true`, `k` MUST be traversed by `each_key`. Keys MAY be traversed in any order. Mutation of the store while traversing keys MAY be allowed. Querying the store (calling `fetch`, `key?`, etc.) while traversing MUST be allowed. * If no block is supplied, `each_key` MUST return an `Enumerator` that can be used to traverse each key (e.g. by calling `each`). Calling methods on the `Enumerator` such as `each` with a block MUST return the store object. * If a block is supplied, that block MUST be called once with each traversed key as the only argument. When called in this way, `each_key` MUST return the store object. 
### `values_at(*keys[Array], **options[Hash]) => Array` Returns an array containing the values associated with the given keys, in the same order as the supplied keys. If a key is not present in the key-value-store, `nil` MUST be returned in its place. For each key, and each value, the same restrictions apply as apply to individual keys passed to, and values received from the store in the specification of `[]` (see above). The adapter MAY perform this operation atomically. ### `fetch_values(*keys[Array], **options[Hash], &defaults) => Array` Behaves identically to `values_at`, except that it MUST accept an optional block. When supplied, the block will be called successively with each supplied key that is not present in the store. The return value of the block call MUST be used in place of `nil` in returned the array of values. As with `fetch` (above), the adapter MUST NOT store the return value of the block call in the key-value-store. The adapter MAY perform this operation atomically. ### `slice(*keys[Array], **options[Hash]) => ` Returns a collection of key-value pairs corresponding to those supplied keys which are present in the key-value store, and their associated values. A key MUST be present in the return value if and only if it was supplied in the `keys` parameter and it is present in the key-value store. For each key, and each value, the same restrictions apply as apply to individual keys passed to, and values received from the store in the specification of `[]` (see above). The adapter MAY perform this operation atomically. ### `merge!(pairs[], options[Hash] => {}, &block) => self` Stores the pairs in the key-value-store, and returns the store object. This method MUST behave identically to successively calling `[]=` with each key-value pair and the options hash; except that the adapter MAY perform this operation atomically, and the method MUST accept an optional block, which MUST be called for each key that is to be overwritten. 
When the block is provided, it MUST be called before overwriting any existing values with the key, old value and supplied value, and the return value of the block MUST be used in place of the supplied value. `merge!` MUST also be aliased as `update`. ## Additional Options Hashes The following methods may all take an additional Hash as a final argument. This allows the client to send additional options which can be specified by the adapter (and which may be specified by extensions to this specification). The methods MUST NOT modify the supplied option hash. * fetch * load * store * delete * key? * increment * clear * merge! Additionally, the following methods accept options as keyword arguments, after non-keyword arguments. These keyword arguments are treated as a hash, equivalent to supplying a hash to the above methods. * values_at * fetch_values * slice In the case of methods with optional arguments, the Hash MUST be provided as the final argument. Keys in this Hash MUST be Symbols. ## Atomicity The base Moneta specification does not specify any atomicity guarantees. However, extensions to this spec may specify extensions that define additional guarantees for any of the defined operations. moneta-1.5.2/feature_matrix.yaml000066400000000000000000000214101433316074200167060ustar00rootroot00000000000000--- group: Persistent stores features: [ persist ] notes: poor multiprocess performance: > Sqlite/YAML/PStore are multiprocess safe, but the performance suffers badly since the whole database file must be locked for writing. Use a key/value server if you want multiprocess concurrency! 
backends: - adapter: Mongo platforms: [ MRI, JRuby ] gems: mongo features: [ threadsafe, multiprocess, increment, create, expires, each_key, bulk_read, bulk_write ] description: "[MongoDB](http://www.mongodb.org/) database" - adapter: Redis platforms: [ MRI, JRuby ] gems: redis features: [ threadsafe, multiprocess, increment, create, expires, each_key, bulk_read, bulk_write ] description: "[Redis](http://redis.io/) database" - adapter: ActiveRecord platforms: [ MRI, JRuby ] gems: activerecord features: [ threadsafe, multiprocess, increment, create, each_key, bulk_read, bulk_write ] description: "[ActiveRecord](https://rubygems.org/gems/activerecord) ORM" - adapter: File platforms: [ MRI, JRuby ] features: [ threadsafe, multiprocess, increment, create, each_key ] description: "File store" - adapter: LMDB platforms: [ MRI ] gems: lmdb features: [ threadsafe, multiprocess, increment, create, each_key, bulk_read, bulk_write ] description: "[Symas Lightning Memory-Mapped Database (LMDB)](http://symas.com/lmdb)" - adapter: Sequel platforms: [ MRI, JRuby ] gems: sequel features: [ threadsafe, multiprocess, increment, create, each_key, bulk_read, bulk_write ] description: "[Sequel](http://sequel.rubyforge.org/) ORM" - adapter: TokyoTyrant platforms: [ MRI, JRuby ] gems: tokyotyrant or ruby-tokyotyrant features: [ multiprocess, increment, create, bulk_read ] description: "[TokyoTyrant](http://fallabs.com/tokyotyrant/) database" - adapter: PStore platforms: [ MRI ] features: [ multiprocess, increment, create, each_key, bulk_read, bulk_write ] description: "[PStore](http://ruby-doc.org/stdlib/libdoc/pstore/rdoc/PStore.html) store" notes: multiprocess: poor multiprocess performance - adapter: YAML platforms: [ MRI, JRuby ] features: [ multiprocess, increment, create, each_key ] description: "[YAML](http://www.ruby-doc.org/stdlib/libdoc/yaml/rdoc/YAML/Store.html) store" notes: multiprocess: poor multiprocess performance - adapter: Sqlite platforms: [ MRI ] gems: sqlite3 
features: [ multiprocess, increment, create, bulk_read, bulk_write, each_key ] description: "[Sqlite3](http://sqlite.org/) database" unknown: [ threadsafe ] notes: multiprocess: poor multiprocess performance - adapter: Daybreak platforms: [ MRI, JRuby ] gems: daybreak features: [ multiprocess, increment, create, each_key, bulk_write ] description: "Incredibly fast pure-ruby key/value store [Daybreak](http://propublica.github.com/daybreak/)" - adapter: DBM platforms: [ MRI ] features: [ increment, create, each_key, bulk_read, bulk_write ] description: "[Berkeley DB using DBM interface or NDBM (Depends on Ruby environment)](http://www.ruby-doc.org/stdlib/libdoc/dbm/rdoc/DBM.html)" - adapter: GDBM platforms: [ MRI, JRuby ] gems: ffi-gdbm on JRuby features: [ increment, create, each_key, bulk_read, bulk_write ] description: "[GDBM](http://www.ruby-doc.org/stdlib/libdoc/gdbm/rdoc/GDBM.html) database" - adapter: LevelDB platforms: [ MRI ] gems: leveldb features: [ increment, create, each_key, bulk_read, bulk_write ] description: "[LevelDB](http://code.google.com/p/leveldb/) database" - adapter: SDBM platforms: [ MRI ] features: [ increment, create, each_key, bulk_read, bulk_write ] description: "[SDBM](http://www.ruby-doc.org/stdlib/libdoc/sdbm/rdoc/SDBM.html) database" - adapter: TDB platforms: [ MRI ] gems: tdb features: [ increment, create, each_key ] description: "[TDB](http://tdb.samba.org/) database" - adapter: KyotoCabinet platforms: [ MRI ] gems: kyotocabinet-ruby or kyotocabinet-ruby-reanimated features: [ increment, create, each_key, bulk_read, bulk_write ] description: "[KyotoCabinet](http://fallabs.com/kyotocabinet/) database" - adapter: TokyoCabinet platforms: [ MRI ] gems: tokyocabinet features: [ increment, create, each_key ] description: "[TokyoCabinet](http://fallabs.com/tokyocabinet/) database" - adapter: DataMapper platforms: [ MRI ] gems: dm-core, dm-migrations features: [ threadsafe, multiprocess, create ] description: 
"[DataMapper](http://datamapper.org/) ORM" - adapter: Couch platforms: [ MRI, JRuby ] gems: faraday, multi_json features: [ multiprocess, create, each_key, bulk_read, bulk_write ] description: "[CouchDB](http://couchdb.apache.org/) database" - adapter: HBase platforms: [ Unstable ] gems: hbaserb features: [ multiprocess, increment ] unknown: [threadsafe] description: "[HBase](http://hbase.apache.org/) database" - adapter: Cassandra platforms: [ MRI, JRuby ] gems: cassandra features: [ multiprocess, expires, each_key, bulk_read, bulk_write ] unknown: [threadsafe] description: "[Cassandra](http://cassandra.apache.org/) distributed database" - adapter: LocalMemCache platforms: [ MRI ] gems: localmemcache features: [ threadsafe, multiprocess ] description: "[LocalMemCache](http://localmemcache.rubyforge.org/) database" - adapter: Fog platforms: [ MRI, JRuby ] gems: fog features: [multiprocess] unknown: [threadsafe] description: "[Fog](http://fog.io/) cloud store" - adapter: Riak platforms: [ Unstable ] gems: riak-client features: [multiprocess] description: "[Riak](http://docs.basho.com/) database" --- group: Non-persistent stores notes: memcached protocol: > There are some servers which use the memcached protocol but which are persistent (e.g. 
[MemcacheDB](http://memcachedb.org/), [Kai](http://sourceforge.net/apps/mediawiki/kai), [IronCache](http://dev.iron.io/cache/reference/memcache/), [Roma](https://github.com/roma/roma/tree), [Flare](http://labs.gree.jp/Top/OpenSource/Flare-en.html) and [Kumofs](https://github.com/etolabo/kumofs)) memcached backend: > This feature is only available if the dalli backend is selected in-memory multiprocess caveat: > Store is multi-process safe because it is an in-memory store, values are not shared between multiple processes backends: - adapter: MemcachedDalli platforms: [ MRI, JRuby ] gems: dalli features: [ threadsafe, multiprocess, increment, create, expires, bulk_read, bulk_write ] description: "[Memcached](http://memcached.org/) database with Dalli library" notes: persist: memcached protocol - adapter: Memcached platforms: [ MRI ] gems: dalli or memcached features: [ multiprocess, increment, create, expires ] unknown: [ threadsafe, bulk_read, bulk_write, JRuby ] description: "[Memcached](http://memcached.org/) database" notes: persist: memcached protocol JRuby: memcached backend threadsafe: memcached backend bulk_read: memcached backend bulk_write: memcached backend - adapter: MemcachedNative platforms: [ MRI ] gems: memcached features: [ multiprocess, increment, create, expires ] description: "Memcached database with native library" notes: persist: memcached protocol - adapter: Cookie platforms: [ MRI, JRuby ] features: [ multiprocess, increment, create, expires, each_key ] description: "Cookie in memory store" notes: multiprocess: in-memory multiprocess caveat - adapter: LRUHash platforms: [ MRI, JRuby ] features: [ multiprocess, increment, create, each_key ] description: "LRU memory store" notes: multiprocess: in-memory multiprocess caveat - adapter: Memory platforms: [ MRI, JRuby ] features: [ multiprocess, increment, create, each_key, bulk_read, bulk_write ] description: Memory store notes: multiprocess: in-memory multiprocess caveat - adapter: "Null" 
platforms: [ MRI, JRuby ] features: [ threadsafe, multiprocess ] description: "No database" --- group: Network clients notes: depends on server: Depends on server backends: - adapter: Client platforms: [ MRI, JRuby ] features: [multiprocess] unknown: [ increment, create, expires, persist, each_key ] description: "Moneta client adapter" notes: increment: depends on server create: depends on server expires: depends on server persist: depends on server each_key: depends on server - adapter: RestClient platforms: [ MRI, JRuby ] features: [ multiprocess ] unknown: [ persist ] description: "Moneta REST client adapter" notes: persist: depends on server moneta-1.5.2/lib/000077500000000000000000000000001433316074200135535ustar00rootroot00000000000000moneta-1.5.2/lib/action_dispatch/000077500000000000000000000000001433316074200167075ustar00rootroot00000000000000moneta-1.5.2/lib/action_dispatch/middleware/000077500000000000000000000000001433316074200210245ustar00rootroot00000000000000moneta-1.5.2/lib/action_dispatch/middleware/session/000077500000000000000000000000001433316074200225075ustar00rootroot00000000000000moneta-1.5.2/lib/action_dispatch/middleware/session/moneta_store.rb000066400000000000000000000005441433316074200255360ustar00rootroot00000000000000require 'rack/session/moneta' require 'action_dispatch/middleware/session/abstract_store' module ActionDispatch module Session # A Rails cache backed by any Moneta store class MonetaStore < Rack::Session::Moneta include Compatibility include StaleSessionCheck include SessionObject if defined?(SessionObject) end end end moneta-1.5.2/lib/active_support/000077500000000000000000000000001433316074200166225ustar00rootroot00000000000000moneta-1.5.2/lib/active_support/cache/000077500000000000000000000000001433316074200176655ustar00rootroot00000000000000moneta-1.5.2/lib/active_support/cache/moneta_store.rb000066400000000000000000000114521433316074200227140ustar00rootroot00000000000000module ActiveSupport module Cache # @api public 
class MonetaStore < Store def initialize(options = nil) raise ArgumentError, 'Option :store is required' unless @store = options.delete(:store) @store = ::Moneta.new(@store, expires: true) if Symbol === @store super(options) extend Strategy::LocalCache end def increment(key, amount = 1, options = nil) options = merged_options(options) instrument(:increment, key, amount: amount) do @store.increment(normalize_key(key, options), amount, moneta_options(options)) end end def decrement(key, amount = 1, options = nil) options = merged_options(options) instrument(:decrement, key, amount: amount) do @store.increment(normalize_key(key, options), -amount, moneta_options(options)) end end def clear(options = nil) options = merged_options(options) instrument(:clear, nil, nil) do @store.clear(moneta_options(options)) end end # This prevents underlying Moneta transformers from erroring on raw values def exist?(name, options = {}) super rescue super(name, options.merge(raw: true)) end # These are the rails 5.2 versions of these methods, which call into the # lower-level read_multi_entries and write_multi_entries methods. We # define them here only if the superclass versions don't use the *_entries # methods. unless [:read_multi_entries, :write_multi_entries].all? { |m| superclass.private_instance_methods.include? m } def fetch_multi(*names) raise ArgumentError, "Missing block: `Cache#fetch_multi` requires a block." \ unless block_given? options = names.extract_options! options = merged_options(options) instrument :read_multi, names, options do |payload| read_multi_entries(names, options).tap do |results| payload[:hits] = results.keys payload[:super_operation] = :fetch_multi writes = {} (names - results.keys).each do |name| results[name] = writes[name] = yield(name) end write_multi writes, options end end end def read_multi(*names) options = names.extract_options! 
options = merged_options(options) instrument :read_multi, names, options do |payload| read_multi_entries(names, options).tap do |results| payload[:hits] = results.keys end end end def write_multi(hash, options = nil) options = merged_options(options) instrument :write_multi, hash, options do entries = hash.each_with_object({}) do |(name, value), memo| memo[normalize_key(name, options)] = \ Entry.new(value, options.merge(version: normalize_version(name, options))) end write_multi_entries entries, options end end end protected def make_entry(value) case value when ActiveSupport::Cache::Entry, nil value else ActiveSupport::Cache::Entry.new(value) end end def read_entry(key, options) make_entry(@store.load(key, moneta_options(options, false))) end def write_entry(key, entry, options) value = options[:raw] ? entry.value.to_s : entry @store.store(key, value, moneta_options(options)) true end def delete_entry(key, options) @store.delete(key, moneta_options(options)) true end def read_multi_entries(names, options) keys = names.map { |name| normalize_key(name, options) } entries = @store .values_at(*keys, **moneta_options(options, false)) .map(&method(:make_entry)) names.zip(keys, entries).map do |name, key, entry| next if entry == nil delete_entry(key, options) if entry.expired? next if entry.expired? || entry.mismatched?(normalize_version(name, options)) [name, entry.value] end.compact.to_h end def write_multi_entries(hash, options) pairs = if options[:raw] hash.transform_values { |entry| entry.value.to_s } else hash end @store.merge!(pairs, moneta_options(options)) hash end private def moneta_options(options, include_expires = true) new_options = options ? 
options.dup : {} if new_options.include?(:expires_in) and include_expires new_options[:expires] = new_options.delete(:expires_in).to_r end new_options end end end end moneta-1.5.2/lib/moneta.rb000066400000000000000000000175131433316074200153720ustar00rootroot00000000000000# Provides two methods for constructing stores: # # * {Moneta.new} # * {Moneta.build} module Moneta autoload :Adapter, 'moneta/adapter' autoload :Builder, 'moneta/builder' autoload :Cache, 'moneta/cache' autoload :CreateSupport, 'moneta/create_support' autoload :Config, 'moneta/config' autoload :DBMAdapter, 'moneta/dbm_adapter' autoload :Defaults, 'moneta/defaults' autoload :EachKeySupport, 'moneta/each_key_support' autoload :Enumerable, 'moneta/enumerable' autoload :ExpiresSupport, 'moneta/expires_support' autoload :Expires, 'moneta/expires' autoload :Fallback, 'moneta/fallback' autoload :HashAdapter, 'moneta/hash_adapter' autoload :IncrementSupport, 'moneta/increment_support' autoload :Lock, 'moneta/lock' autoload :Logger, 'moneta/logger' autoload :Mutex, 'moneta/synchronize' autoload :NilValues, 'moneta/nil_values' autoload :OptionMerger, 'moneta/optionmerger' autoload :OptionSupport, 'moneta/option_support' autoload :Pool, 'moneta/pool' autoload :Proxy, 'moneta/proxy' autoload :Semaphore, 'moneta/synchronize' autoload :Server, 'moneta/server' autoload :Shared, 'moneta/shared' autoload :Stack, 'moneta/stack' autoload :Transformer, 'moneta/transformer' autoload :Utils, 'moneta/utils' autoload :WeakCreate, 'moneta/weak' autoload :WeakEachKey, 'moneta/weak_each_key' autoload :WeakIncrement, 'moneta/weak' autoload :Wrapper, 'moneta/wrapper' # Adapters are classes which wrap databases, services etc., as described in # {file:SPEC.md The Moneta Specification}. 
module Adapters autoload :ActiveRecord, 'moneta/adapters/activerecord' autoload :ActiveSupportCache, 'moneta/adapters/activesupportcache' autoload :Cassandra, 'moneta/adapters/cassandra' autoload :Client, 'moneta/adapters/client' autoload :Cookie, 'moneta/adapters/cookie' autoload :Couch, 'moneta/adapters/couch' autoload :Daybreak, 'moneta/adapters/daybreak' autoload :DBM, 'moneta/adapters/dbm' autoload :DataMapper, 'moneta/adapters/datamapper' autoload :File, 'moneta/adapters/file' autoload :Fog, 'moneta/adapters/fog' autoload :GDBM, 'moneta/adapters/gdbm' autoload :HBase, 'moneta/adapters/hbase' autoload :LRUHash, 'moneta/adapters/lruhash' autoload :KyotoCabinet, 'moneta/adapters/kyotocabinet' autoload :LevelDB, 'moneta/adapters/leveldb' autoload :LMDB, 'moneta/adapters/lmdb' autoload :LocalMemCache, 'moneta/adapters/localmemcache' autoload :Memcached, 'moneta/adapters/memcached' autoload :MemcachedDalli, 'moneta/adapters/memcached/dalli' autoload :MemcachedNative, 'moneta/adapters/memcached/native' autoload :Memory, 'moneta/adapters/memory' autoload :Mongo, 'moneta/adapters/mongo' autoload :Null, 'moneta/adapters/null' autoload :PStore, 'moneta/adapters/pstore' autoload :Redis, 'moneta/adapters/redis' autoload :RestClient, 'moneta/adapters/restclient' autoload :Riak, 'moneta/adapters/riak' autoload :SDBM, 'moneta/adapters/sdbm' autoload :Sequel, 'moneta/adapters/sequel' autoload :Sqlite, 'moneta/adapters/sqlite' autoload :TDB, 'moneta/adapters/tdb' autoload :TokyoCabinet, 'moneta/adapters/tokyocabinet' autoload :TokyoTyrant, 'moneta/adapters/tokyotyrant' autoload :YAML, 'moneta/adapters/yaml' end # Create new Moneta store with default proxies # # This works in most cases if you don't want fine # control over the proxy stack. It uses Marshal on the # keys and values. Use Moneta#build if you want to have fine control! 
# # @param [Symbol] name Name of adapter (See Moneta::Adapters) # @param [Hash] options # @return [Moneta store] newly created Moneta store # @option options [Boolean/Integer] :expires Ensure that store supports expiration by inserting # {Expires} if the underlying adapter doesn't support it natively # and set default expiration time # @option options [Boolean] :threadsafe (false) Ensure that the store is thread safe by inserting Moneta::Lock # @option options [Boolean/Hash] :logger (false) Add logger to proxy stack (Hash is passed to logger as options) # @option options [Boolean/Symbol] :compress (false) If true, compress value with zlib, or specify custom compress, e.g. :quicklz # @option options [Symbol] :serializer (:marshal) Serializer used for key and value, disable with nil # @option options [Symbol] :key_serializer (options[:serializer]) Serializer used for key, disable with nil # @option options [Symbol] :value_serializer (options[:serializer]) Serializer used for value, disable with nil # @option options [String] :prefix Key prefix used for namespacing (default none) # @option options All other options passed to the adapter # # Supported adapters: # * :HashFile (Store which spreads the entries using a md5 hash, e.g. cache/42/391dd7535aebef91b823286ac67fcd) # * :File (normal file store) # * :Memcached (Memcached store) # * ... (All other adapters from Moneta::Adapters) # # @api public def self.new(name, options = {}) expires = options[:expires] options.delete(:expires) unless Numeric === expires logger = options.delete(:logger) threadsafe = options.delete(:threadsafe) compress = options.delete(:compress) serializer = options.include?(:serializer) ? options.delete(:serializer) : :marshal key_serializer = options.include?(:key_serializer) ? options.delete(:key_serializer) : serializer value_serializer = options.include?(:value_serializer) ? 
options.delete(:value_serializer) : serializer transformer = { key: [key_serializer, :prefix], value: [value_serializer], prefix: options.delete(:prefix) } transformer[:value] << (Symbol === compress ? compress : :zlib) if compress raise ArgumentError, 'Name must be Symbol' unless Symbol === name case name when :Sequel # Sequel accept only base64 keys transformer[:key] << :base64 # If using HStore, binary data is not allowed transformer[:value] << :base64 if options[:hstore] when :ActiveRecord, :DataMapper # DataMapper and AR accept only base64 keys and values transformer[:key] << :base64 transformer[:value] << :base64 when :Couch # CouchDB needs to use URL-safe Base64 for its keys transformer[:key] << :urlsafe_base64 transformer[:value] << :base64 when :PStore, :YAML, :Null # For PStore and YAML only the key has to be a string transformer.delete(:value) if transformer[:value] == [:marshal] when :HashFile # Use spreading hashes transformer[:key] << :md5 << :spread name = :File when :File, :Riak, :RestClient # Use escaping for file and HTTP interfaces transformer[:key] << :escape end a = Adapters.const_get(name).new(options) build do use :Logger, Hash === logger ? 
logger : {} if logger use :Expires, expires: options[:expires] if expires && !a.supports?(:expires) use :Transformer, transformer use :Lock if threadsafe adapter a end end # Configure your own Moneta proxy stack # # @yieldparam Builder block # @return [Moneta store] newly created Moneta store # # @example Moneta builder # Moneta.build do # use :Expires # adapter :Memory # end # # @api public def self.build(&block) Builder.new(&block).build.last end end moneta-1.5.2/lib/moneta/000077500000000000000000000000001433316074200150365ustar00rootroot00000000000000moneta-1.5.2/lib/moneta/adapter.rb000066400000000000000000000031261433316074200170050ustar00rootroot00000000000000module Moneta # Adapter base class # @api public class Adapter include Defaults include Config attr_reader :backend class << self # Define a block used to build this adapter's backend. The block will # receive as keyword arguments any options passed to the adapter during # initialization that are not config settings. # # If the adapter is initialized with a `:backend` option, this will be used # instead, and the block won't be called. # # @param [Boolean] required # @yield [**options] options passed to the adapter's initialize method # @yieldreturn [Object] The backend to use def backend(required: true, &block) raise "backend block already set" if class_variables(false).include?(:@@backend_block) class_variable_set(:@@backend_block, block) class_variable_set(:@@backend_required, true) if required end def backend_block class_variable_get(:@@backend_block) if class_variable_defined?(:@@backend_block) end def backend_required? 
class_variable_defined?(:@@backend_required) end end # @param [Hash] options def initialize(options = {}) set_backend(**configure(**options)) end private def set_backend(backend: nil, **options) @backend = backend || if backend_block = self.class.backend_block instance_exec(**options, &backend_block) end raise ArgumentError, 'backend needs to be set - refer to adapter documentation' if !@backend && self.class.backend_required? end end end moneta-1.5.2/lib/moneta/adapters/000077500000000000000000000000001433316074200166415ustar00rootroot00000000000000moneta-1.5.2/lib/moneta/adapters/activerecord.rb000066400000000000000000000263641433316074200216530ustar00rootroot00000000000000require 'active_record' require 'uri' module Moneta module Adapters # ActiveRecord as key/value stores # @api public class ActiveRecord < Adapter supports :create, :increment, :each_key attr_reader :table delegate :with_connection, to: :connection_pool @connection_lock = ::Mutex.new class << self attr_reader :connection_lock delegate :configurations, :configurations=, :connection_handler, to: ::ActiveRecord::Base def retrieve_connection_pool(spec_name) connection_handler.retrieve_connection_pool(spec_name.to_s) end def establish_connection(spec_name) connection_lock.synchronize do if connection_pool = retrieve_connection_pool(spec_name) connection_pool else connection_handler.establish_connection(spec_name.to_sym) end end end def retrieve_or_establish_connection_pool(spec_name) retrieve_connection_pool(spec_name) || establish_connection(spec_name) end end config :key_column, default: :k config :value_column, default: :v backend required: false do |table: :moneta, connection: nil, create_table: nil| @spec = spec_for_connection(connection) # Ensure the table name is a symbol. 
table_name = table.to_sym if create_table == nil default_create_table(table_name) elsif create_table with_connection(&create_table) end @table = ::Arel::Table.new(table_name) # backend is only used if there's an existing ActiveRecord model nil end # @param [Hash] options # @option options [Object] :backend A class object inheriting from ActiveRecord::Base to use as a table # @option options [String,Symbol] :table (:moneta) Table name # @option options [Hash/String/Symbol] :connection ActiveRecord connection configuration (`Hash` or `String`), or # symbol giving the name of a Rails connection (e.g. :production) # @option options [Proc, Boolean] :create_table Proc called with a connection if table # needs to be created. Pass false to skip the create table check all together. # @option options [Symbol] :key_column (:k) The name of the column to use for keys # @option options [Symbol] :value_column (:v) The name of the column to use for values def initialize(options = {}) super # If a :backend was provided, use it to set the spec and table if backend @spec = backend.connection_pool.spec @table = ::Arel::Table.new(backend.table_name) end end # (see Proxy#key?) def key?(key, options = {}) with_connection do |conn| sel = arel_sel_key(key).project(::Arel.sql('1')) result = conn.select_all(sel) !result.empty? end end # (see Proxy#each_key) def each_key(&block) with_connection do |conn| return enum_for(:each_key) { conn.select_value(arel_sel.project(table[config.key_column].count)) } unless block_given? 
conn.select_values(arel_sel.project(table[config.key_column])).each { |k| yield(k) } end self end # (see Proxy#load) def load(key, options = {}) with_connection do |conn| conn_sel_value(conn, key) end end # (see Proxy#store) def store(key, value, options = {}) with_connection do |conn| encoded = encode(conn, value) conn_ins(conn, key, encoded) unless conn_upd(conn, key, encoded) == 1 end value end # (see Proxy#delete) def delete(key, options = {}) with_connection do |conn| conn.transaction do sel = arel_sel_key(key).project(table[config.value_column]).lock value = decode(conn, conn.select_value(sel)) del = arel_del.where(table[config.key_column].eq(key)) conn.delete(del) value end end end # (see Proxy#increment) def increment(key, amount = 1, options = {}) with_connection do |conn| begin conn_ins(conn, key, amount.to_s) amount rescue ::ActiveRecord::RecordNotUnique conn.transaction do sel = arel_sel_key(key).project(table[config.value_column]).lock value = decode(conn, conn.select_value(sel)) value = (value ? Integer(value) : 0) + amount # Re-raise if the upate affects no rows (i.e. row deleted after attempted insert, # before select for update) raise unless conn_upd(conn, key, value.to_s) == 1 value end end end rescue ::ActiveRecord::RecordNotUnique, ::ActiveRecord::Deadlocked # This handles the "no row updated" issue, above, as well as deadlocks # which may occur on some adapters tries ||= 0 (tries += 1) <= 3 ? 
retry : raise end # (see Proxy#create) def create(key, value, options = {}) with_connection do |conn| conn_ins(conn, key, value) true end rescue ::ActiveRecord::RecordNotUnique false end # (see Proxy#clear) def clear(options = {}) with_connection do |conn| conn.delete(arel_del) end self end # (see Proxy#close) def close @table = nil @spec = nil end # (see Proxy#slice) def slice(*keys, lock: false, **options) with_connection do |conn| conn.create_table(:slice_keys, temporary: true) do |t| t.string :key, null: false end begin temp_table = ::Arel::Table.new(:slice_keys) keys.each do |key| conn.insert ::Arel::InsertManager.new .into(temp_table) .insert([[temp_table[:key], key]]) end sel = arel_sel .join(temp_table) .on(table[config.key_column].eq(temp_table[:key])) .project(table[config.key_column], table[config.value_column]) sel = sel.lock if lock result = conn.select_all(sel) k = config.key_column.to_s v = config.value_column.to_s result.map do |row| [row[k], decode(conn, row[v])] end ensure conn.drop_table(:slice_keys) end end end # (see Proxy#values_at) def values_at(*keys, **options) hash = Hash[slice(*keys, **options)] keys.map { |key| hash[key] } end # (see Proxy#fetch_values) def fetch_values(*keys, **options) return values_at(*keys, **options) unless block_given? hash = Hash[slice(*keys, **options)] keys.map do |key| if hash.key?(key) hash[key] else yield key end end end # (see Proxy#merge!) def merge!(pairs, options = {}) with_connection do |conn| conn.transaction do existing = Hash[slice(*pairs.map { |k, _| k }, lock: true, **options)] update_pairs, insert_pairs = pairs.partition { |k, _| existing.key?(k) } insert_pairs.each { |key, value| conn_ins(conn, key, encode(conn, value)) } if block_given? update_pairs.map! 
do |key, new_value| [key, yield(key, existing[key], new_value)] end end update_pairs.each { |key, value| conn_upd(conn, key, encode(conn, value)) } end end self end private def connection_pool self.class.retrieve_or_establish_connection_pool(@spec) end def default_create_table(table_name) with_connection do |conn| return if conn.table_exists?(table_name) # Prevent multiple connections from attempting to create the table simultaneously. self.class.connection_lock.synchronize do conn.create_table(table_name, id: false) do |t| # Do not use binary key (Issue #17) t.string config.key_column, null: false t.binary config.value_column end conn.add_index(table_name, config.key_column, unique: true) end end end def arel_del ::Arel::DeleteManager.new.from(table) end def arel_sel ::Arel::SelectManager.new.from(table) end def arel_upd ::Arel::UpdateManager.new.table(table) end def arel_sel_key(key) arel_sel.where(table[config.key_column].eq(key)) end def conn_ins(conn, key, value) ins = ::Arel::InsertManager.new.into(table) ins.insert([[table[config.key_column], key], [table[config.value_column], value]]) conn.insert ins end def conn_upd(conn, key, value) conn.update arel_upd.where(table[config.key_column].eq(key)).set([[table[config.value_column], value]]) end def conn_sel_value(conn, key) decode(conn, conn.select_value(arel_sel_key(key).project(table[config.value_column]))) end def encode(conn, value) if value == nil nil elsif conn.respond_to?(:escape_bytea) conn.escape_bytea(value) elsif defined?(::ActiveRecord::ConnectionAdapters::SQLite3Adapter) && conn.is_a?(::ActiveRecord::ConnectionAdapters::SQLite3Adapter) Arel::Nodes::SqlLiteral.new("X'#{value.unpack('H*').first}'") else value end end def decode(conn, value) if value == nil nil elsif defined?(::ActiveModel::Type::Binary::Data) && value.is_a?(::ActiveModel::Type::Binary::Data) value.to_s elsif conn.respond_to?(:unescape_bytea) conn.unescape_bytea(value) else value end end # Feed the connection info into ActiveRecord 
and get back a name to use # for getting the connection pool def spec_for_connection(connection) case connection when Symbol connection when Hash, String # Normalize the connection specification to a hash resolver = ::ActiveRecord::ConnectionAdapters::ConnectionSpecification::Resolver.new \ 'dummy' => connection # Turn the config into a standardised hash, sans a couple of bits hash = resolver.resolve(:dummy) hash.delete('name') hash.delete(:password) # For security # Make a name unique to this config name = 'moneta?' + URI.encode_www_form(hash.to_a.sort) # Add into configurations unless its already there (initially done without locking for # speed) unless self.class.configurations.key? name self.class.connection_lock.synchronize do self.class.configurations[name] = connection \ unless self.class.configurations.key? name end end name.to_sym else ::ActiveRecord::Base.connection_pool.spec.name.to_s end end end end end moneta-1.5.2/lib/moneta/adapters/activesupportcache.rb000066400000000000000000000071051433316074200230650ustar00rootroot00000000000000module Moneta module Adapters # ActiveSupport::Cache::Store adapter # @api public class ActiveSupportCache < Adapter include ExpiresSupport supports :increment # @!method initialize(options = {}) # @param [Hash] options # @option options [ActiveSupport::Cache::Store] :backend (Rails.cache) Cache store to use # @option options [Numeric] :expires default expiration in seconds backend { Rails.cache if defined?(Rails) } # (see Proxy#key?) def key?(key, options = {}) backend.exist?(key).tap do |exists| if exists && (expires = expires_value(options, nil)) != nil value = backend.read(key, options) backend.write(key, value, options.merge(expires_in: expires ? expires.seconds : nil)) end end end # (see Proxy#load) def load(key, options = {}) expires = expires_value(options, nil) value = backend.read(key, options) if value and expires != nil backend.write(key, value, options.merge(expires_in: expires ? 
expires.seconds : nil)) end value end # (see Proxy#store) def store(key, value, options = {}) expires = expires_value(options) backend.write(key, value, options.merge(expires_in: expires ? expires.seconds : nil)) value end # (see Proxy#increment) def increment(key, amount = 1, options = {}) expires = expires_value(options) options.delete(:raw) existing = Integer(backend.fetch(key, options.merge(raw: true)) { 0 }) if amount > 0 backend.increment(key, amount, options.merge(expires_in: expires ? expires.seconds : nil)) elsif amount < 0 backend.decrement(key, -amount, options.merge(expires_in: expires ? expires.seconds : nil)) else existing end end # (see Proxy#delete) def delete(key, options = {}) value = backend.read(key, options) if value != nil backend.delete(key, options) options[:raw] ? value.to_s : value end end # (see Proxy#clear) def clear(options = {}) backend.clear self end # (see Proxy#slice) def slice(*keys, **options) hash = backend.read_multi(*keys) if (expires = expires_value(options, nil)) != nil hash.each do |key, value| backend.write(key, value, options.merge(expires_in: expires ? expires.seconds : nil)) end end if options[:raw] hash.each do |key, value| hash[key] = value.to_s if value != nil end end hash end # (see Proxy#values_at) def values_at(*keys, **options) hash = slice(*keys, **options) keys.map { |key| hash[key] } end # (see Proxy#merge!) def merge!(pairs, options = {}) if block_given? existing = slice(*pairs.map { |k, _| k }, **options) pairs = pairs.map do |key, new_value| if existing.key?(key) new_value = yield(key, existing[key], new_value) end [key, new_value] end end hash = Hash === pairs ? pairs : Hash[pairs.to_a] expires = expires_value(options) backend.write_multi(hash, options.merge(expires_in: expires ? expires.seconds : nil)) self end private def expires_value(options, default = config.expires) super.tap { options.delete(:expires) unless options.frozen? 
} end end end end moneta-1.5.2/lib/moneta/adapters/cassandra.rb000066400000000000000000000304241433316074200211300ustar00rootroot00000000000000require 'cassandra' module Moneta module Adapters # Cassandra backend # @api public class Cassandra < Adapter include ExpiresSupport supports :each_key config :table, default: 'moneta' config :key_column, default: 'key' config :value_column, default: 'value' config :updated_column, default: 'updated_at' config :expired_column, default: 'expired' config :read_consistency, default: :all config :write_consistency, default: :all backend do |keyspace: 'moneta', cluster: nil, create_keyspace: nil, **options| cluster ||= ::Cassandra.cluster(options).tap do |own_cluster| @own_cluster = own_cluster end begin cluster.connect(keyspace) rescue ::Cassandra::Errors::InvalidError backend = cluster.connect create_keyspace(backend, keyspace, create_keyspace) backend.execute("USE " + keyspace) backend end end # @param [Hash] options # @option options [String] :keyspace ('moneta') Cassandra keyspace # @option options [String] :table ('moneta') Cassandra table # @option options [String] :host ('127.0.0.1') Server host name # @option options [Integer] :port (9160) Server port # @option options [Integer] :expires Default expiration time # @option options [String] :key_column ('key') Name of the key column # @option options [String] :value_column ('value') Name of the value # column # @option options [String] :updated_column ('updated_at') Name of the # column used to track last update # @option options [String] :expired_column ('expired') Name of the column # used to track expiry # @option options [Symbol] :read_consistency (:all) Default read # consistency # @option options [Symbol] :write_consistency (:all) Default write # consistency # @option options [Proc, Boolean, Hash] :create_keyspace Provide a proc # for creating the keyspace, or a Hash of options to use when creating # it, or set to false to disable. 
The Proc will only be called if the # keyspace does not already exist. # @option options [::Cassandra::Cluster] :cluster Existing cluster to use # @option options [::Cassandra::Session] :backend Existing session to use # @option options Other options passed to `Cassandra#cluster` def initialize(options = {}) super backend.execute <<-CQL CREATE TABLE IF NOT EXISTS #{config.table} ( #{config.key_column} blob, #{config.value_column} blob, #{config.updated_column} timeuuid, #{config.expired_column} boolean, PRIMARY KEY (#{config.key_column}, #{config.updated_column}) ) CQL prepare_statements end # (see Proxy#key?) def key?(key, options = {}) rc, wc = consistency(options) if (expires = expires_value(options, nil)) != nil # Because Cassandra expires each value in a column, rather than the # whole column, when we want to update the expiry we load the value # and then re-set it in order to update the TTL. return false unless row = backend.execute(@load, options.merge(consistency: rc, arguments: [key])).first and row[config.expired_column] != nil backend.execute(@update_expires, options.merge(consistency: wc, arguments: [(expires || 0).to_i, timestamp, row[config.value_column], key, row[config.updated_column]])) true elsif row = backend.execute(@key, options.merge(consistency: rc, arguments: [key])).first row[config.expired_column] != nil else false end end # (see Proxy#load) def load(key, options = {}) rc, wc = consistency(options) if row = backend.execute(@load, options.merge(consistency: rc, arguments: [key])).first and row[config.expired_column] != nil if (expires = expires_value(options, nil)) != nil backend.execute(@update_expires, options.merge(consistency: wc, arguments: [(expires || 0).to_i, timestamp, row[config.value_column], key, row[config.updated_column]])) end row[config.value_column] end end # (see Proxy#store) def store(key, value, options = {}) _, wc = consistency(options) expires = expires_value(options) t = timestamp batch = backend.batch do |batch| 
batch.add(@store_delete, arguments: [t, key]) batch.add(@store, arguments: [key, value, (expires || 0).to_i, t + 1]) end backend.execute(batch, options.merge(consistency: wc)) value end # (see Proxy#delete) def delete(key, options = {}) rc, wc = consistency(options) result = backend.execute(@delete_value, options.merge(consistency: rc, arguments: [key])) if row = result.first and row[config.expired_column] != nil backend.execute(@delete, options.merge(consistency: wc, arguments: [timestamp, key, row[config.updated_column]])) row[config.value_column] end end # (see Proxy#clear) def clear(options = {}) backend.execute(@clear) self end # (see Proxy#close) def close backend.close_async @backend = nil if @own_cluster @own_cluster.close_async @own_cluster = nil end nil end # (see Proxy#each_key) def each_key rc, = consistency return enum_for(:each_key) unless block_given? result = backend.execute(@each_key, consistency: rc, page_size: 100) loop do result.each do |row| next if row[config.expired_column] == nil yield row[config.key_column] end break if result.last_page? result = result.next_page end self end # (see Proxy#slice) def slice(*keys, **options) rc, wc = consistency(options) result = backend.execute(@slice, options.merge(consistency: rc, arguments: [keys])) expires = expires_value(options, nil) updated = [] if expires != nil pairs = result.map do |row| next if row[config.expired_column] == nil if expires != nil updated << [row[config.key_column], row[config.value_column], row[config.updated_column]] end [row[config.key_column], row[config.value_column]] end.compact if expires != nil && !updated.empty? 
ttl = (expires || 0).to_i t = timestamp batch = backend.batch do |batch| updated.each do |key, value, updated| batch.add(@update_expires, arguments: [ttl, t, value, key, updated]) end end backend.execute(batch, options.merge(consistency: wc)) end pairs end # (see Proxy#values_at) def values_at(*keys, **options) hash = Hash[slice(*keys, **options)] keys.map { |key| hash[key] } end # (see Proxy#fetch_values) def fetch_values(*keys, **options) return values_at(*keys, **options) unless block_given? hash = Hash[slice(*keys, **options)] keys.map do |key| if hash.key?(key) hash[key] else yield key end end end # (see Proxy#merge!) def merge!(pairs, options = {}) keys = pairs.map { |k, _| k }.to_a return self if keys.empty? if block_given? existing = Hash[slice(*keys, **options)] pairs = pairs.map do |key, new_value| if existing.key?(key) [key, yield(key, existing[key], new_value)] else [key, new_value] end end end _rc, wc = consistency(options) expires = expires_value(options) t = timestamp batch = backend.batch do |batch| batch.add(@merge_delete, arguments: [t, keys]) pairs.each do |key, value| batch.add(@store, arguments: [key, value, (expires || 0).to_i, t + 1]) end end backend.execute(batch, options.merge(consistency: wc)) self end private def timestamp (Time.now.to_r * 1_000_000).to_i end def create_keyspace(backend, keyspace, create_keyspace) options = { replication: { class: 'SimpleStrategy', replication_factor: 1 } } case create_keyspace when Proc return create_keyspace.call(keyspace) when false return when Hash options.merge!(create_keyspace) end # This is a bit hacky, but works. Options in Cassandra look like JSON, # but use single quotes instead of double-quotes. 
require 'multi_json' option_str = options.map do |key, value| key.to_s + ' = ' + MultiJson.dump(value).tr(?", ?') end.join(' AND ') backend.execute "CREATE KEYSPACE IF NOT EXISTS %s WITH %s" % { keyspace: keyspace, options: option_str } rescue ::Cassandra::Errors::TimeoutError tries ||= 0 (tries += 1) <= 3 ? retry : raise end def prepare_statements @key = backend.prepare(<<-CQL) SELECT #{config.updated_column}, #{config.expired_column} FROM #{config.table} WHERE #{config.key_column} = ? LIMIT 1 CQL @store_delete = backend.prepare(<<-CQL) DELETE FROM #{config.table} USING TIMESTAMP ? WHERE #{config.key_column} = ? CQL @store = backend.prepare(<<-CQL) INSERT INTO #{config.table} (#{config.key_column}, #{config.value_column}, #{config.updated_column}, #{config.expired_column}) VALUES (?, ?, now(), false) USING TTL ? AND TIMESTAMP ? CQL @load = backend.prepare(<<-CQL) SELECT #{config.value_column}, #{config.updated_column}, #{config.expired_column} FROM #{config.table} WHERE #{config.key_column} = ? LIMIT 1 CQL @update_expires = backend.prepare(<<-CQL) UPDATE #{config.table} USING TTL ? AND TIMESTAMP ? SET #{config.value_column} = ?, #{config.expired_column} = false WHERE #{config.key_column} = ? AND #{config.updated_column} = ? CQL @clear = backend.prepare("TRUNCATE #{config.table}") @delete_value = backend.prepare(<<-CQL) SELECT #{config.value_column}, #{config.updated_column}, #{config.expired_column} FROM #{config.table} WHERE #{config.key_column} = ? LIMIT 1 CQL @delete = backend.prepare(<<-CQL, idempotent: true) DELETE FROM #{config.table} USING TIMESTAMP ? WHERE #{config.key_column} = ? AND #{config.updated_column} = ? CQL @each_key = backend.prepare(<<-CQL) SELECT #{config.key_column}, #{config.expired_column} FROM #{config.table} CQL @slice = backend.prepare(<<-CQL) SELECT #{config.key_column}, #{config.value_column}, #{config.updated_column}, #{config.expired_column} FROM #{config.table} WHERE #{config.key_column} IN ? 
CQL @merge_delete = backend.prepare(<<-CQL) DELETE FROM #{config.table} USING TIMESTAMP ? WHERE #{config.key_column} IN ? CQL end def consistency(options = {}) [ options[:read_consistency] || config.read_consistency, options[:write_consistency] || config.write_consistency ] end end end end moneta-1.5.2/lib/moneta/adapters/client.rb000066400000000000000000000070751433316074200204550ustar00rootroot00000000000000require 'socket' module Moneta module Adapters # Moneta client backend # @api public class Client < Adapter # @!method initialize(options = {}) # @param [Hash] options # @option options [TCPSocket | UNIXSocket] :backend an open socket to use # @option options [Integer] :port (9000) TCP port # @option options [String] :host ('127.0.0.1') Hostname # @option options [String] :socket Unix socket file name as alternative to `:port` and `:host` backend do |socket: nil, host: '127.0.0.1', port: 9000| if socket UNIXSocket.open(socket) else TCPSocket.open(host, port) end end # (see Proxy#key?) def key?(key, options = {}) write(:key?, key, options) read_msg end # (see Proxy#load) def load(key, options = {}) write(:load, key, options) read_msg end # (see Proxy#store) def store(key, value, options = {}) write(:store, key, value, options) read_msg value end # (see Proxy#delete) def delete(key, options = {}) write(:delete, key, options) read_msg end # (see Proxy#increment) def increment(key, amount = 1, options = {}) write(:increment, key, amount, options) read_msg end # (see Proxy#create) def create(key, value, options = {}) write(:create, key, value, options) read_msg end # (see Proxy#clear) def clear(options = {}) write(:clear, options) read_msg self end # (see Proxy#close) def close backend.close nil end # (see Proxy#each_key) def each_key raise NotImplementedError, 'each_key is not supported' unless supports?(:each_key) return enum_for(:each_key) unless block_given? 
begin write(:each_key) yield_break = false loop do write('NEXT') # A StopIteration error will be raised by this call if the server # reached the end of the enumeration. This will stop the loop # automatically. result = read_msg # yield_break will be true in the ensure block (below) if anything # happened during the yield to stop further enumeration. yield_break = true yield result yield_break = false end ensure write('BREAK') if yield_break read_msg # nil return from each_key end self end # (see Default#features) def features @features ||= begin write(:features) read_msg.freeze end end private def write(*args) s = Marshal.dump(args) backend.write([s.bytesize].pack('N') << s) end # JRuby doesn't support socket#recv with flags if defined?(JRUBY_VERSION) def read(bytes) received = backend.read(bytes) raise EOFError, "Server closed socket" unless received && received.bytesize == bytes received end else def read(bytes) received = backend.recv(bytes, Socket::MSG_WAITALL) raise EOFError, "Server closed socket" unless received && received.bytesize == bytes received end end def read_msg size = read(4).unpack('N').first result = Marshal.load(read(size)) raise result if Exception === result result end end end end moneta-1.5.2/lib/moneta/adapters/cookie.rb000066400000000000000000000017071433316074200204440ustar00rootroot00000000000000module Moneta module Adapters # Cookie backend used by the middleware {Rack::MonetaCookies} # @api public class Cookie < Memory attr_reader :cookies def initialize(options = {}) super @options, @cookies = options, {} end # (see Proxy#store) def store(key, value, options = {}) cookie = @options.merge(options) cookie[:value] = value cookie[:expires] += Time.now.to_i if cookie[:expires] @cookies[key] = cookie super end # (see Proxy#delete) def delete(key, options = {}) @cookies[key] = nil super end # (see Proxy#clear) def clear(options = {}) @backend.each_key { |key| @cookies[key] = nil } super self end # Reset the cookie store # This method is used 
by the middleware. def reset(cookies) @cookies, @backend = {}, cookies end end end end moneta-1.5.2/lib/moneta/adapters/couch.rb000066400000000000000000000330351433316074200202730ustar00rootroot00000000000000require 'faraday' require 'multi_json' module Moneta module Adapters # CouchDB backend # # You can store hashes directly using this adapter. # # @example Store hashes # db = Moneta::Adapters::Couch.new # db['key'] = {a: 1, b: 2} # # @api public class Couch < Adapter # @api private class HTTPError < StandardError attr_reader :status, :request_method, :key def initialize(status, request_method, key) @status = status @request_method = request_method.to_sym @key = key super "HTTP Error: #{@status} (#{@request_method.to_s.upcase} #{@key})" end end supports :create, :each_key config :value_field, default: 'value' config :type_field, default: 'type' config :login config :password config :adapter config :skip_create_db backend do |scheme: 'http', host: '127.0.0.1', port: 5984, db: 'moneta', adapter: nil, **options| ::Faraday.new "#{scheme}://#{host}:#{port}/#{db}", options do |faraday| faraday.adapter adapter if adapter end end # @param [Hash] options # @option options [String] :host ('127.0.0.1') Couch host # @option options [String] :port (5984) Couch port # @option options [String] :db ('moneta') Couch database # @option options [String] :scheme ('http') HTTP scheme to use # @option options [String] :value_field ('value') Document field to store value # @option options [String] :type_field ('type') Document field to store value type # @option options [String] :login Login name to use for HTTP basic authentication # @option options [String] :password Password to use for HTTP basic authentication # @option options [Symbol] :adapter Adapter to use with Faraday # @option options [Faraday::Connecton] :backend Use existing backend instance # @option options Other options passed to {Faraday::new} (unless # :backend option is provided). 
def initialize(options = {}) super if config.login && config.password # Faraday 1.x had a `basic_auth` function if backend.respond_to? :basic_auth backend.basic_auth(config.login, config.password) else backend.request :authorization, :basic, config.login, config.password end end @rev_cache = Moneta.build do use :Lock adapter :LRUHash end create_db unless config.skip_create_db end # (see Proxy#key?) # @option options [Boolean] :cache_rev (true) Whether to cache the rev of # the document for faster updating def key?(key, options = {}) cache_rev = options[:cache_rev] != false head(key, cache_rev: cache_rev) end # (see Proxy#load) # @option (see #key?) def load(key, options = {}) cache_rev = options[:cache_rev] != false doc = get(key, cache_rev: cache_rev) doc ? doc_to_value(doc) : nil end # (see Proxy#store) # @option (see #key?) # @option options [Boolean] :batch (false) Whether to do a # {https://docs.couchdb.org/en/stable/api/database/common.html#api-doc-batch-writes # batch mode write} # @option options [Boolean] :full_commit (nil) Set to `true` or `false` # to override the server's # {https://docs.couchdb.org/en/stable/config/couchdb.html#couchdb/delayed_commits # commit policy} def store(key, value, options = {}) put(key, value_to_doc(value, rev(key)), headers: full_commit_header(options[:full_commit]), query: options[:batch] ? { batch: 'ok' } : {}, cache_rev: options[:cache_rev] != false, expect: options[:batch] ? 202 : 201) value rescue HTTPError tries ||= 0 (tries += 1) < 10 ? 
retry : raise end # (see Proxy#delete) # @option options [Boolean] :batch (false) Whether to do a # {https://docs.couchdb.org/en/stable/api/database/common.html#api-doc-batch-writes # batch mode write} # @option options [Boolean] :full_commit (nil) Set to `true` or `false` # to override the server's # {https://docs.couchdb.org/en/stable/config/couchdb.html#couchdb/delayed_commits # commit policy} def delete(key, options = {}) get_response = get(key, returns: :response) if get_response.success? value = body_to_value(get_response.body) existing_rev = parse_rev(get_response) query = { rev: existing_rev } query[:batch] = 'ok' if options[:batch] request(:delete, key, headers: full_commit_header(options[:full_commit]), query: query, expect: options[:batch] ? 202 : 200) delete_cached_rev(key) value end rescue HTTPError tries ||= 0 (tries += 1) < 10 ? retry : raise end # (see Proxy#clear) # @option options [Boolean] :compact (false) Whether to compact the database after clearing # @option options [Boolean] :await_compact (false) Whether to wait for compaction to complete # before returning. # @option options [Boolean] :full_commit (nil) Set to `true` or `false` # to override the server's # {https://docs.couchdb.org/en/stable/config/couchdb.html#couchdb/delayed_commits # commit policy} def clear(options = {}) loop do docs = all_docs(limit: 10_000) break if docs['rows'].empty? deletions = docs['rows'].map do |row| { _id: row['id'], _rev: row['value']['rev'], _deleted: true } end bulk_docs(deletions, full_commit: options[:full_commit]) end # Compact the database unless told not to if options[:compact] post('_compact', expect: 202) # Performance won't be great while compaction is happening, so by default we wait for it if options[:await_compact] loop do db_info = get('', expect: 200) break unless db_info['compact_running'] # wait before checking again sleep 1 end end end self end # (see Proxy#create) # @option (see #key?) 
def create(key, value, options = {}) cache_rev = options[:cache_rev] != false doc = value_to_doc(value, nil) response = put(key, doc, cache_rev: cache_rev, returns: :response) case response.status when 201 true when 409 false else raise HTTPError.new(response.status, :put, @backend.create_url(key)) end rescue HTTPError tries ||= 0 (tries += 1) < 10 ? retry : raise end # (see Proxy#each_key) def each_key return enum_for(:each_key) unless block_given? skip = 0 limit = 1000 loop do docs = all_docs(limit: limit, skip: skip) break if docs['rows'].empty? skip += docs['rows'].length docs['rows'].each do |row| key = row['id'] @rev_cache[key] = row['value']['rev'] yield key end end self end # (see Proxy#values_at) def values_at(*keys, **options) hash = Hash[slice(*keys, **options)] keys.map { |key| hash[key] } end # (see Proxy#slice) def slice(*keys, **options) docs = all_docs(keys: keys, include_docs: true) docs["rows"].map do |row| next unless doc = row['doc'] [row['id'], doc_to_value(doc)] end.compact end # (see Proxy#merge!) # @option options [Boolean] :full_commit (nil) Set to `true` or `false` # to override the server's # {https://docs.couchdb.org/en/stable/config/couchdb.html#couchdb/delayed_commits # commit policy} def merge!(pairs, options = {}) keys = pairs.map { |key, _| key }.to_a cache_revs(*keys.reject { |key| @rev_cache[key] }) if block_given? 
existing = Hash[slice(*keys, **options)] pairs = pairs.map do |key, new_value| [ key, if existing.key?(key) yield(key, existing[key], new_value) else new_value end ] end end docs = pairs.map { |key, value| value_to_doc(value, @rev_cache[key], key) }.to_a results = bulk_docs(docs, full_commit: options[:full_commit], returns: :doc) retries = results.each_with_object([]) do |row, retries| ok, id = row.values_at('ok', 'id') if ok @rev_cache[id] = row['rev'] elsif row['error'] == 'conflict' delete_cached_rev(id) retries << pairs.find { |key,| key == id } else raise "Unrecognised response: #{row}" end end # Recursive call with all conflicts if retries.empty? self else merge!(retries, options) end end private def full_commit_header(full_commit) full_commit == nil ? {} : { 'X-Couch-Full-Commit' => (!!full_commit).to_s } end def body_to_value(body) doc_to_value(MultiJson.load(body)) end def doc_to_value(doc) case doc[config.type_field] when 'Hash' doc = doc.dup doc.delete('_id') doc.delete('_rev') doc.delete(config.type_field) doc else doc[config.value_field] end end def value_to_doc(value, rev, id = nil) doc = case value when Hash value.merge(config.type_field => 'Hash') when String { config.value_field => value, config.type_field => 'String' } when Float, Integer { config.value_field => value, config.type_field => 'Number' } else raise ArgumentError, "Invalid value type: #{value.class}" end doc['_rev'] = rev if rev doc['_id'] = id if id doc end def create_db loop do response = put('', returns: :response) case response.status when 201 break when 412 # Make sure the database really does exist # See https://github.com/apache/couchdb/issues/2073 break if head('') else raise HTTPError.new(response.status, :put, '') end # Wait before trying again sleep 1 end self end def cache_revs(*keys) docs = all_docs(keys: keys) docs['rows'].each do |row| next if !row['value'] || row['value']['deleted'] @rev_cache[row['id']] = row['value']['rev'] end end def parse_rev(response) 
response['etag'][1..-2] end def cache_response_rev(key, response) case response.status when 200, 201 @rev_cache[key] = parse_rev(response) else delete_cached_rev(key) nil end end def delete_cached_rev(key) @rev_cache.delete(key) end def rev(key) @rev_cache[key] || begin response = @backend.head(key) cache_response_rev(key, response) end end def encode_query(query) query.map { |key, value| [key, MultiJson.dump(value)] } end def request(method, key, body = nil, returns: :nil, cache_rev: false, expect: nil, query: nil, headers: {}) url = @backend.build_url(key, query) headers['Content-Type'] = 'application/json' if %i{put post}.include?(method) response = @backend.run_request(method, url, body || '', headers) if cache_rev cache_response_rev(key, response) end if expect raise HTTPError.new(response.status, method, url) unless response.status == expect end case returns when :response response when :success response.success? when :doc response.success? ? MultiJson.load(response.body) : nil when :nil nil else raise "Unknown returns param: #{returns.inspect}" end end def get(key, returns: :doc, **options) request(:get, key, returns: returns, **options) end def head(key, returns: :success, **options) request(:head, key, returns: returns, **options) end def put(key, doc = nil, returns: :success, **options) body = doc == nil ? '' : MultiJson.dump(doc) request(:put, key, body, returns: returns, **options) end def post(key, doc = nil, returns: :success, **options) body = doc == nil ? 
'' : MultiJson.dump(doc) request(:post, key, body, returns: returns, **options) end def all_docs(sorted: false, **params) keys = params.delete(:keys) query = encode_query(params.merge(sorted: sorted)) if keys post('_all_docs', { keys: keys }, query: query, expect: 200, returns: :doc) else get('_all_docs', query: query, expect: 200) end end def bulk_docs(docs, returns: :success, full_commit: nil) post('_bulk_docs', { docs: docs }, headers: full_commit_header(full_commit), returns: returns, expect: 201) end end end end moneta-1.5.2/lib/moneta/adapters/datamapper.rb000066400000000000000000000046631433316074200213150ustar00rootroot00000000000000require 'dm-core' require 'dm-migrations' module Moneta module Adapters # Datamapper backend # @api public class DataMapper include Defaults include Config include NilValues supports :create # @api private class Store include ::DataMapper::Resource property :k, String, key: true, length: 255 property :v, Text, lazy: false self.raise_on_save_failure = true end config :setup, required: true config :repository, default: :moneta, coerce: :to_sym config :table, default: :moneta, coerce: :to_sym # @param [Hash] options # @option options [String] :setup Datamapper setup string # @option options [String/Symbol] :repository (:moneta) Repository name # @option options [String/Symbol] :table (:moneta) Table name def initialize(options = {}) configure(options) Store.storage_names[config.repository] = config.table.to_s ::DataMapper.setup(config.repository, config.setup) context { Store.auto_upgrade! } end # (see Proxy#key?) def key?(key, options = {}) context { Store.get(key) != nil } end # (see Proxy#load) def load(key, options = {}) context do record = Store.get(key) record && record.v end end # (see Proxy#store) def store(key, value, options = {}) context do if record = Store.get(key) record.update(k: key, v: value) else Store.create(k: key, v: value) end value end rescue tries ||= 0 (tries += 1) < 10 ? 
retry : raise end # (see Proxy#create) def create(key, value, options = {}) context do Store.create(k: key, v: value) true end rescue # FIXME: This catches too many errors # it should only catch a not-unique-exception false end # (see Proxy#delete) def delete(key, options = {}) context do if record = Store.get(key) value = record.v record.destroy! value end end end # (see Proxy#clear) def clear(options = {}) context { Store.all.destroy! } self end private def context ::DataMapper.repository(config.repository) { yield } end end end end moneta-1.5.2/lib/moneta/adapters/daybreak.rb000066400000000000000000000026751433316074200207620ustar00rootroot00000000000000require 'daybreak' module Moneta module Adapters # Daybreak backend # @api public class Daybreak < Adapter include DBMAdapter include IncrementSupport include CreateSupport include EachKeySupport # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options [::Daybreak] :backend Use existing backend instance backend { |file:| ::Daybreak::DB.new(file, serializer: ::Daybreak::Serializer::None) } # (see Proxy#load) def load(key, options = {}) backend.load if options[:sync] backend[key] end # (see Proxy#store) def store(key, value, options = {}) backend[key] = value backend.flush if options[:sync] value end # (see Proxy#increment) def increment(key, amount = 1, options = {}) backend.lock { super } end # (see Proxy#create) def create(key, value, options = {}) backend.lock { super } end # (see Proxy#merge!) def merge!(pairs, options = {}) if block_given? 
backend.lock do backend.update(pairs.map do |key, new_value| new_value = yield(key, load(key), new_value) if key?(key) [key, new_value] end) end else backend.update(pairs) end self end end end end moneta-1.5.2/lib/moneta/adapters/dbm.rb000066400000000000000000000007611433316074200177340ustar00rootroot00000000000000require 'dbm' module Moneta module Adapters # DBM backend (Berkeley DB) # @api public class DBM < Adapter include DBMAdapter include IncrementSupport include CreateSupport include EachKeySupport # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options [::DBM] :backend Use existing backend instance backend { |file:| ::DBM.new(file) } end end end moneta-1.5.2/lib/moneta/adapters/file.rb000066400000000000000000000072271433316074200201150ustar00rootroot00000000000000require 'fileutils' require 'English' module Moneta module Adapters # Filesystem backend # @api public class File include Defaults include Config supports :create, :increment, :each_key config :dir, required: true # @param [Hash] options # @option options [String] :dir Directory where files will be stored def initialize(options = {}) configure(**options) FileUtils.mkpath(config.dir) raise "#{config.dir} is not a directory" unless ::File.directory?(config.dir) end # (see Proxy#key?) def key?(key, options = {}) ::File.exist?(store_path(key)) end # (see Proxy#each_key) def each_key(&block) entries = ::Dir.entries(config.dir).reject do |k| ::File.directory?(::File.join(config.dir, k)) end if block_given? 
entries.each { |k| yield(k) } self else enum_for(:each_key) { ::Dir.entries(config.dir).length - 2 } end end # (see Proxy#load) def load(key, options = {}) ::File.read(store_path(key), mode: 'rb') rescue Errno::ENOENT nil end # (see Proxy#store) def store(key, value, options = {}) temp_file = ::File.join(config.dir, "value-#{$PROCESS_ID}-#{Thread.current.object_id}") path = store_path(key) FileUtils.mkpath(::File.dirname(path)) ::File.open(temp_file, 'wb') { |f| f.write(value) } ::File.rename(temp_file, path) value rescue File.unlink(temp_file) rescue nil raise end # (see Proxy#delete) def delete(key, options = {}) value = load(key, options) ::File.unlink(store_path(key)) value rescue Errno::ENOENT nil end # (see Proxy#clear) def clear(options = {}) temp_dir = "#{config.dir}-#{$PROCESS_ID}-#{Thread.current.object_id}" ::File.rename(config.dir, temp_dir) FileUtils.mkpath(config.dir) self rescue Errno::ENOENT self ensure FileUtils.rm_rf(temp_dir) end # (see Proxy#increment) def increment(key, amount = 1, options = {}) path = store_path(key) FileUtils.mkpath(::File.dirname(path)) ::File.open(path, ::File::RDWR | ::File::CREAT) do |f| Thread.pass until f.flock(::File::LOCK_EX) content = f.read amount += Integer(content) unless content.empty? 
content = amount.to_s f.binmode f.pos = 0 f.write(content) f.truncate(content.bytesize) amount end end # HACK: The implementation using File::EXCL is not atomic under JRuby 1.7.4 # See https://github.com/jruby/jruby/issues/827 if defined?(JRUBY_VERSION) # (see Proxy#create) def create(key, value, options = {}) path = store_path(key) FileUtils.mkpath(::File.dirname(path)) # Call native java.io.File#createNewFile return false unless ::Java::JavaIo::File.new(path).createNewFile ::File.open(path, 'wb+') { |f| f.write(value) } true end else # (see Proxy#create) def create(key, value, options = {}) path = store_path(key) FileUtils.mkpath(::File.dirname(path)) ::File.open(path, ::File::WRONLY | ::File::CREAT | ::File::EXCL) do |f| f.binmode f.write(value) end true rescue Errno::EEXIST false end end protected def store_path(key) ::File.join(config.dir, key) end end end end moneta-1.5.2/lib/moneta/adapters/fog.rb000066400000000000000000000027251433316074200177470ustar00rootroot00000000000000require 'fog/storage' module Moneta module Adapters # Fog backend (Cloud storage services) # @api public class Fog < Adapter config :dir, required: true backend { |**options| ::Fog::Storage.new(options) } # @param [Hash] options # @option options [String] :dir Fog directory # @option options [::Fog::Storage] :backend Use existing backend instance # @option options Other options passed to `Fog::Storage#new` def initialize(options = {}) super @directory = backend.directories.get(config.dir) || backend.directories.create(key: config.dir) end # (see Proxy#key?) def key?(key, options = {}) @directory.files.head(key) != nil end # (see Proxy#load) def load(key, options = {}) value = @directory.files.get(key) value && value.body end # (see Proxy#delete) def delete(key, options = {}) if value = @directory.files.get(key) body = value.body value.destroy body end end # (see Proxy#store) def store(key, value, options = {}) value = value.dup if value.frozen? 
# HACK: Fog needs unfrozen string @directory.files.create(options.merge(key: key, body: value)) value end # (see Proxy#clear) def clear(options = {}) @directory.files.all.each do |file| file.destroy end self end end end end moneta-1.5.2/lib/moneta/adapters/gdbm.rb000066400000000000000000000007501433316074200201010ustar00rootroot00000000000000require 'gdbm' module Moneta module Adapters # GDBM backend # @api public class GDBM < Adapter include DBMAdapter include IncrementSupport include CreateSupport include EachKeySupport # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options [::GDBM] :backend Use existing backend instance backend { |file:| ::GDBM.new(file) } end end end moneta-1.5.2/lib/moneta/adapters/hbase.rb000066400000000000000000000057571433316074200202660ustar00rootroot00000000000000require 'hbaserb' module Moneta module Adapters # HBase thrift backend # @api public class HBase < Adapter config :column, 'value' config :table, 'moneta' config :column_family, 'moneta' backend { |host: '127.0.0.1', port: 9090| HBaseRb::Client.new(host, port) } # TODO: Add create support using checkAndPut if added to thrift api # https://issues.apache.org/jira/browse/HBASE-3307 # https://github.com/bmuller/hbaserb/issues/2 supports :increment # @param [Hash] options # @option options [String] :host ('127.0.0.1') Server host name # @option options [Integer] :port (9090) Server port # @option options [String] :table ('moneta') Table name # @option options [String] :column_family ('moneta') Column family # @option options [String] :column ('value') Column # @option options [::HBaseRb::Client] :backend Use existing backend instance def initialize(options = {}) super @column = [config.column_family, config.column].join(':') backend.create_table(config.table, config.column_family) unless backend.has_table?(config.table) @table = backend.get_table(config.table) end # (see Proxy#key?) 
def key?(key, options = {}) @table.get(key, @column).first != nil end # (see Proxy#load) def load(key, options = {}) cell = @table.get(key, @column).first cell && unpack(cell.value) end # (see Proxy#store) def store(key, value, options = {}) @table.mutate_row(key, @column => pack(value)) value end # (see Proxy#increment) def increment(key, amount = 1, options = {}) result = @table.atomic_increment(key, @column, amount) # HACK: Throw error if applied to invalid value Integer(load(key)) if result == 0 result end # (see Proxy#delete) def delete(key, options = {}) if value = load(key, options) @table.delete_row(key) value end end # (see Proxy#clear) def clear(options = {}) @table.create_scanner do |row| @table.delete_row(row.row) end self end # (see Proxy#close) def close backend.close nil end private def pack(value) intvalue = value.to_i if intvalue >= 0 && intvalue <= 0xFFFFFFFFFFFFFFFF && intvalue.to_s == value # Pack as 8 byte big endian [intvalue].pack('Q>') elsif value.bytesize >= 8 # Add nul character to make value distinguishable from integer value + "\0" else value end end def unpack(value) if value.bytesize == 8 # Unpack 8 byte big endian value.unpack('Q>').first.to_s elsif value.bytesize >= 9 && value[-1] == ?\0 # Remove nul character value[0..-2] else value end end end end end moneta-1.5.2/lib/moneta/adapters/kyotocabinet.rb000066400000000000000000000052341433316074200216650ustar00rootroot00000000000000require 'kyotocabinet' module Moneta module Adapters # KyotoCabinet backend # @api public class KyotoCabinet < Adapter include HashAdapter supports :each_key, :increment, :create # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options [::KyotoCabinet::DB] :backend Use existing backend instance backend do |file:| backend = ::KyotoCabinet::DB.new raise backend.error.to_s unless backend.open(file, ::KyotoCabinet::DB::OWRITER | ::KyotoCabinet::DB::OCREATE) backend end # (see Proxy#key?) 
def key?(key, options = {}) backend.check(key) >= 0 end # (see Proxy#delete) def delete(key, options = {}) backend.seize(key) end # (see Proxy#create) def create(key, value, options = {}) backend.add(key, value) end # (see Proxy#close) def close backend.close nil end # (see Proxy#each_key) def each_key return enum_for(:each_key) { backend.count } unless block_given? backend.each_key { |arr| yield arr[0] } self end # (see Proxy#increment) def increment(key, amount = 1, options = {}) ret = nil success = backend.accept(key) do |_, value| ret = if value Integer(value) + amount else amount end ret.to_s end raise backend.error unless success ret end # (see Proxy#slice) def slice(*keys, atomic: true, **options) backend.get_bulk(keys, atomic) end # (see Proxy#values_at) def values_at(*keys, **options) hash = slice(*keys, **options) keys.map { |key| hash[key] } end # (see Proxy#merge!) def merge!(pairs, options = {}) hard = options.key?(:hard) ? options[:hard] : false atomic = options.key?(:atomic) ? options[:atomic] : true success = if block_given? 
backend.transaction(hard) do existing = slice(*pairs.map { |k, _| k }, **options) pairs = pairs.map do |key, new_value| if existing.key?(key) [key, yield(key, existing[key], new_value)] else [key, new_value] end end backend.set_bulk(pairs.to_h, atomic) >= 0 end else backend.set_bulk(pairs.to_h, atomic) >= 0 end raise backend.error unless success self end end end end moneta-1.5.2/lib/moneta/adapters/leveldb.rb000066400000000000000000000025111433316074200206020ustar00rootroot00000000000000require 'leveldb' module Moneta module Adapters # LevelDB backend # @api public class LevelDB < Adapter include HashAdapter include IncrementSupport include CreateSupport include EachKeySupport # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :dir - Database path # @option options All other options passed to `LevelDB::DB#new` # @option options [::LevelDB::DB] :backend Use existing backend instance backend { |dir:| ::LevelDB::DB.new(dir) } # (see Proxy#key?) def key?(key, options = {}) backend.includes?(key) end # (see Proxy#clear) def clear(options = {}) backend.each { |k,| delete(k, options) } self end # (see Proxy#close) def close backend.close nil end # (see Proxy#each_key) def each_key return enum_for(:each_key) { backend.size } unless block_given? backend.each { |key, _| yield key } self end # (see Proxy#values_at) def values_at(*keys, **options) ret = nil backend.batch { ret = super } ret end # (see Proxy#merge!) 
def merge!(*keys, **options) backend.batch { super } self end end end end moneta-1.5.2/lib/moneta/adapters/lmdb.rb000066400000000000000000000051631433316074200201110ustar00rootroot00000000000000require 'lmdb' require 'fileutils' module Moneta module Adapters # LMDB backend # @api public class LMDB < Adapter supports :create, :increment, :each_key PUT_FLAGS = %i[nooverwrite nodupdata current append appenddup].freeze config :db, default: 'moneta' backend do |dir:, **options| FileUtils.mkpath(dir) ::LMDB.new(dir, options) end # @param [Hash] options # @option options [String] :dir Environment directory # @option options [::LMDB::Environment] :backend Use existing backend instance # @option options [String or nil] :db ('moneta') Database name def initialize(options = {}) super @db = backend.database(config.db, create: true) end # (see Proxy#key?) def key?(key, options = {}) @db.get(key) != nil end # (see Proxy#load) def load(key, options = {}) @db.get(key) end # (see Proxy#store) def store(key, value, options = {}) @db.put(key, value, Utils.only(options, *PUT_FLAGS)) value end # (see Proxy#delete) def delete(key, options = {}) backend.transaction do if value = @db.get(key) @db.delete(key) value end end end # (see Proxy#clear) def clear(options = {}) @db.clear self end # (see Proxy#increment) def increment(key, amount = 1, options = {}) backend.transaction do value = Integer(@db.get(key) || 0) + amount @db.put(key, value.to_s, Utils.only(options, *PUT_FLAGS)) value end end # (see Defaults#create) def create(key, value, options = {}) backend.transaction do if @db.get(key) false else @db.put(key, value, Utils.only(options, *PUT_FLAGS)) true end end end # (see Proxy#close) def close backend.close nil end # (see Proxy#each_key) def each_key return enum_for(:each_key) { @db.size } unless block_given? 
@db.cursor do |cursor| while record = cursor.next yield record[0] end end self end # (see Proxy#values_at) def values_at(*keys, **options) backend.transaction { super } end # (see Proxy#slice) def slice(*keys, **options) backend.transaction { super } end # (see Proxy#merge!) def merge!(pairs, options = {}) backend.transaction { super } end end end end moneta-1.5.2/lib/moneta/adapters/localmemcache.rb000066400000000000000000000011371433316074200217450ustar00rootroot00000000000000require 'localmemcache' module Moneta module Adapters # LocalMemCache backend # @api public class LocalMemCache < Adapter include HashAdapter # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options [::LocalMemCache] :backend Use existing backend instance backend { |file:| ::LocalMemCache.new(filename: file) } # (see Proxy#delete) def delete(key, options = {}) value = load(key, options) backend.delete(key) value end end end end moneta-1.5.2/lib/moneta/adapters/lruhash.rb000066400000000000000000000051421433316074200206360ustar00rootroot00000000000000module Moneta module Adapters # LRUHash backend # # Based on {https://rubygems.org/gems/lru_redux lru_redux} but measures # both memory usage and hash size. # # @api public class LRUHash < Adapter include IncrementSupport include CreateSupport config :max_size, default: 1024000 config(:max_value) { |max_size:, max_value:, **| [max_value, max_size].compact.min } config :max_count, default: 10240 supports :each_key backend { {} } # @param [Hash] options # @option options [Integer] :max_size (1024000) Maximum byte size of all values, nil disables the limit # @option options [Integer] :max_value (options[:max_size]) Maximum byte size of one value, nil disables the limit # @option options [Integer] :max_count (10240) Maximum number of values, nil disables the limit def initialize(options = {}) super clear end # (see Proxy#key?) 
def key?(key, options = {}) backend.key?(key) end # (see Proxy#each_key) def each_key(&block) return enum_for(:each_key) { backend.length } unless block_given? # The backend needs to be duplicated because reading mutates this # store. backend.dup.each_key { |k| yield(k) } self end # (see Proxy#load) def load(key, options = {}) if value = backend.delete(key) backend[key] = value value end end # (see Proxy#store) def store(key, value, options = {}) if config.max_value && value.bytesize > config.max_value delete(key) else if config.max_size if old_value = backend.delete(key) @size -= old_value.bytesize end @size += value.bytesize end backend[key] = value drop while config.max_size && @size > config.max_size || config.max_count && backend.size > config.max_count end value end # (see Proxy#delete) def delete(key, options = {}) if value = backend.delete(key) and config.max_size @size -= value.bytesize end value end # (see Proxy#clear) def clear(options = {}) backend.clear @size = 0 self end # Drops the least-recently-used pair, if any # # @param [Hash] options Options to merge # @return [(Object, String), nil] The dropped pair, if any def drop(options = {}) if key = backend.keys.first [key, delete(key)] end end end end end moneta-1.5.2/lib/moneta/adapters/memcached.rb000066400000000000000000000006221433316074200210740ustar00rootroot00000000000000module Moneta # @api private module Adapters # Prefer Dalli over native Memcached! # # I measure no performance gain over the Dalli backend # using the Moneta backends. 
begin require 'moneta/adapters/memcached/dalli' Memcached = MemcachedDalli rescue LoadError require 'moneta/adapters/memcached/native' Memcached = MemcachedNative end end end moneta-1.5.2/lib/moneta/adapters/memcached/000077500000000000000000000000001433316074200205475ustar00rootroot00000000000000moneta-1.5.2/lib/moneta/adapters/memcached/dalli.rb000066400000000000000000000065671433316074200221770ustar00rootroot00000000000000require 'dalli' module Moneta module Adapters # Memcached backend (using gem dalli) # @api public class MemcachedDalli < Adapter include ExpiresSupport supports :create, :increment # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :server ('127.0.0.1:11211') Memcached server # @option options [Integer] :expires Default expiration time # @option options [::Dalli::Client] :backend Use existing backend instance # @option options Other options passed to `Dalli::Client#new` backend { |server: '127.0.0.1:11211', **options| ::Dalli::Client.new(server, options) } # (see Proxy#load) def load(key, options = {}) value = backend.get(key) if value expires = expires_value(options, nil) backend.set(key, value, expires || nil, raw: true) if expires != nil value end end # (see Proxy#store) def store(key, value, options = {}) backend.set(key, value, expires_value(options) || nil, raw: true) value end # (see Proxy#delete) def delete(key, options = {}) value = backend.get(key) backend.delete(key) value end # (see Proxy#increment) def increment(key, amount = 1, options = {}) result = if amount >= 0 backend.incr(key, amount, expires_value(options) || nil) else backend.decr(key, -amount, expires_value(options) || nil) end if result result elsif create(key, amount.to_s, options) amount else increment(key, amount, options) end end # (see Proxy#clear) def clear(options = {}) backend.flush_all self end # (see Defaults#create) def create(key, value, options = {}) !!backend.add(key, value, expires_value(options) || nil, raw: true) end # 
(see Proxy#close) def close backend.close nil end # (see Defaults#slice) def slice(*keys, **options) backend.get_multi(keys).tap do |pairs| next if pairs.empty? expires = expires_value(options, nil) next if expires == nil expires = expires.to_i if Numeric === expires expires ||= 0 backend.multi do pairs.each do |key, value| backend.set(key, value, expires, false) end end end end # (see Defaults#values_at) def values_at(*keys, **options) pairs = slice(*keys, **options) keys.map { |key| pairs.delete(key) } end # (see Defaults#merge!) def merge!(pairs, options = {}) expires = expires_value(options) expires = expires.to_i if Numeric === expires expires ||= nil if block_given? keys = pairs.map { |key, _| key }.to_a old_pairs = backend.get_multi(keys) pairs = pairs.map do |key, new_value| if old_pairs.key? key new_value = yield(key, old_pairs[key], new_value) end [key, new_value] end end backend.multi do pairs.each do |key, value| backend.set(key, value, expires, raw: true) end end self end end end end moneta-1.5.2/lib/moneta/adapters/memcached/native.rb000066400000000000000000000057161433316074200223730ustar00rootroot00000000000000require 'memcached' module Moneta module Adapters # Memcached backend (using gem memcached) # @api public class MemcachedNative < Adapter include ExpiresSupport supports :create, :increment # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :server ('127.0.0.1:11211') Memcached server # @option options [String] :namespace Key namespace # @option options [Integer] :expires (604800) Default expiration time # @option options [::Memcached] :backend Use existing backend instance # @option options Other options passed to `Memcached#new` backend do |server: '127.0.0.1:11211', namespace: nil, **options| options[:prefix_key] = namespace if namespace # We don't want a limitation on the key charset. Therefore we use the binary protocol. # It is also faster. 
options[:binary_protocol] = true unless options.include?(:binary_protocol) ::Memcached.new(server, options) end # (see Proxy#load) def load(key, options = {}) value = @backend.get(key, false) if value expires = expires_value(options, nil) unless expires == nil Numeric === expires and expires = expires.to_i @backend.set(key, value, expires || 0, false) end value end rescue ::Memcached::NotFound nil end # (see Proxy#store) def store(key, value, options = {}) # TTL must be Integer expires = expires_value(options) Numeric === expires and expires = expires.to_i @backend.set(key, value, expires || 0, false) value end # (see Proxy#delete) def delete(key, options = {}) value = @backend.get(key, false) @backend.delete(key) value rescue ::Memcached::NotFound nil end # (see Proxy#increment) def increment(key, amount = 1, options = {}) result = if amount >= 0 @backend.increment(key, amount) else @backend.decrement(key, -amount) end # HACK: Throw error if applied to invalid value # see https://github.com/evan/memcached/issues/110 Integer((@backend.get(key, false) rescue 0)) if result == 0 result rescue ::Memcached::NotFound retry unless create(key, amount.to_s, options) amount end # (see Defaults#create) def create(key, value, options = {}) expires = expires_value(options) Numeric === expires and expires = expires.to_i @backend.add(key, value, expires || 0, false) true rescue ::Memcached::ConnectionDataExists false end # (see Proxy#clear) def clear(options = {}) @backend.flush self end # (see Proxy#close) def close @backend.reset nil end end end end moneta-1.5.2/lib/moneta/adapters/memory.rb000066400000000000000000000007251433316074200205020ustar00rootroot00000000000000module Moneta module Adapters # Memory backend using a hash to store the entries # @api public class Memory < Adapter include NilValues include HashAdapter include IncrementSupport include CreateSupport include EachKeySupport # @!method initialize(options = {}) # @param [Hash] options Options hash # @option 
options [Hash] :backend Use existing backend instance backend { {} } end end end moneta-1.5.2/lib/moneta/adapters/mongo.rb000066400000000000000000000215071433316074200203120ustar00rootroot00000000000000require 'mongo' module Moneta module Adapters # MongoDB backend # # Supports expiration, documents will be automatically removed starting # with mongodb >= 2.2 (see {http://docs.mongodb.org/manual/tutorial/expire-data/}). # # You can store hashes directly using this adapter. # # @example Store hashes # db = Moneta::Adapters::MongoOfficial.new # db['key'] = {a: 1, b: 2} # # @api public class Mongo < Adapter include ExpiresSupport supports :each_key, :create, :increment config :collection, default: 'moneta' config :db config :database, default: 'moneta' do |database:, db:, **| if db warn('Moneta::Adapters::Mongo - the :db option is deprecated and will be removed in a future version. Use :database instead') db else database end end config :expires_field, default: 'expiresAt' config :value_field, default: 'value' config :type_field, default: 'type' backend do |host: '127.0.0.1', port: 27017, **options| options[:logger] ||= ::Logger.new(STDERR).tap do |logger| logger.level = ::Logger::ERROR end ::Mongo::Client.new(["#{host}:#{port}"], options) end # @param [Hash] options # @option options [String] :collection ('moneta') MongoDB collection name # @option options [String] :host ('127.0.0.1') MongoDB server host # @option options [String] :user Username used to authenticate # @option options [String] :password Password used to authenticate # @option options [Integer] :port (MongoDB default port) MongoDB server port # @option options [String] :database ('moneta') MongoDB database # @option options [Integer] :expires Default expiration time # @option options [String] :expires_field ('expiresAt') Document field to store expiration time # @option options [String] :value_field ('value') Document field to store value # @option options [String] :type_field ('type') Document field 
to store value type # @option options [::Mongo::Client] :backend Use existing backend instance # @option options Other options passed to `Mongo::MongoClient#new` def initialize(options = {}) super @database = backend.use(config.database) @collection = @database[config.collection] if @database.command(buildinfo: 1).documents.first['version'] >= '2.2' @collection.indexes.create_one({ config.expires_field => 1 }, expire_after: 0) else warn 'Moneta::Adapters::Mongo - You are using MongoDB version < 2.2, expired documents will not be deleted' end end # (see Proxy#load) def load(key, options = {}) view = @collection.find(:$and => [ { _id: to_binary(key) }, not_expired ]) doc = view.limit(1).first if doc update_expiry(options, nil) do |expires| view.update_one(:$set => { config.expires_field => expires }) end doc_to_value(doc) end end # (see Proxy#store) def store(key, value, options = {}) key = to_binary(key) @collection.replace_one({ _id: key }, value_to_doc(key, value, options), upsert: true) value end # (see Proxy#each_key) def each_key return enum_for(:each_key) unless block_given? @collection.find.each { |doc| yield from_binary(doc[:_id]) } self end # (see Proxy#delete) def delete(key, options = {}) key = to_binary(key) if doc = @collection.find(_id: key).find_one_and_delete and !doc[config.expires_field] || doc[config.expires_field] >= Time.now doc_to_value(doc) end end # (see Proxy#increment) def increment(key, amount = 1, options = {}) @collection.find_one_and_update({ :$and => [{ _id: to_binary(key) }, not_expired] }, { :$inc => { config.value_field => amount } }, return_document: :after, upsert: true)[config.value_field] rescue ::Mongo::Error::OperationFailure tries ||= 0 (tries += 1) < 3 ? 
retry : raise end # (see Proxy#create) def create(key, value, options = {}) key = to_binary(key) @collection.insert_one(value_to_doc(key, value, options)) true rescue ::Mongo::Error::OperationFailure => error raise unless error.code == 11000 # duplicate key error false end # (see Proxy#clear) def clear(options = {}) @collection.delete_many self end # (see Proxy#close) def close @database.close nil end # (see Proxy#slice) def slice(*keys, **options) view = @collection.find(:$and => [ { _id: { :$in => keys.map(&method(:to_binary)) } }, not_expired ]) pairs = view.map { |doc| [from_binary(doc[:_id]), doc_to_value(doc)] } update_expiry(options, nil) do |expires| view.update_many(:$set => { config.expires_field => expires }) end pairs end # (see Proxy#merge!) def merge!(pairs, options = {}) existing = Hash[slice(*pairs.map { |key, _| key })] update_pairs, insert_pairs = pairs.partition { |key, _| existing.key?(key) } unless insert_pairs.empty? @collection.insert_many(insert_pairs.map do |key, value| value_to_doc(to_binary(key), value, options) end) end update_pairs.each do |key, value| value = yield(key, existing[key], value) if block_given? binary = to_binary(key) @collection.replace_one({ _id: binary }, value_to_doc(binary, value, options)) end self end # (see Proxy#fetch_values) def fetch_values(*keys, **options) return values_at(*keys, **options) unless block_given? 
hash = Hash[slice(*keys, **options)] keys.map do |key| if hash.key?(key) hash[key] else yield key end end end # (see Proxy#values_at) def values_at(*keys, **options) hash = Hash[slice(*keys, **options)] keys.map { |key| hash[key] } end private def doc_to_value(doc) case doc[config.type_field] when 'Hash' doc = doc.dup doc.delete('_id') doc.delete(config.type_field) doc.delete(config.expires_field) doc when 'Number' doc[config.value_field] else # In ruby_bson version 2 (and probably up), #to_s no longer returns the binary data from_binary(doc[config.value_field]) end end def value_to_doc(key, value, options) case value when Hash value.merge('_id' => key, config.type_field => 'Hash', # expires_field must be a Time object (BSON date datatype) config.expires_field => expires_at(options) || nil) when Float, Integer { '_id' => key, config.type_field => 'Number', config.value_field => value, # expires_field must be a Time object (BSON date datatype) config.expires_field => expires_at(options) || nil } when String intvalue = value.to_i { '_id' => key, config.type_field => 'String', config.value_field => intvalue.to_s == value ? intvalue : to_binary(value), # @expires_field must be a Time object (BSON date datatype) config.expires_field => expires_at(options) || nil } else raise ArgumentError, "Invalid value type: #{value.class}" end end # BSON will use String#force_encoding to make the string 8-bit # ASCII. This could break unicode text so we should dup in this # case, and it also fails with frozen strings. def to_binary(str) str = str.dup if str.frozen? || str.encoding != Encoding::ASCII_8BIT ::BSON::Binary.new(str) end def from_binary(binary) binary.is_a?(::BSON::Binary) ? 
binary.data : binary.to_s end def not_expired { :$or => [ { config.expires_field => nil }, { config.expires_field => { :$gte => Time.now } } ] } end def update_expiry(options, default) if (expires = expires_at(options, default)) != nil yield(expires || nil) end end end end end moneta-1.5.2/lib/moneta/adapters/null.rb000066400000000000000000000012271433316074200201420ustar00rootroot00000000000000module Moneta module Adapters # Null backend which doesn't store anything # @api public class Null include Defaults # @param [Hash] options Options hash def initialize(options = {}); end # (see Proxy#key?) def key?(key, options = {}) false end # (see Proxy#load) def load(key, options = {}) nil end # (see Proxy#store) def store(key, value, options = {}) value end # (see Proxy#delete) def delete(key, options = {}) nil end # (see Proxy#clear) def clear(options = {}) self end end end end moneta-1.5.2/lib/moneta/adapters/pstore.rb000066400000000000000000000060011433316074200204770ustar00rootroot00000000000000require 'pstore' require 'fileutils' module Moneta module Adapters # PStore backend # @api public class PStore < Adapter include NilValues supports :create, :increment, :each_key backend do |file:, threadsafe: false| FileUtils.mkpath(::File.dirname(file)) ::PStore.new(file, threadsafe) end # @param [Hash] options # @option options [String] :file PStore file # @option options [Boolean] :threadsafe Makes the PStore thread-safe # @option options [::PStore] :backend Use existing backend instance def initialize(options = {}) super @id = "Moneta::Adapters::PStore(#{object_id})" end # (see Proxy#key?) def key?(key, options = {}) transaction(true) { backend.root?(key) } end # (see Proxy#each_key) def each_key(&block) return enum_for(:each_key) { transaction(true) { backend.roots.size } } unless block_given? 
transaction(true) do backend.roots.each { |k| yield(k) } end self end # (see Proxy#load) def load(key, options = {}) transaction(true) { backend[key] } end # (see Proxy#store) def store(key, value, options = {}) transaction { backend[key] = value } end # (see Proxy#delete) def delete(key, options = {}) transaction { backend.delete(key) } end # (see Proxy#increment) def increment(key, amount = 1, options = {}) transaction do existing = backend[key] value = (existing == nil ? 0 : Integer(existing)) + amount backend[key] = value.to_s value end end # (see Proxy#create) def create(key, value, options = {}) transaction do if backend.root?(key) false else backend[key] = value true end end end # (see Proxy#clear) def clear(options = {}) transaction do backend.roots.each do |key| backend.delete(key) end end self end # (see Proxy#values_at) def values_at(*keys, **options) transaction(true) { super } end def fetch_values(*keys, **options) transaction(true) { super } end def slice(*keys, **options) transaction(true) { super } end def merge!(pairs, options = {}) transaction { super } end protected class TransactionError < StandardError; end def transaction(read_only = false) case Thread.current[@id] when read_only, false yield when true raise TransactionError, "Attempt to start read-write transaction inside a read-only transaction" else begin Thread.current[@id] = read_only backend.transaction(read_only) { yield } ensure Thread.current[@id] = nil end end end end end end moneta-1.5.2/lib/moneta/adapters/redis.rb000066400000000000000000000113771433316074200203050ustar00rootroot00000000000000require 'redis' module Moneta module Adapters # Redis backend # @api public class Redis < Adapter include ExpiresSupport supports :create, :increment, :each_key # @!method initialize(options = {}) # @param [Hash] options # @option options [Integer] :expires Default expiration time # @option options [::Redis] :backend Use existing backend instance # @option options Other options passed to 
`Redis#new` backend { |**options| ::Redis.new(options) } # (see Proxy#key?) # # This method considers false and 0 as "no-expire" and every positive # number as a time to live in seconds. def key?(key, options = {}) with_expiry_update(key, default: nil, **options) do |pipeline_handle| if pipeline_handle.respond_to?(:exists?) pipeline_handle.exists?(key) else pipeline_handle.exists(key) end end end # (see Proxy#each_key) def each_key(&block) return enum_for(:each_key) unless block_given? @backend.scan_each { |k| yield(k) } self end # (see Proxy#load) def load(key, options = {}) with_expiry_update(key, default: nil, **options) do |pipeline_handle| pipeline_handle.get(key) end end # (see Proxy#store) def store(key, value, options = {}) if expires = expires_value(options) Numeric === expires and expires = (expires * 1000).to_i @backend.psetex(key, expires, value) else @backend.set(key, value) end value end # (see Proxy#delete) def delete(key, options = {}) future = nil @backend.pipelined do |pipeline| future = pipeline.get(key) pipeline.del(key) end future.value end # (see Proxy#increment) def increment(key, amount = 1, options = {}) with_expiry_update(key, **options) do |pipeline_handle| pipeline_handle.incrby(key, amount) end end # (see Proxy#clear) def clear(options = {}) @backend.flushdb self end # (see Defaults#create) def create(key, value, options = {}) expires = expires_value(options, config.expires) if @backend.setnx(key, value) update_expires(@backend, key, expires) true else false end end # (see Proxy#close) def close @backend.quit nil end # (see Defaults#values_at) def values_at(*keys, **options) with_expiry_update(*keys, default: nil, **options) do |pipeline_handle| pipeline_handle.mget(*keys) end end # (see Defaults#merge!) def merge!(pairs, options = {}) keys = pairs.map { |key, _| key } if block_given? 
old_values = @backend.mget(*keys) updates = pairs.each_with_index.with_object({}) do |(pair, i), updates| old_value = old_values[i] if old_value != nil key, new_value = pair updates[key] = yield(key, old_value, new_value) end end unless updates.empty? pairs = if pairs.respond_to?(:merge) pairs.merge(updates) else Hash[pairs.to_a].merge!(updates) end end end with_expiry_update(*keys, **options) do |pipeline_handle| pipeline_handle.mset(*pairs.to_a.flatten(1)) end self end protected def update_expires(pipeline_handle, key, expires) case expires when false pipeline_handle.persist(key) when Numeric pipeline_handle.pexpire(key, (expires * 1000).to_i) end end def with_expiry_update(*keys, default: config.expires, **options) expires = expires_value(options, default) if expires == nil yield(@backend) else future = nil @backend.multi do |pipeline| # as of redis 4.6 calling redis methods on the redis client itself # is deprecated in favor of a pipeline handle provided by the # +multi+ call. This will cause in error in redis >= 5.0. # # In order to continue supporting redis versions < 4.6, the following # fallback has been introduced and can be removed once moneta # no longer supports redis < 4.6. 
pipeline_handle = pipeline || @backend future = yield(pipeline_handle) keys.each { |key| update_expires(pipeline_handle, key, expires) } end future.value end end end end end moneta-1.5.2/lib/moneta/adapters/restclient.rb000066400000000000000000000027521433316074200213500ustar00rootroot00000000000000require 'faraday' module Moneta module Adapters # Moneta rest client backend which works together with {Rack::MonetaRest} # @api public class RestClient < Adapter # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :url URL # @option options [Symbol] :adapter The adapter to tell Faraday to use # @option options [Faraday::Connection] :backend Use existing backend instance # @option options Other options passed to {Faraday::new} (unless # :backend option is provided). backend do |url:, adapter: nil, **options| ::Faraday.new(url, options) do |faraday| faraday.adapter adapter if adapter end end # (see Proxy#key?) def key?(key, options = {}) backend.head(key).status == 200 end # (see Proxy#load) def load(key, options = {}) response = backend.get(key) response.status == 200 ? response.body : nil end # (see Proxy#store) def store(key, value, options = {}) response = backend.post(key, value) raise "HTTP error #{response.status}" unless response.status == 200 value end # (see Proxy#delete) def delete(key, options = {}) response = backend.delete(key) response.status == 200 ? 
response.body : nil end # (see Proxy#clear) def clear(options = {}) backend.delete '' self end end end end moneta-1.5.2/lib/moneta/adapters/riak.rb000066400000000000000000000032051433316074200201140ustar00rootroot00000000000000require 'riak' module Moneta module Adapters # Riak backend # @api public # @author Potapov Sergey (aka Blake) class Riak < Adapter config :bucket, default: 'moneta' config :content_type, default: 'application/octet-stream' backend { |**options| ::Riak::Client.new(options) } # @param [Hash] options # @option options [String] :bucket ('moneta') Bucket name # @option options [String] :content_type ('application/octet-stream') Default content type # @option options [::Riak::Client] :backend Use existing backend instance # @option options All other options passed to `Riak::Client#new` def initialize(options = {}) super @bucket = backend.bucket(config.bucket) end # (see Proxy#key?) def key?(key, options = {}) @bucket.exists?(key, options.dup) end # (see Proxy#load) def load(key, options = {}) @bucket.get(key, options.dup).raw_data rescue ::Riak::FailedRequest nil end # (see Proxy#delete) def delete(key, options = {}) value = load(key, options) @bucket.delete(key, options.dup) value end # (see Proxy#store) def store(key, value, options = {}) obj = ::Riak::RObject.new(@bucket, key) obj.content_type = options[:content_type] || config.content_type obj.raw_data = value obj.store(options.dup) value end # (see Proxy#clear) def clear(options = {}) @bucket.keys do |keys| keys.each { |key| @bucket.delete(key) } end self end end end end moneta-1.5.2/lib/moneta/adapters/sdbm.rb000066400000000000000000000007501433316074200201150ustar00rootroot00000000000000require 'sdbm' module Moneta module Adapters # SDBM backend # @api public class SDBM < Adapter include DBMAdapter include IncrementSupport include CreateSupport include EachKeySupport # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options 
[::SDBM] :backend Use existing backend instance backend { |file:| ::SDBM.new(file) } end end end moneta-1.5.2/lib/moneta/adapters/sequel.rb000066400000000000000000000243471433316074200204760ustar00rootroot00000000000000require 'sequel' module Moneta module Adapters # Sequel backend # @api public class Sequel < Adapter autoload :MySQL, 'moneta/adapters/sequel/mysql' autoload :Postgres, 'moneta/adapters/sequel/postgres' autoload :PostgresHStore, 'moneta/adapters/sequel/postgres_hstore' autoload :SQLite, 'moneta/adapters/sequel/sqlite' supports :create, :increment, :each_key config :table, default: :moneta, coerce: :to_sym config :optimize, default: true config :create_table, default: true config :key_column, default: :k config :value_column, default: :v config :hstore, coerce: :to_s config :each_key_server backend do |db:, extensions: [], connection_validation_timeout: nil, **options| ::Sequel.connect(db, options).tap do |backend| extensions.map(&:to_sym).each(&backend.method(:extension)) if connection_validation_timeout backend.pool.connection_validation_timeout = connection_validation_timeout end end end # @param [Hash] options # @option options [String] :db Sequel database # @option options [String, Symbol] :table (:moneta) Table name # @option options [Array] :extensions ([]) List of Sequel extensions # @option options [Integer] :connection_validation_timeout (nil) Sequel connection_validation_timeout # @option options [Sequel::Database] :backend Use existing backend instance # @option options [Boolean] :optimize (true) Set to false to prevent database-specific optimisations # @option options [Proc, Boolean] :create_table (true) Provide a Proc for creating the table, or # set to false to disable table creation all together. If a Proc is given, it will be # called regardless of whether the table exists already. 
# @option options [Symbol] :key_column (:k) The name of the key column # @option options [Symbol] :value_column (:v) The name of the value column # @option options [String] :hstore If using Postgres, keys and values are stored in a single # row of the table in the value_column using the hstore format. The row to use is # the one where the value_column is equal to the value of this option, and will be created # if it doesn't exist. # @option options [Symbol] :each_key_server Some adapters are unable to do # multiple operations with a single connection. For these, it is # possible to specify a separate connection to use for `#each_key`. Use # in conjunction with Sequel's `:servers` option # @option options All other options passed to `Sequel#connect` def initialize(options = {}) super if config.hstore extend Sequel::PostgresHStore elsif config.optimize add_optimizations end if config.create_table.respond_to?(:call) config.create_table.call(backend) elsif config.create_table create_table end @table = backend[config.table] prepare_statements end # (see Proxy#key?) def key?(key, options = {}) @key.call(key: key) != nil end # (see Proxy#load) def load(key, options = {}) if row = @load.call(key: key) row[config.value_column] end end # (see Proxy#store) def store(key, value, options = {}) blob_value = blob(value) unless @store_update.call(key: key, value: blob_value) == 1 @create.call(key: key, value: blob_value) end value rescue ::Sequel::DatabaseError tries ||= 0 (tries += 1) < 10 ? 
retry : raise end # (see Proxy#create) def create(key, value, options = {}) @create.call(key: key, value: blob(value)) true rescue ::Sequel::ConstraintViolation false end # (see Proxy#increment) def increment(key, amount = 1, options = {}) backend.transaction do if existing = @load_for_update.call(key: key) existing_value = existing[config.value_column] amount += Integer(existing_value) raise IncrementError, "no update" unless @increment_update.call( key: key, value: existing_value, new_value: blob(amount.to_s) ) == 1 else @create.call(key: key, value: blob(amount.to_s)) end amount end rescue ::Sequel::DatabaseError # Concurrent modification might throw a bunch of different errors tries ||= 0 (tries += 1) < 10 ? retry : raise end # (see Proxy#delete) def delete(key, options = {}) value = load(key, options) @delete.call(key: key) value end # (see Proxy#clear) def clear(options = {}) @table.delete self end # (see Proxy#close) def close backend.disconnect nil end # (see Proxy#slice) def slice(*keys, **options) @slice.all(keys).map! { |row| [row[config.key_column], row[config.value_column]] } end # (see Proxy#values_at) def values_at(*keys, **options) pairs = Hash[slice(*keys, **options)] keys.map { |key| pairs[key] } end # (see Proxy#fetch_values) def fetch_values(*keys, **options) return values_at(*keys, **options) unless block_given? existing = Hash[slice(*keys, **options)] keys.map do |key| if existing.key? key existing[key] else yield key end end end # (see Proxy#merge!) def merge!(pairs, options = {}) backend.transaction do existing = Hash[slice_for_update(pairs)] update_pairs, insert_pairs = pairs.partition { |k, _| existing.key?(k) } @table.import([config.key_column, config.value_column], blob_pairs(insert_pairs)) if block_given? update_pairs.map! 
do |key, new_value| [key, yield(key, existing[key], new_value)] end end update_pairs.each do |key, value| @store_update.call(key: key, value: blob(value)) end end self end # (see Proxy#each_key) def each_key return enum_for(:each_key) { @table.count } unless block_given? key_column = config.key_column if config.each_key_server @table.server(config.each_key_server).order(key_column).select(key_column).paged_each do |row| yield row[key_column] end else @table.select(key_column).order(key_column).paged_each(stream: false) do |row| yield row[key_column] end end self end protected # @api private def add_optimizations case backend.database_type when :mysql extend Sequel::MySQL when :postgres if matches = backend.get(::Sequel[:version].function).match(/PostgreSQL (\d+)\.(\d+)/) # Our optimisations only work on Postgres 9.5+ major, minor = matches[1..2].map(&:to_i) extend Sequel::Postgres if major > 9 || (major == 9 && minor >= 5) end when :sqlite extend Sequel::SQLite end end def blob(str) ::Sequel.blob(str) unless str == nil end def blob_pairs(pairs) pairs.map do |key, value| [key, blob(value)] end end def create_table key_column = config.key_column value_column = config.value_column backend.create_table?(config.table) do String key_column, null: false, primary_key: true File value_column end end def slice_for_update(pairs) @slice_for_update.all(pairs.map { |k, _| k }.to_a).map! 
do |row| [row[config.key_column], row[config.value_column]] end end def yield_merge_pairs(pairs) existing = Hash[slice_for_update(pairs)] pairs.map do |key, new_value| new_value = yield(key, existing[key], new_value) if existing.key?(key) [key, new_value] end end def statement_id(id) "moneta_#{config.table}_#{id}".to_sym end def prepare_statements prepare_key prepare_load prepare_store prepare_create prepare_increment prepare_delete prepare_slice end def prepare_key @key = @table .where(config.key_column => :$key).select(1) .prepare(:first, statement_id(:key)) end def prepare_load @load = @table .where(config.key_column => :$key).select(config.value_column) .prepare(:first, statement_id(:load)) end def prepare_store @store_update = @table .where(config.key_column => :$key) .prepare(:update, statement_id(:store_update), config.value_column => :$value) end def prepare_create @create = @table .prepare(:insert, statement_id(:create), config.key_column => :$key, config.value_column => :$value) end def prepare_increment @load_for_update = @table .where(config.key_column => :$key).for_update .select(config.value_column) .prepare(:first, statement_id(:load_for_update)) @increment_update ||= @table .where(config.key_column => :$key, config.value_column => :$value) .prepare(:update, statement_id(:increment_update), config.value_column => :$new_value) end def prepare_delete @delete = @table.where(config.key_column => :$key) .prepare(:delete, statement_id(:delete)) end def prepare_slice @slice_for_update = ::Sequel::Dataset::PlaceholderLiteralizer.loader(@table) do |pl, ds| ds.filter(config.key_column => pl.arg).select(config.key_column, config.value_column).for_update end @slice = ::Sequel::Dataset::PlaceholderLiteralizer.loader(@table) do |pl, ds| ds.filter(config.key_column => pl.arg).select(config.key_column, config.value_column) end end # @api private class IncrementError < ::Sequel::DatabaseError; end end end end 
moneta-1.5.2/lib/moneta/adapters/sequel/000077500000000000000000000000001433316074200201375ustar00rootroot00000000000000moneta-1.5.2/lib/moneta/adapters/sequel/mysql.rb000066400000000000000000000041071433316074200216330ustar00rootroot00000000000000module Moneta module Adapters class Sequel # @api private module MySQL def store(key, value, options = {}) @store.call(key: key, value: blob(value)) value end def increment(key, amount = 1, options = {}) @backend.transaction do # this creates a row-level lock even if there is no existing row (a # "gap lock"). if row = @load_for_update.call(key: key) # Integer() will raise an exception if the existing value cannot be parsed amount += Integer(row[config.value_column]) @increment_update.call(key: key, value: amount) else @create.call(key: key, value: amount) end amount end rescue ::Sequel::SerializationFailure # Thrown on deadlock tries ||= 0 (tries += 1) <= 3 ? retry : raise end def merge!(pairs, options = {}, &block) @backend.transaction do pairs = yield_merge_pairs(pairs, &block) if block_given? @table .on_duplicate_key_update .import([config.key_column, config.value_column], blob_pairs(pairs).to_a) end self end def each_key return super unless block_given? 
&& config.each_key_server && @table.respond_to?(:stream) # Order is not required when streaming @table.server(config.each_key_server).select(config.key_column).paged_each do |row| yield row[config.key_column] end self end protected def prepare_store @store = @table .on_duplicate_key_update .prepare(:insert, statement_id(:store), config.key_column => :$key, config.value_column => :$value) end def prepare_increment @increment_update = @table .where(config.key_column => :$key) .prepare(:update, statement_id(:increment_update), config.value_column => :$value) super end end end end end moneta-1.5.2/lib/moneta/adapters/sequel/postgres.rb000066400000000000000000000051211433316074200223310ustar00rootroot00000000000000module Moneta module Adapters # @api public class Sequel # @api private module Postgres def store(key, value, options = {}) @store.call(key: key, value: blob(value)) value end def increment(key, amount = 1, options = {}) result = @increment.call(key: key, value: blob(amount.to_s), amount: amount) if row = result.first row[config.value_column].to_i end end def delete(key, options = {}) result = @delete.call(key: key) if row = result.first row[config.value_column] end end def merge!(pairs, options = {}, &block) @backend.transaction do pairs = yield_merge_pairs(pairs, &block) if block_given? @table .insert_conflict(target: config.key_column, update: { config.value_column => ::Sequel[:excluded][config.value_column] }) .import([config.key_column, config.value_column], blob_pairs(pairs).to_a) end self end def each_key return super unless block_given? && !config.each_key_server && @table.respond_to?(:use_cursor) # With a cursor, this will Just Work. 
@table.select(config.key_column).paged_each do |row| yield row[config.key_column] end self end protected def prepare_store @store = @table .insert_conflict(target: config.key_column, update: { config.value_column => ::Sequel[:excluded][config.value_column] }) .prepare(:insert, statement_id(:store), config.key_column => :$key, config.value_column => :$value) end def prepare_increment update_expr = ::Sequel[:convert_to].function( (::Sequel[:convert_from].function( ::Sequel[config.table][config.value_column], 'UTF8' ).cast(Integer) + :$amount).cast(String), 'UTF8' ) @increment = @table .returning(config.value_column) .insert_conflict(target: config.key_column, update: { config.value_column => update_expr }) .prepare(:insert, statement_id(:increment), config.key_column => :$key, config.value_column => :$value) end def prepare_delete @delete = @table .returning(config.value_column) .where(config.key_column => :$key) .prepare(:delete, statement_id(:delete)) end end end end end moneta-1.5.2/lib/moneta/adapters/sequel/postgres_hstore.rb000066400000000000000000000175121433316074200237240ustar00rootroot00000000000000::Sequel.extension :pg_hstore_ops module Moneta module Adapters class Sequel # @api private module PostgresHStore def self.extended(mod) mod.backend.extension :pg_hstore mod.backend.extension :pg_array end def key?(key, options = {}) if @key row = @key.call(row: config.hstore, key: key) || false row && row[:present] else @key_pl.get(key) end end def store(key, value, options = {}) @backend.transaction do create_row @store.call(row: config.hstore, pair: ::Sequel.hstore(key => value)) end value end def load(key, options = {}) if row = @load.call(row: config.hstore, key: key) row[:value] end end def delete(key, options = {}) @backend.transaction do value = load(key, options) @delete.call(row: config.hstore, key: key) value end end def increment(key, amount = 1, options = {}) @backend.transaction do create_row if row = @increment.call(row: config.hstore, key: key, 
amount: amount).first row[:value].to_i end end end def create(key, value, options = {}) @backend.transaction do create_row 1 == if @create @create.call(row: config.hstore, key: key, pair: ::Sequel.hstore(key => value)) else @table .where(config.key_column => config.hstore) .exclude(::Sequel[config.value_column].hstore.key?(key)) .update(config.value_column => ::Sequel[config.value_column].hstore.merge(key => value)) end end end def clear(options = {}) @clear.call(row: config.hstore) self end def values_at(*keys, **options) if row = @values_at.call(row: config.hstore, keys: ::Sequel.pg_array(keys)) row[:values].to_a else [] end end def slice(*keys, **options) if row = @slice.call(row: config.hstore, keys: ::Sequel.pg_array(keys)) row[:pairs].to_h else [] end end def merge!(pairs, options = {}, &block) @backend.transaction do create_row pairs = yield_merge_pairs(pairs, &block) if block_given? hash = Hash === pairs ? pairs : Hash[pairs.to_a] @store.call(row: config.hstore, pair: ::Sequel.hstore(hash)) end self end def each_key return enum_for(:each_key) { @size.call(row: config.hstore)[:size] } unless block_given? 
ds = if config.each_key_server @table.server(config.each_key_server) else @table end ds = ds.order(:skeys) unless @table.respond_to?(:use_cursor) ds.where(config.key_column => config.hstore) .select(::Sequel[config.value_column].hstore.skeys) .paged_each do |row| yield row[:skeys] end self end protected def create_row @create_row.call(row: config.hstore) end def create_table key_column = config.key_column value_column = config.value_column @backend.create_table?(config.table) do column key_column, String, null: false, primary_key: true column value_column, :hstore index value_column, type: :gin end end def slice_for_update(pairs) keys = pairs.map { |k, _| k }.to_a if row = @slice_for_update.call(row: config.hstore, keys: ::Sequel.pg_array(keys)) row[:pairs].to_h else {} end end def prepare_statements super prepare_create_row prepare_clear prepare_values_at prepare_size end def prepare_create_row @create_row = @table .insert_ignore .prepare(:insert, statement_id(:hstore_create_row), config.key_column => :$row, config.value_column => '') end def prepare_clear @clear = @table.where(config.key_column => :$row).prepare(:update, statement_id(:hstore_clear), config.value_column => '') end def prepare_key if defined?(JRUBY_VERSION) @key_pl = ::Sequel::Dataset::PlaceholderLiteralizer.loader(@table) do |pl, ds| ds.where(config.key_column => config.hstore).select(::Sequel[config.value_column].hstore.key?(pl.arg)) end else @key = @table.where(config.key_column => :$row) .select(::Sequel[config.value_column].hstore.key?(:$key).as(:present)) .prepare(:first, statement_id(:hstore_key)) end end def prepare_store @store = @table .where(config.key_column => :$row) .prepare(:update, statement_id(:hstore_store), config.value_column => ::Sequel[config.value_column].hstore.merge(:$pair)) end def prepare_increment pair = ::Sequel[:hstore] .function(:$key, ( ::Sequel[:coalesce].function(::Sequel[config.value_column].hstore[:$key].cast(Integer), 0) + :$amount ).cast(String)) @increment = 
@table .returning(::Sequel[config.value_column].hstore[:$key].as(:value)) .where(config.key_column => :$row) .prepare(:update, statement_id(:hstore_increment), config.value_column => ::Sequel.join([config.value_column, pair])) end def prepare_load @load = @table.where(config.key_column => :$row) .select(::Sequel[config.value_column].hstore[:$key].as(:value)) .prepare(:first, statement_id(:hstore_load)) end def prepare_delete @delete = @table.where(config.key_column => :$row) .prepare(:update, statement_id(:hstore_delete), config.value_column => ::Sequel[config.value_column].hstore.delete(:$key)) end def prepare_create # Under JRuby we can't use a prepared statement for queries involving # the hstore `?` (key?) operator. See # https://stackoverflow.com/questions/11940401/escaping-hstore-contains-operators-in-a-jdbc-prepared-statement return if defined?(JRUBY_VERSION) @create = @table .where(config.key_column => :$row) .exclude(::Sequel[config.value_column].hstore.key?(:$key)) .prepare(:update, statement_id(:hstore_create), config.value_column => ::Sequel[config.value_column].hstore.merge(:$pair)) end def prepare_values_at @values_at = @table .where(config.key_column => :$row) .select(::Sequel[config.value_column].hstore[::Sequel.cast(:$keys, :"text[]")].as(:values)) .prepare(:first, statement_id(:hstore_values_at)) end def prepare_slice slice = @table .where(config.key_column => :$row) .select(::Sequel[config.value_column].hstore.slice(:$keys).as(:pairs)) @slice = slice.prepare(:first, statement_id(:hstore_slice)) @slice_for_update = slice.for_update.prepare(:first, statement_id(:hstore_slice_for_update)) end def prepare_size @size = @backend .from(@table.where(config.key_column => :$row) .select(::Sequel[config.value_column].hstore.each)) .select { count.function.*.as(:size) } .prepare(:first, statement_id(:hstore_size)) end end end end end 
moneta-1.5.2/lib/moneta/adapters/sequel/sqlite.rb000066400000000000000000000040001433316074200217570ustar00rootroot00000000000000module Moneta module Adapters class Sequel # @api private module SQLite def self.extended(mod) version = mod.backend.get(::Sequel[:sqlite_version].function) # See https://sqlite.org/lang_UPSERT.html mod.instance_variable_set(:@can_upsert, ::Gem::Version.new(version) >= ::Gem::Version.new('3.24.0')) end def store(key, value, options = {}) @table.insert_conflict(:replace).insert(config.key_column => key, config.value_column => blob(value)) value end def increment(key, amount = 1, options = {}) return super unless @can_upsert @backend.transaction do @increment.call(key: key, value: blob(amount.to_s), amount: amount) Integer(load(key)) end end def merge!(pairs, options = {}, &block) @backend.transaction do pairs = yield_merge_pairs(pairs, &block) if block_given? @table.insert_conflict(:replace).import([config.key_column, config.value_column], blob_pairs(pairs).to_a) end self end protected def prepare_store @store = @table .insert_conflict(:replace) .prepare(:insert, statement_id(:store), config.key_column => :$key, config.value_column => :$value) end def prepare_increment return super unless @can_upsert update_expr = (::Sequel[config.value_column].cast(Integer) + :$amount).cast(:blob) @increment = @table .insert_conflict( target: config.key_column, update: { config.value_column => update_expr }, update_where: ::Sequel.|({ config.value_column => blob("0") }, { ::Sequel.~(::Sequel[config.value_column].cast(Integer)) => 0 }) ) .prepare(:insert, statement_id(:increment), config.key_column => :$key, config.value_column => :$value) end end end end end moneta-1.5.2/lib/moneta/adapters/sqlite.rb000066400000000000000000000122741433316074200204750ustar00rootroot00000000000000require 'sqlite3' module Moneta module Adapters # Sqlite3 backend # @api public class Sqlite < Adapter include IncrementSupport supports :create, :each_key config :table, default: 
'moneta' config :busy_timeout, default: 1000 config :journal_mode backend { |file:| ::SQLite3::Database.new(file) } # @param [Hash] options # @option options [String] :file Database file # @option options [String] :table ('moneta') Table name # @option options [Integer] :busy_timeout (1000) Sqlite timeout if database is busy # @option options [::Sqlite3::Database] :backend Use existing backend instance # @option options [String, Symbol] :journal_mode Set the journal mode for the connection def initialize(options = {}) super backend.busy_timeout(config.busy_timeout) backend.execute("create table if not exists #{config.table} (k blob not null primary key, v blob)") if journal_mode = config.journal_mode backend.journal_mode = journal_mode.to_s end @stmts = [@exists = backend.prepare("select exists(select 1 from #{config.table} where k = ?)"), @select = backend.prepare("select v from #{config.table} where k = ?"), @replace = backend.prepare("replace into #{config.table} values (?, ?)"), @delete = backend.prepare("delete from #{config.table} where k = ?"), @clear = backend.prepare("delete from #{config.table}"), @create = backend.prepare("insert into #{config.table} values (?, ?)"), @keys = backend.prepare("select k from #{config.table}"), @count = backend.prepare("select count(*) from #{config.table}")] version = backend.execute("select sqlite_version()").first.first if @can_upsert = ::Gem::Version.new(version) >= ::Gem::Version.new('3.24.0') @stmts << (@increment = backend.prepare <<-SQL) insert into #{config.table} values (?, ?) on conflict (k) do update set v = cast(cast(v as integer) + ? as blob) where v = '0' or v = X'30' or cast(v as integer) != 0 SQL end end # (see Proxy#key?) def key?(key, options = {}) @exists.execute!(key).first.first.to_i == 1 end # (see Proxy#load) def load(key, options = {}) rows = @select.execute!(key) rows.empty? ? 
nil : rows.first.first end # (see Proxy#store) def store(key, value, options = {}) @replace.execute!(key, value) value end # (see Proxy#delete) def delete(key, options = {}) value = load(key, options) @delete.execute!(key) value end # (see Proxy#increment) def increment(key, amount = 1, options = {}) backend.transaction(:exclusive) { return super } unless @can_upsert backend.transaction do @increment.execute!(key, amount.to_s, amount) return Integer(load(key)) end end # (see Proxy#clear) def clear(options = {}) @clear.execute! self end # (see Default#create) def create(key, value, options = {}) @create.execute!(key, value) true rescue SQLite3::ConstraintException # If you know a better way to detect whether an insert-ignore # suceeded, please tell me. @create.reset! false end # (see Proxy#close) def close @stmts.each { |s| s.close } backend.close nil end # (see Proxy#slice) def slice(*keys, **options) query = "select k, v from #{config.table} where k in (#{(['?'] * keys.length).join(',')})" backend.execute(query, keys) end # (see Proxy#values_at) def values_at(*keys, **options) hash = Hash[slice(*keys, **options)] keys.map { |key| hash[key] } end # (see Proxy#fetch_values) def fetch_values(*keys, **options) return values_at(*keys, **options) unless block_given? hash = Hash[slice(*keys, **options)] keys.map do |key| if hash.key?(key) hash[key] else yield key end end end # (see Proxy#merge!) def merge!(pairs, options = {}) transaction = backend.transaction if block_given? if block_given? 
existing = Hash[slice(*pairs.map { |k, _| k }.to_a)] pairs = pairs.map do |key, new_value| new_value = yield(key, existing[key], new_value) if existing.key?(key) [key, new_value] end.to_a else pairs = pairs.to_a end query = "replace into #{config.table} (k, v) values" + (['(?, ?)'] * pairs.length).join(',') backend.query(query, pairs.flatten).close rescue backend.rollback if transaction raise else backend.commit if transaction self end # (see Proxy#each_key) def each_key return enum_for(:each_key) { @count.execute!.first.first } unless block_given? @keys.execute!.each do |row| yield row.first end self end end end end moneta-1.5.2/lib/moneta/adapters/tdb.rb000066400000000000000000000013731433316074200177430ustar00rootroot00000000000000require 'tdb' module Moneta module Adapters # TDB backend # @api public class TDB < Adapter include HashAdapter include IncrementSupport include EachKeySupport supports :create # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options [::TDB] :backend Use existing backend instance backend { |file:, **options| ::TDB.new(file, options) } # (see Proxy#close) def close backend.close nil end # (see Proxy#create) def create(key, value, options = {}) backend.insert!(key, value) true rescue ::TDB::ERR::EXISTS false end end end end moneta-1.5.2/lib/moneta/adapters/tokyocabinet.rb000066400000000000000000000030351433316074200216620ustar00rootroot00000000000000require 'tokyocabinet' module Moneta module Adapters # TokyoCabinet backend # @api public class TokyoCabinet < Adapter include HashAdapter include IncrementSupport include CreateSupport include EachKeySupport # @!method initialize(options = {}) # @param [Hash] options # @option options [String] :file Database file # @option options [Symbol] :type (:hdb) Database type (:bdb and :hdb possible) # @option options [::TokyoCabinet::*DB] :backend Use existing backend instance backend do |file:, type: :hdb| case type when :bdb 
::TokyoCabinet::BDB.new.tap do |backend| backend.open(file, ::TokyoCabinet::BDB::OWRITER | ::TokyoCabinet::BDB::OCREAT) or raise backend.errmsg(backend.ecode) end when :hdb ::TokyoCabinet::HDB.new.tap do |backend| backend.open(file, ::TokyoCabinet::HDB::OWRITER | ::TokyoCabinet::HDB::OCREAT) or raise backend.errmsg(backend.ecode) end else raise ArgumentError, ":type must be :bdb or :hdb" end end # (see Proxy#delete) def delete(key, options = {}) value = load(key, options) if value @backend.delete(key) value end end # (see Proxy#create) def create(key, value, options = {}) @backend.putkeep(key, value) end # (see Proxy#close) def close @backend.close nil end end end end moneta-1.5.2/lib/moneta/adapters/tokyotyrant.rb000066400000000000000000000071721433316074200216040ustar00rootroot00000000000000begin # Native client require 'tokyo_tyrant' rescue LoadError # Ruby client require 'tokyotyrant' end module Moneta module Adapters # TokyoTyrant backend # @api public class TokyoTyrant < Adapter include HashAdapter # error code: no record found ENOREC = 7 supports :create, :increment backend do |host: '127.0.0.1', port: 1978| if defined?(::TokyoTyrant::RDB) # Use ruby client ::TokyoTyrant::RDB.new.tap do |backend| backend.open(host, port) or raise backend.errmsg end else # Use native client ::TokyoTyrant::DB.new(host, port) end end # @param [Hash] options # @option options [String] :host ('127.0.0.1') Server host name # @option options [Integer] :port (1978) Server port # @option options [::TokyoTyrant::RDB] :backend Use existing backend instance def initialize(options = {}) super @native = backend.class.name != 'TokyoTyrant::RDB' probe = '__tokyotyrant_endianness_probe' backend.delete(probe) backend.addint(probe, 1) @pack = backend.delete(probe) == [1].pack('l>') ? 
'l>' : 'l<' end # (see Proxy#load) def load(key, options = {}) value = backend[key] # raise if there is an error and the error is not "no record" error if value == nil && backend.ecode != ENOREC value && unpack(value) end # (see Proxy#store) def store(key, value, options = {}) backend.put(key, pack(value)) or error value end # (see Proxy#delete) def delete(key, options = {}) value = load(key, options) if value backend.delete(key) or error value end end # (see Proxy#increment) def increment(key, amount = 1, options = {}) backend.addint(key, amount) or error end # (see Proxy#create) def create(key, value, options = {}) if @native begin # Native client throws an exception backend.putkeep(key, pack(value)) rescue TokyoTyrantError false end else backend.putkeep(key, pack(value)) end end # (see Proxy#close) def close backend.close nil end # (see Proxy#slice) def slice(*keys, **options) hash = if @native backend.mget(*keys) else hash = Hash[keys.map { |key| [key] }] raise unless backend.mget(hash) >= 0 hash end hash.each do |key, value| hash[key] = unpack(value) end end # (see Proxy#values_at) def values_at(*keys, **options) hash = slice(*keys, **options) keys.map { |key| hash[key] } end private def pack(value) intvalue = value.to_i if intvalue >= 0 && intvalue <= 0xFFFFFFFF && intvalue.to_s == value # Pack as 4 byte integer [intvalue].pack(@pack) elsif value.bytesize >= 4 # Add nul character to make value distinguishable from integer value + "\0" else value end end def unpack(value) if value.bytesize == 4 # Unpack 4 byte integer value.unpack(@pack).first.to_s elsif value.bytesize >= 5 && value[-1] == ?\0 # Remove nul character value[0..-2] else value end end def error raise "#{backend.class.name} error: #{backend.errmsg}" end end end end moneta-1.5.2/lib/moneta/adapters/yaml.rb000066400000000000000000000002761433316074200201350ustar00rootroot00000000000000require 'yaml/store' module Moneta module Adapters # YAML::Store backend # @api public class YAML < PStore backend { 
|file:| ::YAML::Store.new(file) } end end end moneta-1.5.2/lib/moneta/builder.rb000066400000000000000000000047471433316074200170250ustar00rootroot00000000000000module Moneta # Builder implements the DSL to build a stack of Moneta store proxies # @api private class Builder # @yieldparam Builder dsl code block def initialize(&block) raise ArgumentError, 'No block given' unless block_given? @proxies = [] instance_eval(&block) end # Build proxy stack # # @return [Object] Generated Moneta proxy stack # @api public def build adapter = @proxies.first if Array === adapter klass, options, block = adapter adapter = new_proxy(klass, options.dup, &block) check_arity(klass, adapter, 1) end @proxies[1..-1].inject([adapter]) do |result, proxy| klass, options, block = proxy proxy = new_proxy(klass, result.last, options.dup, &block) check_arity(klass, proxy, 2) result << proxy end end # Add proxy to stack # # @param [Symbol/Class] proxy Name of proxy class or proxy class # @param [Hash] options Options hash # @api public def use(proxy, options = {}, &block) proxy = Moneta.const_get(proxy) if Symbol === proxy raise ArgumentError, 'You must give a Class or a Symbol' unless Class === proxy @proxies.unshift [proxy, options, block] nil end # Add adapter to stack # # @param [Symbol/Class/Moneta store] adapter Name of adapter class, adapter class or Moneta store # @param [Hash] options Options hash # @api public def adapter(adapter, options = {}, &block) case adapter when Symbol use(Adapters.const_get(adapter), options, &block) when Class use(adapter, options, &block) else raise ArgumentError, 'Adapter must be a Moneta store' unless adapter.respond_to?(:load) && adapter.respond_to?(:store) raise ArgumentError, 'No options allowed' unless options.empty? 
@proxies.unshift adapter nil end end protected def new_proxy(klass, *args, &block) klass.new(*args, &block) rescue ArgumentError check_arity(klass, klass.allocate, args.size) raise end def check_arity(klass, proxy, expected) args = proxy.method(:initialize).arity.abs raise(ArgumentError, %{#{klass.name}#new accepts wrong number of arguments (#{args} accepted, #{expected} expected) Please check your Moneta builder block: * Proxies must be used before the adapter * Only one adapter is allowed * The adapter must be used last }) if args != expected end end end moneta-1.5.2/lib/moneta/cache.rb000066400000000000000000000060471433316074200164350ustar00rootroot00000000000000module Moneta # Combines two stores. One is used as cache, the other as backend adapter. # # @example Add `Moneta::Cache` to proxy stack # Moneta.build do # use(:Cache) do # adapter { adapter :File, dir: 'data' } # cache { adapter :Memory } # end # end # # @api public class Cache include Defaults # @api private class DSL def initialize(store, &block) @store = store instance_eval(&block) end # @api public def adapter(store = nil, &block) raise 'Adapter already set' if @store.adapter raise ArgumentError, 'Only argument or block allowed' if store && block @store.adapter = store || Moneta.build(&block) end # @api public def cache(store = nil, &block) raise 'Cache already set' if @store.cache raise ArgumentError, 'Only argument or block allowed' if store && block @store.cache = store || Moneta.build(&block) end end attr_accessor :cache, :adapter # @param [Hash] options Options hash # @option options [Moneta store] :cache Moneta store used as cache # @option options [Moneta store] :adapter Moneta store used as adapter # @yieldparam Builder block def initialize(options = {}, &block) @cache, @adapter = options[:cache], options[:adapter] DSL.new(self, &block) if block_given? end # (see Proxy#key?) 
def key?(key, options = {}) @cache.key?(key, options) || @adapter.key?(key, options) end # (see Proxy#load) def load(key, options = {}) if options[:sync] || (value = @cache.load(key, options)) == nil value = @adapter.load(key, options) @cache.store(key, value, options) if value != nil end value end # (see Proxy#store) def store(key, value, options = {}) @cache.store(key, value, options) @adapter.store(key, value, options) end # (see Proxy#increment) def increment(key, amount = 1, options = {}) @cache.delete(key, options) @adapter.increment(key, amount, options) end # (see Proxy#create) def create(key, value, options = {}) if @adapter.create(key, value, options) @cache.store(key, value, options) true else false end end # (see Proxy#delete) def delete(key, options = {}) @cache.delete(key, options) @adapter.delete(key, options) end # (see Proxy#clear) def clear(options = {}) @cache.clear(options) @adapter.clear(options) self end # (see Proxy#close) def close @cache.close @adapter.close end # (see Proxy#each_key) def each_key(&block) raise NotImplementedError, 'adapter doesn\'t support #each_key' \ unless supports? :each_key return enum_for(:each_key) unless block_given? 
@adapter.each_key(&block) self end # (see Proxy#features) def features @features ||= ((@cache.features + [:create, :increment, :each_key]) & @adapter.features).freeze end end end moneta-1.5.2/lib/moneta/config.rb000066400000000000000000000054041433316074200166330ustar00rootroot00000000000000require 'set' module Moneta # Some docs here module Config # @api private module ClassMethods def config(name, coerce: nil, default: nil, required: false, &block) raise ArgumentError, 'name must be a symbol' unless Symbol === name defaults = config_defaults raise ArgumentError, "#{name} is already a config option" if defaults.key?(name) raise ArgumentError, "coerce must respond to :to_proc" if coerce && !coerce.respond_to?(:to_proc) defaults.merge!(name => default.freeze).freeze instance_variable_set :@config_defaults, defaults instance_variable_set :@config_coercions, config_coercions.merge!(name => coerce.to_proc) if coerce instance_variable_set :@config_required_keys, config_required_keys.add(name).freeze if required instance_variable_set :@config_blocks, config_blocks.merge!(name => block) if block end def config_variable(name) if instance_variable_defined?(name) instance_variable_get(name).dup elsif superclass.respond_to?(:config_variable) superclass.config_variable(name) end end def config_defaults config_variable(:@config_defaults) || {} end def config_required_keys config_variable(:@config_required_keys) || Set.new end def config_coercions config_variable(:@config_coercions) || {} end def config_blocks config_variable(:@config_blocks) || {} end def config_struct unless @config_struct keys = config_defaults.keys @config_struct = Struct.new(*keys) unless keys.empty? 
end @config_struct end end def config raise "Not configured" unless defined?(@config) @config end def self.included(base) base.extend(ClassMethods) end protected def configure(**options) raise 'Already configured' if defined?(@config) self.class.config_required_keys.each do |key| raise ArgumentError, "#{key} is required" unless options.key? key end defaults = self.class.config_defaults overrides, remainder = options .partition { |key,| defaults.key? key } .map { |pairs| pairs.to_h } self.class.config_coercions.each do |key, coerce| overrides[key] = coerce.call(overrides[key]) if overrides.key?(key) end overridden = defaults.merge!(overrides) config_blocks = self.class.config_blocks values = overridden.map do |key, value| if config_block = config_blocks[key] instance_exec(**overridden, &config_block) else value end end @config = self.class.config_struct&.new(*values).freeze remainder end end end moneta-1.5.2/lib/moneta/create_support.rb000066400000000000000000000007301433316074200204220ustar00rootroot00000000000000module Moneta # Implements simple create using key? and store. # # This is sufficient for non-shared stores or if atomicity is not required. # @api private module CreateSupport # (see Defaults#create) def create(key, value, options = {}) if key? key false else store(key, value, options) true end end def self.included(base) base.supports(:create) if base.respond_to?(:supports) end end end moneta-1.5.2/lib/moneta/dbm_adapter.rb000066400000000000000000000013021433316074200176210ustar00rootroot00000000000000module Moneta # This is for adapters that conform to the DBM interface # @api private module DBMAdapter include HashAdapter # (see Proxy#close) def close @backend.close nil end # (see Proxy#merge!) def merge!(pairs, options = {}) hash = if block_given? 
keys = pairs.map { |k, _| k } old_pairs = Hash[slice(*keys)] Hash[pairs.map do |key, new_value| new_value = yield(key, old_pairs[key], new_value) if old_pairs.key?(key) [key, new_value] end.to_a] else Hash === pairs ? pairs : Hash[pairs.to_a] end @backend.update(hash) self end end end moneta-1.5.2/lib/moneta/defaults.rb000066400000000000000000000260021433316074200171720ustar00rootroot00000000000000module Moneta # Simple interface to key/value stores with Hash-like interface. # @api public module Defaults include ::Moneta::OptionSupport # @api private module ClassMethods # Returns features list # # @return [Array] list of features def features @features ||= superclass.respond_to?(:features) ? superclass.features : [].freeze end # Declares that this adapter supports the given feature. # # @example # class MyAdapter # include Moneta::Defaults # supports :create # def create(key, value, options = {}) # # implement create! # end # end def supports(*features) @features = (self.features | features).freeze end # Declares that this adapter does not support the given feature, and adds # a stub method that raises a NotImplementedError. Useful when inheriting # from another adapter. 
# # @example # class MyAdapter < OtherAdapterWithCreate # include Moneta::Defaults # not_supports :create # end def not_supports(*features) features.each do |feature| define_method(feature) do raise ::NotImplementedError, "#{feature} not supported" end end @features = (self.features - features).freeze end end def self.included(base) base.extend(ClassMethods) end # Exists the value with key # # @param [Object] key # @param [Hash] options # @option options [Integer] :expires Update expiration time (See {Expires}) # @option options [String] :prefix Prefix key (See {Transformer}) # @option options Other options as defined by the adapters or middleware # @return [Boolean] # @api public def key?(key, options = {}) load(key, options) != nil end # Atomically increment integer value with key # # This method also accepts negative amounts. # # @note Not every Moneta store implements this method, # a NotImplementedError is raised if it is not supported. # @param [Object] key # @param [Integer] amount # @param [Hash] options # @option options [String] :prefix Prefix key (See {Transformer}) # @option options Other options as defined by the adapters or middleware # @return [Object] value from store # @api public def increment(key, amount = 1, options = {}) raise NotImplementedError, 'increment is not supported' end # Atomically decrement integer value with key # # This is just syntactic sugar for calling #increment with a negative value. # # This method also accepts negative amounts. 
# # @param [Object] key # @param [Integer] amount # @param [Hash] options # @option options [String] :prefix Prefix key (See {Transformer}) # @option options Other options as defined by the adapters or middleware # @return [Object] value from store # @api public def decrement(key, amount = 1, options = {}) increment(key, -amount, options) end # Explicitly close the store # @return nil # @api public def close; end # Fetch a value with a key # # @overload fetch(key, options = {}, &block) # retrieve a key. if the key is not available, execute the # block and return its return value. # @param [Object] key # @param [Hash] options # @option options [Integer] :expires Update expiration time (See {Expires}) # @option options [Boolean] :raw Raw access without value transformation (See {Transformer}) # @option options [String] :prefix Prefix key (See {Transformer}) # @return [Object] value from store # # @overload fetch(key, default, options = {}) # retrieve a key. if the key is not available, return the default value. # @param [Object] key # @param [Object] default Default value # @param [Hash] options # @option options [Integer] :expires Update expiration time (See {Expires}) # @option options [Boolean] :raw Raw access without value transformation (See {Transformer}) # @option options [String] :prefix Prefix key (See {Transformer}) # @return [Object] value from store # # @api public def fetch(key, default = nil, options = nil) if block_given? raise ArgumentError, 'Only one argument accepted if block is given' if options result = load(key, default || {}) result == nil ? yield(key) : result else result = load(key, options || {}) result == nil ? default : result end end # Fetch value with key. 
Return nil if the key doesn't exist # # @param [Object] key # @return [Object] value # @api public def [](key) load(key) end # Store value with key # # @param [Object] key # @param [Object] value # @return value # @api public def []=(key, value) store(key, value) end # Calls block once for each key in store, passing the key as a parameter. If # no block is given, an enumerator is returned instead. # # @note Not every Moneta store implements this method, # a NotImplementedError is raised if it is not supported. # # @overload each_key # @return [Enumerator] An all-the-keys enumerator # # @overload each_key # @yieldparam key [Object] Each key is yielded to the supplied block # @return [self] # # @api public def each_key raise NotImplementedError, 'each_key is not supported' end # Atomically sets a key to value if it's not set. # # @note Not every Moneta store implements this method, # a NotImplementedError is raised if it is not supported. # @param [Object] key # @param [Object] value # @param [Hash] options # @option options [Integer] :expires Update expiration time (See {Expires}) # @option options [Boolean] :raw Raw access without value transformation (See {Transformer}) # @option options [String] :prefix Prefix key (See {Transformer}) # @return [Boolean] key was set # @api public def create(key, value, options = {}) raise NotImplementedError, 'create is not supported' end # Returns an array containing the values associated with the given keys, in # the same order as the supplied keys. If a key is not present in the # key-value-store, nil is returned in its place. # # @note Some adapters may implement this method atomically, but the default # implementation simply makes repeated calls to {#load}. 
# # @param keys [] The keys for the values to fetch # @param options [Hash] # @option options (see Proxy#load) # @return [Array] Array containing the values requested, with # nil for missing values # @api public def values_at(*keys, **options) keys.map { |key| load(key, options) } end # Behaves identically to {#values_at} except that it accepts an optional # block. When supplied, the block will be called successively with each # supplied key that is not present in the store. The return value of the # block call will be used in place of nil in returned the array of values. # # @note Some adapters may implement this method atomically. The default # implmentation uses {#values_at}. # # @overload fetch_values(*keys, **options) # @param (see #values_at) # @option options (see #values_at) # @return (see #values_at) # @overload fetch_values(*keys, **options) # @param (see #values_at) # @option options (see #values_at) # @yieldparam key [Object] Each key that is not found in the store # @yieldreturn [Object, nil] The value to substitute for the missing one # @return [Array] Array containing the values requested, or # where keys are missing, the return values from the corresponding block # calls # @api public def fetch_values(*keys, **options) values = values_at(*keys, **options) return values unless block_given? keys.zip(values).map do |key, value| if value == nil yield key else value end end end # Returns a collection of key-value pairs corresponding to those supplied # keys which are present in the key-value store, and their associated # values. Only those keys present in the store will have pairs in the # return value. The return value can be any enumerable object that yields # pairs, so it could be a hash, but needn't be. # # @note The keys in the return value may be the same objects that were # supplied (i.e. {Object#equal?}), or may simply be equal (i.e. # {Object#==}). # # @note Some adapters may implement this method atomically. 
The default # implmentation uses {#values_at}. # # @param (see #values_at) # @option options (see #values_at) # @return [<(Object, Object)>] A collection of key-value pairs # @api public def slice(*keys, **options) keys.zip(values_at(*keys, **options)).reject do |_, value| value == nil end end # Stores the pairs in the key-value store, and returns itself. When a block # is provided, it will be called before overwriting any existing values with # the key, old value and supplied value, and the return value of the block # will be used in place of the supplied value. # # @note Some adapters may implement this method atomically, or in two passes # when a block is provided. The default implmentation uses {#key?}, # {#load} and {#store}. # # @overload merge!(pairs, options={}) # @param [<(Object, Object)>] pairs A collection of key-value pairs to store # @param [Hash] options # @option options (see Proxy#store) # @return [self] # @overload merge!(pairs, options={}) # @param [<(Object, Object)>] pairs A collection of key-value pairs to store # @param [Hash] options # @option options (see Proxy#store) # @yieldparam key [Object] A key that whose value is being overwritten # @yieldparam old_value [Object] The existing value which is being overwritten # @yieldparam new_value [Object] The value supplied in the method call # @yieldreturn [Object] The value to use for overwriting # @return [self] # @api public def merge!(pairs, options = {}) pairs.each do |key, value| if block_given? existing = load(key, options) value = yield(key, existing, value) unless existing == nil end store(key, value, options) end self end # (see #merge!) def update(pairs, options = {}, &block) merge!(pairs, options, &block) end # Returns features list # # @return [Array] list of features def features self.class.features end # Return true if adapter supports the given feature. 
# # @return [Boolean] def supports?(feature) features.include?(feature) end end end moneta-1.5.2/lib/moneta/each_key_support.rb000066400000000000000000000013111433316074200207230ustar00rootroot00000000000000module Moneta # This provides an each_key implementation that works in most cases. # @api private module EachKeySupport def each_key return enum_for(:each_key) unless block_given? if @backend.respond_to?(:each_key) @backend.each_key { |key| yield key } elsif @backend.respond_to?(:keys) if keys = @backend.keys keys.each { |key| yield key } end elsif @backend.respond_to?(:each) @backend.each { |key, _| yield key } else raise ::NotImplementedError, "No enumerable found on backend" end self end def self.included(base) base.supports(:each_key) if base.respond_to?(:supports) end end end moneta-1.5.2/lib/moneta/enumerable.rb000066400000000000000000000015561433316074200175110ustar00rootroot00000000000000module Moneta # Adds the Ruby {Enumerable} API to the store. The underlying store must # support `:each_key`. # # @example Adding to a builder # Moneta.build do # # It should be the top middleware # use :Enumerable # adapter :DBM # end # # @api public class Enumerable < Proxy include ::Enumerable def initialize(adapter, options = {}) raise "Adapter must support :each_key" unless adapter.supports? :each_key super end # Enumerate over all pairs in the store # # @overload each # @return [Enumerator] # # @overload each # @yieldparam pair [Array<(Object, Object)>] Each pair is yielded # @return [self] # def each return enum_for(:each) unless block_given? each_key { |key| yield key, load(key) } self end alias each_pair each end end moneta-1.5.2/lib/moneta/expires.rb000066400000000000000000000124651433316074200170520ustar00rootroot00000000000000module Moneta # Adds expiration support to the underlying store # # `#store`, `#load` and `#key?` support the `:expires` option to set/update # the expiration time. 
# # @api public class Expires < Proxy include ExpiresSupport # @param [Moneta store] adapter The underlying store # @param [Hash] options # @option options [String] :expires Default expiration time def initialize(adapter, options = {}) raise 'Store already supports feature :expires' if adapter.supports?(:expires) super end # (see Proxy#key?) def key?(key, options = {}) # Transformer might raise exception load_entry(key, options) != nil rescue super(key, Utils.without(options, :expires)) end # (see Proxy#load) def load(key, options = {}) return super if options.include?(:raw) value, = load_entry(key, options) value end # (see Proxy#store) def store(key, value, options = {}) return super if options.include?(:raw) expires = expires_at(options) super(key, new_entry(value, expires), Utils.without(options, :expires)) value end # (see Proxy#delete) def delete(key, options = {}) return super if options.include?(:raw) value, expires = super value if !expires || Time.now <= Time.at(expires) end # (see Proxy#store) def create(key, value, options = {}) return super if options.include?(:raw) expires = expires_at(options) @adapter.create(key, new_entry(value, expires), Utils.without(options, :expires)) end # (see Defaults#values_at) def values_at(*keys, **options) return super if options.include?(:raw) new_expires = expires_at(options, nil) options = Utils.without(options, :expires) with_updates(options) do |updates| keys.zip(@adapter.values_at(*keys, **options)).map do |key, entry| entry = invalidate_entry(key, entry, new_expires) do |new_entry| updates[key] = new_entry end next if entry == nil value, = entry value end end end # (see Defaults#fetch_values) def fetch_values(*keys, **options) return super if options.include?(:raw) new_expires = expires_at(options, nil) options = Utils.without(options, :expires) substituted = {} block = if block_given? 
lambda do |key| substituted[key] = true yield key end end with_updates(options) do |updates| keys.zip(@adapter.fetch_values(*keys, **options, &block)).map do |key, entry| next entry if substituted[key] entry = invalidate_entry(key, entry, new_expires) do |new_entry| updates[key] = new_entry end if entry == nil value = if block_given? yield key end else value, = entry end value end end end # (see Defaults#slice) def slice(*keys, **options) return super if options.include?(:raw) new_expires = expires_at(options, nil) options = Utils.without(options, :expires) with_updates(options) do |updates| @adapter.slice(*keys, **options).map do |key, entry| entry = invalidate_entry(key, entry, new_expires) do |new_entry| updates[key] = new_entry end next if entry == nil value, = entry [key, value] end.reject(&:nil?) end end # (see Defaults#merge!) def merge!(pairs, options = {}) expires = expires_at(options) options = Utils.without(options, :expires) block = if block_given? lambda do |key, old_entry, entry| old_entry = invalidate_entry(key, old_entry) if old_entry == nil entry # behave as if no replace is happening else old_value, = old_entry new_value, = entry new_entry(yield(key, old_value, new_value), expires) end end end entry_pairs = pairs.map do |key, value| [key, new_entry(value, expires)] end @adapter.merge!(entry_pairs, options, &block) self end private def load_entry(key, options) new_expires = expires_at(options, nil) options = Utils.without(options, :expires) entry = @adapter.load(key, options) invalidate_entry(key, entry, new_expires) do |new_entry| @adapter.store(key, new_entry, options) end end def invalidate_entry(key, entry, new_expires = nil) if entry != nil value, expires = entry if expires && Time.now > Time.at(expires) delete(key) entry = nil elsif new_expires != nil yield new_entry(value, new_expires) if block_given? 
end end entry end def new_entry(value, expires) if expires [value, expires.to_r] elsif Array === value || value == nil [value] else value end end def with_updates(options) updates = {} yield(updates).tap do @adapter.merge!(updates, options) unless updates.empty? end end end end moneta-1.5.2/lib/moneta/expires_support.rb000066400000000000000000000036171433316074200206450ustar00rootroot00000000000000module Moneta # This mixin handles the calculation of expiration times. # # module ExpiresSupport protected # Calculates the time when something will expire. # # This method considers false and 0 as "no-expire" and every positive # number as a time to live in seconds. # # @param [Hash] options Options hash # @option options [0,false,nil,Numeric] :expires expires value given by user # @param [0,false,nil,Numeric] default default expiration time # # @return [false] if it should not expire # @return [Time] the time when something should expire # @return [nil] if it is not known def expires_at(options, default = config.expires) value = expires_value(options, default) Numeric === value ? Time.now + value : value end # Calculates the number of seconds something should last. # # This method considers false and 0 as "no-expire" and every positive # number as a time to live in seconds. # # @param [Hash] options Options hash # @option options [0,false,nil,Numeric] :expires expires value given by user # @param [0,false,nil,Numeric] default default expiration time # # @return [false] if it should not expire # @return [Numeric] seconds until expiration # @return [nil] if it is not known def expires_value(options, default = config.expires) case value = options[:expires] when 0, false false when nil default ? 
default.to_r : nil when Numeric value = value.to_r raise ArgumentError, ":expires must be a positive value, got #{value}" if value < 0 value else raise ArgumentError, ":expires must be Numeric or false, got #{value.inspect}" end end class << self def included(base) base.supports(:expires) if base.respond_to?(:supports) base.config :expires end end end end moneta-1.5.2/lib/moneta/fallback.rb000066400000000000000000000041601433316074200171230ustar00rootroot00000000000000module Moneta # Provides a fallback to a second store when an exception is raised # # @example Basic usage - catches any {IOError} and falls back to {Moneta::Adapters:Null} # Moneta.build do # use :Fallback # adapter :Client # end # # @example Specifying an exception to rescue # Moneta.build do # use :Fallback, rescue: Redis::CannotConnectError # adapter :Redis # end # # @example Specifying a different fallback # Moneta.build do # use :Fallback do # # This is a new builder context # adapter :Memory # end # adapter :File, dir: 'cache' # end # # @api public class Fallback < Wrapper # @param [Moneta store] adapter The underlying store # @param [Hash] options # @option options [Moneta store] :fallback (:Null store) The store to fall # back on # @option options [Class|Array] :rescue ([IOError]) The list # of exceptions that should be rescued # @yieldreturn [Moneta store] Moneta store built using the builder API def initialize(adapter, options = {}, &block) super @fallback = if block_given? ::Moneta.build(&block) elsif options.key?(:fallback) options.delete(:fallback) else ::Moneta::Adapters::Null.new end @rescue = case options[:rescue] when nil [::IOError] when Array options[:rescue] else [options[:rescue]] end end protected def wrap(name, *args, &block) yield rescue => e raise unless @rescue.any? 
{ |rescuable| rescuable === e } fallback(name, *args, &block) end def fallback(name, *args, &block) result = case name when :values_at, :fetch_values, :slice keys, options = args @fallback.public_send(name, *keys, **options, &block) else @fallback.public_send(name, *args, &block) end # Don't expose the fallback class to the caller if result == @fallback self else result end end end end moneta-1.5.2/lib/moneta/hash_adapter.rb000066400000000000000000000027571433316074200200210ustar00rootroot00000000000000module Moneta # @api private module HashAdapter attr_reader :backend # (see Proxy#key?) def key?(key, options = {}) @backend.has_key?(key) end # (see Proxy#load) def load(key, options = {}) @backend[key] end # (see Proxy#store) def store(key, value, options = {}) @backend[key] = value end # (see Proxy#delete) def delete(key, options = {}) @backend.delete(key) end # (see Proxy#clear) def clear(options = {}) @backend.clear self end # (see Defaults#values_at) def values_at(*keys, **options) return super unless @backend.respond_to? :values_at @backend.values_at(*keys) end # (see Defaults#fetch_values) def fetch_values(*keys, **options, &defaults) return super unless @backend.respond_to? :fetch_values defaults ||= {} # prevents KeyError @backend.fetch_values(*keys, &defaults) end # (see Defaults#slice) def slice(*keys, **options) return super unless @backend.respond_to? :slice @backend.slice(*keys) end # (see Defaults#merge!) def merge!(pairs, options = {}, &block) return super unless method = [:merge!, :update].find do |method| @backend.respond_to? method end hash = Hash === pairs ? pairs : Hash[pairs.to_a] case method when :merge! 
@backend.merge!(hash, &block) when :update @backend.update(hash, &block) end self end end end moneta-1.5.2/lib/moneta/increment_support.rb000066400000000000000000000006261433316074200211470ustar00rootroot00000000000000module Moneta # @api private module IncrementSupport # (see Defaults#increment) def increment(key, amount = 1, options = {}) existing = load(key, options) value = (existing == nil ? 0 : Integer(existing)) + amount store(key, value.to_s, options) value end def self.included(base) base.supports(:increment) if base.respond_to?(:supports) end end end moneta-1.5.2/lib/moneta/lock.rb000066400000000000000000000015701433316074200163160ustar00rootroot00000000000000require 'set' module Moneta # Locks the underlying stores with a Mutex # @api public class Lock < Wrapper # @param [Moneta store] adapter The underlying store # @param [Hash] options # @option options [String] :mutex (::Mutex.new) Mutex object def initialize(adapter, options = {}) super @lock = options[:mutex] || ::Mutex.new end protected def wrap(name, *args, &block) self.locks ||= Set.new if locked? yield else lock!(&block) end end def locks=(locks) Thread.current.thread_variable_set('Moneta::Lock', locks) end def locks Thread.current.thread_variable_get('Moneta::Lock') end def lock!(&block) locks << @lock @lock.synchronize(&block) ensure locks.delete @lock end def locked? locks.include? 
@lock end end end moneta-1.5.2/lib/moneta/logger.rb000066400000000000000000000035141433316074200166450ustar00rootroot00000000000000module Moneta # Logger proxy # @api public class Logger < Wrapper # Standard formatter used by the logger # @api public class Format def initialize(options) @prefix = options[:prefix] || 'Moneta ' if options[:file] @close = true @out = File.open(options[:file], 'a') else @close = options[:close] @out = options[:out] || STDOUT end end def log(entry) @out.write(format(entry)) end def close @out.close if @close end protected def format(entry) args = entry[:args] args.pop if Hash === args.last && args.last.empty? args = args.map { |a| dump(a) }.join(', ') if entry[:error] "#{@prefix}#{entry[:method]}(#{args}) raised error: #{entry[:error].message}\n" else "#{@prefix}#{entry[:method]}(#{args}) -> #{dump entry[:return]}\n" end end def dump(value) value = value.inspect value.size > 30 ? value[0..30] + '...' : value end end # @param [Moneta store] adapter The underlying store # @param [Hash] options # @option options [Object] :logger (Moneta::Logger::Format) Logger object # @option options [String] :prefix ('Moneta ') Prefix string # @option options [File] :file Log file # @option options [IO] :out (STDOUT) Output def initialize(adapter, options = {}) super @logger = options[:logger] || Format.new(options) end def close super @logger.close nil end protected def wrap(method, *args) ret = yield @logger.log(method: method, args: args, return: (method == :clear ? 'self' : ret)) ret rescue => error @logger.log(method: method, args: args, error: error) raise end end end moneta-1.5.2/lib/moneta/nil_values.rb000066400000000000000000000016241433316074200175270ustar00rootroot00000000000000module Moneta # This contains overrides of methods in Defaults where additional nil # checks are required, because nil values are possible in the store. 
# @api private module NilValues def fetch_values(*keys, **options) values = values_at(*keys, **options) return values unless block_given? keys.zip(values).map do |key, value| if value == nil && !key?(key) yield key else value end end end def slice(*keys, **options) keys.zip(values_at(*keys, **options)).reject do |key, value| value == nil && !key?(key) end end def merge!(pairs, options = {}) pairs.each do |key, value| if block_given? && key?(key, options) existing = load(key, options) value = yield(key, existing, value) end store(key, value, options) end self end end end moneta-1.5.2/lib/moneta/option_support.rb000066400000000000000000000024161433316074200204720ustar00rootroot00000000000000module Moneta # @api private module OptionSupport # Return Moneta store with default options or additional proxies # # @param [Hash] options Options to merge # @return [Moneta store] # # @api public def with(options = nil, &block) adapter = self if block builder = Builder.new(&block) builder.adapter(adapter) adapter = builder.build.last end options ? 
OptionMerger.new(adapter, options) : adapter end # Return Moneta store with default option raw: true # # @return [OptionMerger] # @api public def raw @raw ||= begin store = with(raw: true, only: [:load, :store, :create, :delete]) store.instance_variable_set(:@raw, store) store end end # Return Moneta store with default prefix option # # @param [String] prefix Key prefix # @return [OptionMerger] # @api public def prefix(prefix) with(prefix: prefix, except: :clear) end # Return Moneta store with default expiration time # # @param [Integer] expires Default expiration time # @return [OptionMerger] # @api public def expires(expires) with(expires: expires, only: [:store, :create, :increment]) end end end moneta-1.5.2/lib/moneta/optionmerger.rb000066400000000000000000000024571433316074200201050ustar00rootroot00000000000000module Moneta # @api private class OptionMerger < Wrapper METHODS = [:key?, :load, :store, :create, :delete, :increment, :clear].freeze attr_reader :default_options # @param [Moneta store] adapter underlying adapter # @param [Hash] options def initialize(adapter, options = {}) super(adapter, options) @default_options = adapter.respond_to?(:default_options) ? 
adapter.default_options.dup : {} if options.include?(:only) raise ArgumentError, 'Either :only or :except is allowed' if options.include?(:except) methods = [options.delete(:only)].compact.flatten elsif options.include?(:except) methods = METHODS - [options.delete(:except)].compact.flatten else methods = METHODS end methods.each do |method| if oldopts = @default_options[method] newopts = (@default_options[method] = oldopts.merge(options)) newopts[:prefix] = "#{oldopts[:prefix]}#{options[:prefix]}" if oldopts[:prefix] || options[:prefix] else @default_options[method] = options end end end protected def wrap(method, *args) options = args.last options.merge!(@default_options[method]) if Hash === options && @default_options.include?(method) yield end end end moneta-1.5.2/lib/moneta/pool.rb000066400000000000000000000237471433316074200163510ustar00rootroot00000000000000require 'set' module Moneta # Creates a thread-safe pool. Stores are in the pool are transparently # checked in and out in order to perform operations. # # A `max` setting can be specified in order to limit the pool size. If `max` # stores are all checked out at once, the next check-out will block until one # of the other stores are checked in. # # A `ttl` setting can be specified, giving the number of seconds to # wait without any activity before shrinking the pool size back down to the # min size. # # A `timeout` setting can be specified, giving the number of seconds to wait # when checking out a store, before an error is raised. When the pool has a # `:max` size, a timeout is highly advisable. 
# # @example Add `Moneta::Pool` to proxy stack # Moneta.build do # use(:Pool) do # adapter :MemcachedNative # end # end # # @example Add `Moneta::Pool` that contains at least 2 stores, and closes any extras after 60 seconds of inactivity # Moneta.build do # use(:Pool, min: 2, ttl: 60) do # adapter :Sqlite, file: 'test.db' # end # end # # @example Add `Moneta::Pool` with a max of 10 stores, and a timeout of 5 seconds for checkout # Moneta.build do # use(:Pool, max: 10, timeout: 5) do # adapter :Sqlite, file: 'test.db' # end # end # # @api public class Pool < Wrapper # @api private class ShutdownError < ::RuntimeError; end class TimeoutError < ::RuntimeError; end # @api private class Reply attr_reader :resource def initialize(mutex) @mutex = mutex @resource = ::ConditionVariable.new @value = nil end def resolve(value) @mutex.synchronize do raise "Already resolved" if @value @value = value @resource.signal end nil end def wait @resource.wait(@mutex) @value end end # @api private class PoolManager def initialize(builder, min: 0, max: nil, ttl: nil, timeout: nil) @builder = builder @min = min @max = max @ttl = ttl @timeout = timeout @inbox = [] @mutex = ::Mutex.new @resource = ::ConditionVariable.new @stores = Set.new @available = [] @waiting = [] @waiting_since = [] if @timeout @last_checkout = nil @stopping = false @idle_time = nil # Launch the manager thread @thread = run end def stats push(:stats, reply: true) end def stop push(:stop) nil ensure @thread.value end def kill! @thread.kill nil end def check_out reply = push(:check_out, reply: true) raise reply if Exception === reply reply end def check_in(store) push(:check_in, store) end private def run Thread.new do begin populate_stores until @stopping && @stores.empty? 
loop_start = Time.now # Block until a message arrives, or until we time out for some reason request = pop # Record how long we were idle, for stats purposes @idle_time = Time.now - loop_start # If a message arrived, handle it handle_request(request) if request # Handle any stale checkout requests handle_timed_out_requests # Drop any stores that are no longer needed remove_unneeded_stores end rescue => e reject_waiting(e.message) raise end end end def populate_stores return if @stopping @available.push(add_store) while @stores.length < @min end # If the last checkout was more than timeout ago, drop any available stores def remove_unneeded_stores return unless @stopping || (@ttl && @last_checkout && Time.now - @last_checkout >= @ttl) while (@stopping || @stores.length > @min) and store = @available.pop store.close rescue nil @stores.delete(store) end end # If there are checkout requests that have been waiting too long, # feed them timeout errors. def handle_timed_out_requests while @timeout && !@waiting.empty? && (Time.now - @waiting_since.first) >= @timeout waiting_since = @waiting_since.shift @waiting.shift.resolve(TimeoutError.new("Waited %f seconds" % { secs: Time.now - waiting_since })) end end # This is called from outside the loop thread def push(message, what = nil, reply: nil) @mutex.synchronize do raise ShutdownError, "Pool has been shutdown" if reply && !@thread.alive? reply &&= Reply.new(@mutex) @inbox.push([message, what, reply]) @resource.signal reply.wait if reply end end # This method calculates the number of seconds to wait for a signal on # the condition variable, or `nil` if there is no need to time out. # # Calculated based on the `:ttl` and `:timeout` options used during # construction. # # @return [Integer, nil] def timeout # Time to wait before there will be stores that should be closed ttl = if @ttl && @last_checkout && stores_available? && stores_unneeded? 
[@ttl - (Time.now - @last_checkout), 0].max end # Time to wait timeout = if @timeout && !@waiting_since.empty? longest_waiting = @waiting_since.first [@timeout - (Time.now - longest_waiting), 0].max end [ttl, timeout].compact.min end def stores_available? !@available.empty? end def stores_unneeded? @stores.length > @min end def stores_maxed? @max != nil && @stores.length == @max end def pop @mutex.synchronize do @resource.wait(@mutex, timeout) if @inbox.empty? @inbox.shift end end def add_store store = @builder.build.last @stores.add(store) store end def handle_check_out(reply) @last_checkout = Time.now if @stopping reply.resolve(ShutdownError.new("Shutting down")) elsif !@available.empty? reply.resolve(@available.pop) elsif !stores_maxed? begin reply.resolve(add_store) rescue => e reply.resolve(e) end else @waiting.push(reply) @waiting_since.push(Time.now) if @timeout end end def handle_stop @stopping = true # Reject anyone left waiting reject_waiting "Shutting down" end def reject_waiting(reason) while reply = @waiting.shift reply.resolve(ShutdownError.new(reason)) end @waiting_since = [] if @timeout end def handle_check_in(store) if !@waiting.empty? @waiting.shift.resolve(store) @waiting_since.shift if @timeout else @available.push(store) end end def handle_stats(reply) reply.resolve(stores: @stores.length, available: @available.length, waiting: @waiting.length, longest_wait: @timeout && !@waiting_since.empty? ? @waiting_since.first.dup : nil, stopping: @stopping, last_checkout: @last_checkout && @last_checkout.dup, idle_time: @idle_time.dup) end def handle_request(request) cmd, what, reply = request case cmd when :check_out handle_check_out(reply) when :check_in # A checkin request handle_check_in(what) when :stats handle_stats(reply) when :stop # Graceful exit handle_stop end end end # @param [Hash] options # @option options [Integer] :min (0) The minimum pool size # @option options [Integer] :max The maximum pool size. If not specified, # there is no maximum. 
# @option options [Numeric] :ttl The number of seconds to keep # stores above the minumum number around for without activity. If # not specified, stores will never be removed. # @option options [Numeric] :timeout The number of seconds to wait for a # store to become available. If not specified, will wait forever. # @yield A builder context for speciying how to construct stores def initialize(options = {}, &block) @id = "Moneta::Pool(#{object_id})" @manager = PoolManager.new(Builder.new(&block), **options) super(nil, options) end # Closing has no effect on the pool, as stores are closed in the background # by the manager after the ttl def close; end def each_key(&block) wrap(:each_key) do raise NotImplementedError, "each_key is not supported on this proxy" \ unless supports? :each_key return enum_for(:each_key) { adapter ? adapter.each_key.size : check_out! { adapter.each_key.size } } unless block_given? adapter.each_key(&block) self end end # Tells the manager to close all stores. It will not be possible to use # the store after this. def stop @manager.stop nil end def stats @manager.stats end protected def adapter Thread.current.thread_variable_get(@id) end def adapter=(store) Thread.current.thread_variable_set(@id, store) end def wrap(*args, &block) if adapter yield else check_out!(&block) end end def check_out! store = @manager.check_out self.adapter = store yield ensure self.adapter = nil @manager.check_in store if store end end end moneta-1.5.2/lib/moneta/proxy.rb000066400000000000000000000112761433316074200165530ustar00rootroot00000000000000module Moneta # Proxy base class # @api public class Proxy include Defaults include Config attr_reader :adapter # @param [Moneta store] adapter underlying adapter # @param [Hash] options def initialize(adapter, options = {}) @adapter = adapter configure(**options) end # (see Defaults#key?) 
def key?(key, options = {}) adapter.key?(key, options) end # (see Defaults#each_key) def each_key(&block) raise NotImplementedError, "each_key is not supported on this proxy" \ unless supports? :each_key return enum_for(:each_key) { adapter.each_key.size } unless block_given? adapter.each_key(&block) self end # (see Defaults#increment) def increment(key, amount = 1, options = {}) adapter.increment(key, amount, options) end # (see Defaults#create) def create(key, value, options = {}) adapter.create(key, value, options) end # (see Defaults#close) def close adapter.close end # Fetch value with key. Return nil if the key doesn't exist # # @param [Object] key # @param [Hash] options # @option options [Integer] :expires Update expiration time (See {Expires}) # @option options [Boolean] :raw Raw access without value transformation (See {Transformer}) # @option options [String] :prefix Prefix key (See {Transformer}) # @option options [Boolean] :sync Synchronized load ({Cache} reloads from adapter, {Adapters::Daybreak} syncs with file) # @option options Other options as defined by the adapters or middleware # @return [Object] value # @api public def load(key, options = {}) adapter.load(key, options) end # Store value with key # # @param [Object] key # @param [Object] value # @param [Hash] options # @option options [Integer] :expires Set expiration time (See {Expires}) # @option options [Boolean] :raw Raw access without value transformation (See {Transformer}) # @option options [String] :prefix Prefix key (See {Transformer}) # @option options Other options as defined by the adapters or middleware # @return value # @api public def store(key, value, options = {}) adapter.store(key, value, options) end # Delete the key from the store and return the current value # # @param [Object] key # @return [Object] current value # @param [Hash] options # @option options [Boolean] :raw Raw access without value transformation (See {Transformer}) # @option options [String] :prefix Prefix key 
(See {Transformer}) # @option options Other options as defined by the adapters or middleware # @api public def delete(key, options = {}) adapter.delete(key, options) end # Clear all keys in this store # # @param [Hash] options # @return [void] # @api public def clear(options = {}) adapter.clear(options) self end # (see Defaults#values_at) def values_at(*keys, **options) adapter.values_at(*keys, **options) end # (see Defaults#fetch_values) def fetch_values(*keys, **options, &defaults) adapter.fetch_values(*keys, **options, &defaults) end # (see Defaults#slice) def slice(*keys, **options) adapter.slice(*keys, **options) end # (see Defaults#merge!) def merge!(pairs, options = {}, &block) adapter.merge!(pairs, options, &block) self end # (see Defaults#features) def features @features ||= (self.class.features | adapter.features - self.class.features_mask).freeze end class << self # @api private def features_mask @features_mask ||= [].freeze end # (see Defaults::ClassMethods#not_supports) def not_supports(*features) @features_mask = (features_mask | features).freeze super end end # Overrides the default implementation of the config method to: # # * pass the adapter's config, if this proxy has no configuration of its # own # * return a merged configuration, allowing the proxy have precedence over # the adapter def config unless @proxy_config config = super adapter_config = adapter.config if adapter.class.include?(Config) @proxy_config = if config && adapter_config adapter_members = adapter_config.members - config.members members = config.members + adapter_members struct = Struct.new(*members) values = config.values + adapter_config.to_h.values_at(*adapter_members) struct.new(*values) else config || adapter_config end end @proxy_config end end end moneta-1.5.2/lib/moneta/server.rb000066400000000000000000000165651433316074200167060ustar00rootroot00000000000000require 'socket' module Moneta # Moneta server to be used together with Moneta::Adapters::Client # @api public class 
Server include Config config :timeout, default: 1 config :max_size, default: 0x100000 # @api private class Connection def initialize(io, store, max_size) @io = io @store = store @max_size = max_size @fiber = Fiber.new { run } end def resume(result = nil) @fiber.resume result end private # The return value of this function will be sent to the reactor. # # @return [:closed,Exception] def run catch :closed do loop { write_dispatch(read_msg) } end :closed rescue => ex ex ensure @io.close unless @io.closed? end def dispatch(method, args) case method when :key?, :load, :delete, :increment, :create, :features @store.public_send(method, *args) when :store, :clear @store.public_send(method, *args) nil when :each_key yield_each(@store.each_key) nil end rescue => ex ex end def write_dispatch(msg) method, *args = msg result = dispatch(method, args) write(result) end def read_msg size = read(4).unpack('N').first throw :closed, 'Message too big' if size > @max_size Marshal.load(read(size)) end def read(len) buffer = '' loop do begin case received = @io.recv_nonblock(len) when '', nil throw :closed, 'Closed during read' else buffer << received len -= received.bytesize end rescue IO::WaitReadable, IO::WaitWritable yield_to_reactor(:read) rescue Errno::ECONNRESET throw :closed, 'Closed during read' rescue IOError => ex if ex.message =~ /closed stream/ throw :closed, 'Closed during read' else raise end end break if len == 0 end buffer end def write(obj) buffer = pack(obj) until buffer.empty? 
begin len = sendmsg(buffer) buffer = buffer.byteslice(len...buffer.length) rescue IO::WaitWritable, Errno::EINTR yield_to_reactor(:write) end end nil end # Detect support for socket#sendmsg_nonblock Socket.new(Socket::AF_INET, Socket::SOCK_STREAM).tap do |socket| begin socket.sendmsg_nonblock('probe') rescue Errno::EPIPE, Errno::ENOTCONN def sendmsg(msg) @io.sendmsg_nonblock(msg) end rescue NotImplementedError def sendmsg(msg) @io.write_nonblock(msg) end end end def yield_to_reactor(mode = :read) if Fiber.yield(mode) == :close throw :closed, 'Closed by reactor' end end def pack(obj) s = Marshal.dump(obj) [s.bytesize].pack('N') << s end def yield_each(enumerator) received_break = false loop do case msg = read_msg when %w{NEXT} # This will raise a StopIteration at the end of the enumeration, # which will exit the loop. write(enumerator.next) when %w{BREAK} # This is received when the client wants to stop the enumeration. received_break = true break else # Otherwise, the client is attempting to call another method within # an `each` block. write_dispatch(msg) end end ensure # This tells the client to stop enumerating write(StopIteration.new("Server initiated stop")) unless received_break end end # @param [Hash] options # @option options [Integer] :port (9000) TCP port # @option options [String] :socket Alternative Unix socket file name # @option options [Integer] :timeout (1) Number of seconds to timeout on IO.select # @option options [Integer] :max_size (0x100000) Maximum number of bytes # allowed to be sent by clients in requests def initialize(store, options = {}) options = configure(**options) @store = store @server = start(**options) @ios = [@server] @reads = @ios.dup @writes = [] @connections = {} @running = false end # Is the server running # # @return [Boolean] true if the server is running def running? @running end # Run the server # # @note This method blocks! def run raise 'Already running' if running? 
@stop = false @running = true begin mainloop until @stop ensure @running = false @server.close unless @server.closed? @ios .reject { |io| io == @server } .each { |io| close_connection(io) } File.unlink(config.socket) if config.socket rescue nil end end # Stop the server def stop raise 'Not running' unless running? @stop = true @server.close nil end private def mainloop if ready = IO.select(@reads, @writes, @ios, config.timeout) reads, writes, errors = ready errors.each { |io| close_connection(io) } @reads -= reads reads.each do |io| io == @server ? accept_connection : resume(io) end @writes -= writes writes.each { |io| resume(io) } end rescue SignalException => signal warn "Moneta::Server - received #{signal}" case signal.signo when Signal.list['INT'], Signal.list['TERM'] @stop = true # graceful exit end rescue IOError => ex # We get a lot of these "closed stream" errors, which we ignore raise unless ex.message =~ /closed stream/ rescue Errno::EBADF => ex warn "Moneta::Server - #{ex.message}" end def accept_connection io = @server.accept @connections[io] = Connection.new(io, @store, config.max_size) @ios << io resume(io) ensure @reads << @server end def delete_connection(io) @ios.delete(io) @reads.delete(io) @writes.delete(io) end def close_connection(io) delete_connection(io) @connections.delete(io).resume(:close) end def resume(io) case result = @connections[io].resume when :closed # graceful exit delete_connection(io) when Exception # messy exit delete_connection(io) raise result when :read @reads << io when :write @writes << io end end def start(host: '127.0.0.1', port: 9000, socket: nil) if socket begin UNIXServer.open(socket) rescue Errno::EADDRINUSE if client = (UNIXSocket.open(socket) rescue nil) client.close raise end File.unlink(socket) tries ||= 0 (tries += 1) < 3 ? 
retry : raise end else TCPServer.open(host, port) end end def stats { connections: @connections.length, reading: @reads.length, writing: @writes.length, total: @ios.length } end end end moneta-1.5.2/lib/moneta/shared.rb000066400000000000000000000044671433316074200166440ustar00rootroot00000000000000module Moneta # Shares a store between processes # # @example Share a store # Moneta.build do # use :Transformer, key: :marshal, value: :marshal # use :Shared do # adapter :GDBM, file: 'shared.db' # end # end # # @api public class Shared < Wrapper # @param [Hash] options # @option options [Integer] :port (9000) TCP port # @option options [String] :host Server hostname # @option options [String] :socket Unix socket file name def initialize(options = {}, &block) @options = options @builder = Builder.new(&block) @connect_lock = ::Mutex.new end # (see Proxy#close) def close if server? @server.stop @thread.join @server = @thread = nil end if @adapter @adapter.close @adapter = nil end end # Returns true if this wrapper is running as the server # # @return [Boolean] wrapper is a server def server? @server != nil end protected def wrap(*args) connect yield rescue Errno::ECONNRESET, Errno::EPIPE, IOError, SystemCallError @connect_lock.synchronize { close unless server? } tries ||= 0 (tries += 1) < 3 ? retry : raise end def connect return if @adapter @connect_lock.synchronize do @adapter ||= Adapters::Client.new(@options) end rescue Errno::ECONNREFUSED, Errno::ENOENT, IOError => ex start_server tries ||= 0 warn "Moneta::Shared - Failed to connect: #{ex.message}" if tries > 0 (tries += 1) < 10 ? retry : raise end # TODO: Implement this using forking (MRI) and threading (JRuby) # to get maximal performance def start_server @connect_lock.synchronize do return if server? begin raise "Adapter already set" if @adapter @adapter = Lock.new(@builder.build.last) raise "Server already set" if server? 
@server = Server.new(@adapter, @options) @thread = Thread.new { @server.run } sleep 0.1 until @server.running? rescue => ex @adapter.close if @adapter @adapter = nil @server = nil @thread = nil warn "Moneta::Shared - Failed to start server: #{ex.message}" end end end end end moneta-1.5.2/lib/moneta/stack.rb000066400000000000000000000046121433316074200164730ustar00rootroot00000000000000module Moneta # Combines multiple stores. Reads return the result from the first store, # writes go to all stores. # # @example Add `Moneta::Stack` to proxy stack # Moneta.build do # use(:Stack) do # add { adapter :Redis } # add { adapter :File, dir: 'data' } # add { adapter :File, dir: 'replicate' } # end # end # # @api public class Stack include Defaults # @api private class DSL def initialize(stack, &block) @stack = stack instance_eval(&block) end # @api public def add(store = nil, &block) raise ArgumentError, 'Only argument or block allowed' if store && block @stack << (store || Moneta.build(&block)) nil end end attr_reader :stack # @param [Hash] options Options hash # @option options [Array] :stack Array of Moneta stores # @yieldparam Builder block def initialize(options = {}, &block) @stack = options[:stack].to_a DSL.new(@stack, &block) if block_given? end # (see Proxy#key?) def key?(key, options = {}) @stack.any? 
{ |s| s.key?(key, options) } end # (see Proxy#load) def load(key, options = {}) @stack.each do |s| value = s.load(key, options) return value if value != nil end nil end # (see Proxy#store) def store(key, value, options = {}) @stack.each { |s| s.store(key, value, options) } value end # (see Proxy#increment) def increment(key, amount = 1, options = {}) last = nil @stack.each { |s| last = s.increment(key, amount, options) } last end # (see Proxy#create) def create(key, value, options = {}) last = false @stack.each { |s| last = s.create(key, value, options) } last end # (see Proxy#delete) def delete(key, options = {}) @stack.inject(nil) do |value, s| v = s.delete(key, options) value || v end end # (see Proxy#clear) def clear(options = {}) @stack.each { |s| s.clear(options) } self end # (see Proxy#close) def close @stack.each { |s| s.close } nil end # (see Proxy#features) def features @features ||= begin features = @stack.map(&:features) (features.inject(features.first, &:&) - [:each_key]).freeze end end end end moneta-1.5.2/lib/moneta/synchronize.rb000066400000000000000000000057211433316074200177430ustar00rootroot00000000000000module Moneta # Base class for {Mutex} and {Semaphore} # @abstract class SynchronizePrimitive # Synchronize block # # @api public # @yieldparam Synchronized block # @return [Object] result of block def synchronize enter yield ensure leave end # Try to enter critical section (nonblocking) # # @return [Boolean] true if the lock was acquired def try_enter raise 'Already locked' if @locked enter_primitive ? 
@locked = true : false end alias try_lock try_enter # Enter critical section (blocking) # # @param [Number] timeout Maximum time to wait # @param [Number] wait Sleep time between tries to acquire lock # @return [Boolean] true if the lock was aquired def enter(timeout = nil, wait = 0.01) time_at_timeout = Time.now + timeout if timeout while !timeout || Time.now < time_at_timeout return true if try_enter sleep(wait) end false end alias lock enter # Leave critical section def leave raise 'Not locked' unless @locked leave_primitive @locked = false nil end alias unlock leave # Is the lock acquired? def locked? @locked end end # Distributed/shared store-wide mutex # # @example Use `Moneta::Mutex` # mutex = Moneta::Mutex.new(store, 'mutex') # mutex.synchronize do # # Synchronized access # store['counter'] += 1 # end # # @api public class Mutex < SynchronizePrimitive # @param [Moneta store] store The store we want to lock # @param [Object] lock Key of the lock entry def initialize(store, lock) raise 'Store must support feature :create' unless store.supports?(:create) @store, @lock = store, lock end protected def enter_primitive @store.create(@lock, '', expires: false) end def leave_primitive @store.delete(@lock) end end # Distributed/shared store-wide semaphore # # @example Use `Moneta::Semaphore` # semaphore = Moneta::Semaphore.new(store, 'semaphore', 2) # semaphore.synchronize do # # Synchronized access # # ... 
# end # # @api public class Semaphore < SynchronizePrimitive # @param [Moneta store] store The store we want to lock # @param [Object] counter Key of the counter entry # @param [Integer] max Maximum number of threads which are allowed to enter the critical section def initialize(store, counter, max = 1) raise 'Store must support feature :increment' unless store.supports?(:increment) @store, @counter, @max = store, counter, max @store.increment(@counter, 0, expires: false) # Ensure that counter exists end protected def enter_primitive if @store.increment(@counter, 1) <= @max true else @store.decrement(@counter) false end end def leave_primitive @store.decrement(@counter) end end end moneta-1.5.2/lib/moneta/transformer.rb000066400000000000000000000370521433316074200177340ustar00rootroot00000000000000module Moneta # Transforms keys and values (Marshal, YAML, JSON, Base64, MD5, ...). # You can bypass the transformer (e.g. serialization) by using the `:raw` option. # # @example Add `Moneta::Transformer` to proxy stack # Moneta.build do # transformer key: [:marshal, :escape], value: [:marshal] # adapter :File, dir: 'data' # end # # @example Bypass serialization # store.store('key', 'value', raw: true) # store['key'] # raises an Exception # store.load('key', raw: true) # returns 'value' # # store['key'] = 'value' # store.load('key', raw: true) # returns "\x04\bI\"\nvalue\x06:\x06ET" # # @api public class Transformer < Proxy class << self alias original_new new # @param [Moneta store] adapter The underlying store # @param [Hash] options # @return [Transformer] new Moneta transformer # @option options [Array] :key List of key transformers in the order in which they should be applied # @option options [Array] :value List of value transformers in the order in which they should be applied # @option options [String] :prefix Prefix string for key namespacing (Used by the :prefix key transformer) # @option options [String] :secret HMAC secret to verify values (Used by the :hmac 
value transformer) # @option options [Integer] :maxlen Maximum key length (Used by the :truncate key transformer) def new(adapter, options = {}) keys = [options[:key]].flatten.compact values = [options[:value]].flatten.compact raise ArgumentError, 'Option :key or :value is required' if keys.empty? && values.empty? options[:prefix] ||= '' if keys.include?(:prefix) name = class_name(keys, values) const_set(name, compile(keys, values)) unless const_defined?(name) const_get(name).original_new(adapter, options) end private def compile(keys, values) @key_validator ||= compile_validator(KEY_TRANSFORMER) @load_key_validator ||= compile_validator(LOAD_KEY_TRANSFORMER) @test_key_validator ||= compile_validator(TEST_KEY_TRANSFORMER) @value_validator ||= compile_validator(VALUE_TRANSFORMER) raise ArgumentError, 'Invalid key transformer chain' if @key_validator !~ keys.map(&:inspect).join raise ArgumentError, 'Invalid value transformer chain' if @value_validator !~ values.map(&:inspect).join klass = Class.new(self) compile_each_key_support_clause(klass, keys) klass.class_eval <<-END_EVAL, __FILE__, __LINE__ + 1 def initialize(adapter, options = {}) super #{compile_initializer('key', keys)} #{compile_initializer('value', values)} end END_EVAL key, key_opts = compile_transformer(keys, 'key') key_load, key_load_opts = compile_transformer(keys.reverse, 'key', 1) if @load_key_validator =~ keys.map(&:inspect).join key_test, key_test_opts = compile_transformer(keys.reverse, 'key', 4) if @test_key_validator =~ keys.map(&:inspect).join dump, dump_opts = compile_transformer(values, 'value') load, load_opts = compile_transformer(values.reverse, 'value', 1) if values.empty? compile_key_transformer(klass, key, key_opts, key_load, key_load_opts, key_test, key_test_opts) elsif keys.empty? 
compile_value_transformer(klass, load, load_opts, dump, dump_opts) else compile_key_value_transformer(klass, key, key_opts, key_load, key_load_opts, key_test, key_test_opts, load, load_opts, dump, dump_opts) end klass end def without(*options) options = options.flatten.uniq options.empty? ? 'options' : "Utils.without(options, #{options.map(&:to_sym).map(&:inspect).join(', ')})" end def compile_each_key_support_clause(klass, keys) klass.class_eval <<-END_EVAL, __FILE__, __LINE__ + 1 #{'not_supports :each_key' if @load_key_validator !~ keys.map(&:inspect).join} END_EVAL end def compile_key_transformer(klass, key, key_opts, key_load, key_load_opts, key_test, key_test_opts) if_key_test = key_load && key_test ? "if #{key_test}" : '' klass.class_eval <<-END_EVAL, __FILE__, __LINE__ + 1 def key?(key, options = {}) @adapter.key?(#{key}, #{without key_opts}) end def each_key(&block) raise NotImplementedError, "each_key is not supported on this transformer" \ unless supports? :each_key return enum_for(:each_key) unless block_given? @adapter.each_key.lazy.map{ |key| #{key_load} #{if_key_test} }.reject(&:nil?).each(&block) self end def increment(key, amount = 1, options = {}) @adapter.increment(#{key}, amount, #{without key_opts}) end def load(key, options = {}) @adapter.load(#{key}, #{without :raw, key_opts}) end def store(key, value, options = {}) @adapter.store(#{key}, value, #{without :raw, key_opts}) end def delete(key, options = {}) @adapter.delete(#{key}, #{without :raw, key_opts}) end def create(key, value, options = {}) @adapter.create(#{key}, value, #{without :raw, key_opts}) end def values_at(*keys, **options) t_keys = keys.map { |key| #{key} } @adapter.values_at(*t_keys, **#{without :raw, key_opts}) end def fetch_values(*keys, **options) t_keys = keys.map { |key| #{key} } block = if block_given? 
key_lookup = Hash[t_keys.zip(keys)] lambda { |t_key| yield key_lookup[t_key] } end @adapter.fetch_values(*t_keys, **#{without :raw, key_opts}, &block) end def slice(*keys, **options) t_keys = keys.map { |key| #{key} } key_lookup = Hash[t_keys.zip(keys)] @adapter.slice(*t_keys, **#{without :raw, key_opts}).map do |key, value| [key_lookup[key], value] end end def merge!(pairs, options = {}) keys, values = pairs.to_a.transpose t_keys = keys.map { |key| #{key} } block = if block_given? key_lookup = Hash[t_keys.zip(keys)] lambda { |k, old, new| yield(key_lookup[k], old, new) } end @adapter.merge!(t_keys.zip(values), #{without :raw, key_opts}, &block) self end END_EVAL end def compile_value_transformer(klass, load, load_opts, dump, dump_opts) klass.class_eval <<-END_EVAL, __FILE__, __LINE__ + 1 def load(key, options = {}) value = @adapter.load(key, #{without :raw, load_opts}) value && !options[:raw] ? #{load} : value end def store(key, value, options = {}) @adapter.store(key, options[:raw] ? value : #{dump}, #{without :raw, dump_opts}) value end def delete(key, options = {}) value = @adapter.delete(key, #{without :raw, load_opts}) value && !options[:raw] ? #{load} : value end def create(key, value, options = {}) @adapter.create(key, options[:raw] ? value : #{dump}, #{without :raw, dump_opts}) end def values_at(*keys, **options) values = @adapter.values_at(*keys, **#{without :raw, load_opts}) values.map do |value| value && !options[:raw] ? #{load} : value end end def fetch_values(*keys, **options, &orig_block) substituted = {} block = if block_given? lambda { |key| substituted[key] = true; yield key } end values = @adapter.fetch_values(*keys, **#{without :raw, load_opts}, &block) if options[:raw] values else keys.map(&substituted.method(:key?)).zip(values).map do |substituted, value| if substituted || !value value else #{load} end end end end def slice(*keys, **options) @adapter.slice(*keys, **#{without :raw, load_opts}).map do |key, value| [key, value && !options[:raw] ? 
#{load} : value] end end def merge!(pairs, options = {}, &orig_block) block = if block_given? if options[:raw] orig_block else lambda do |k, old_val, new_val| value = old_val; old_val = #{load} value = new_val; new_val = #{load} value = yield(k, old_val, new_val) #{dump} end end end t_pairs = options[:raw] ? pairs : pairs.map { |key, value| [key, #{dump}] } @adapter.merge!(t_pairs, #{without :raw, dump_opts}, &block) self end END_EVAL end def compile_key_value_transformer(klass, key, key_opts, key_load, key_load_opts, key_test, key_test_opts, load, load_opts, dump, dump_opts) if_key_test = key_load && key_test ? "if #{key_test}" : '' klass.class_eval <<-END_EVAL, __FILE__, __LINE__ + 1 def key?(key, options = {}) @adapter.key?(#{key}, #{without key_opts}) end def each_key(&block) raise NotImplementedError, "each_key is not supported on this transformer" \ unless supports? :each_key return enum_for(:each_key) { @adapter.each_key.size } unless block_given? @adapter.each_key.lazy.map{ |key| #{key_load} #{if_key_test} }.reject(&:nil?).each(&block) self end def increment(key, amount = 1, options = {}) @adapter.increment(#{key}, amount, #{without key_opts}) end def load(key, options = {}) value = @adapter.load(#{key}, #{without :raw, key_opts, load_opts}) value && !options[:raw] ? #{load} : value end def store(key, value, options = {}) @adapter.store(#{key}, options[:raw] ? value : #{dump}, #{without :raw, key_opts, dump_opts}) value end def delete(key, options = {}) value = @adapter.delete(#{key}, #{without :raw, key_opts, load_opts}) value && !options[:raw] ? #{load} : value end def create(key, value, options = {}) @adapter.create(#{key}, options[:raw] ? value : #{dump}, #{without :raw, key_opts, dump_opts}) end def values_at(*keys, **options) t_keys = keys.map { |key| #{key} } values = @adapter.values_at(*t_keys, **#{without :raw, key_opts, load_opts}) values.map do |value| value && !options[:raw] ? 
#{load} : value end end def fetch_values(*keys, **options) t_keys = keys.map { |key| #{key} } key_lookup = Hash[t_keys.zip(keys)] substituted = {} block = if block_given? lambda do |t_key| key = key_lookup[t_key] substituted[key] = true yield key end end values = @adapter.fetch_values(*t_keys, **#{without :raw, key_opts, load_opts}, &block) if options[:raw] values else keys.map(&substituted.method(:key?)).zip(values).map do |substituted, value| if substituted || !value value else #{load} end end end end def slice(*keys, **options) t_keys = keys.map { |key| #{key} } key_lookup = Hash[t_keys.zip(keys)] @adapter.slice(*t_keys, **#{without :raw, key_opts, load_opts}).map do |key, value| [key_lookup[key], value && !options[:raw] ? #{load} : value] end end def merge!(pairs, options = {}) keys, values = pairs.to_a.transpose t_keys = keys.map { |key| #{key} } key_lookup = Hash[t_keys.zip(keys)] block = if block_given? if options[:raw] lambda do |k, old_val, new_val| yield(key_lookup[k], old_val, new_val) end else lambda do |k, old_val, new_val| value = old_val; old_val = #{load} value = new_val; new_val = #{load} value = yield(key_lookup[k], old_val, new_val) #{dump} end end end t_pairs = if options[:raw] t_keys.zip(values) else t_keys.zip(values.map { |value| #{dump} }) end @adapter.merge!(t_pairs, #{without :raw, key_opts, dump_opts}, &block) self end END_EVAL end # Compile option initializer def compile_initializer(type, transformers) transformers.map do |name| t = TRANSFORMER[name] (t[1].to_s + t[2].to_s).scan(/@\w+/).uniq.map do |opt| "raise ArgumentError, \"Option #{opt[1..-1]} is required for #{name} #{type} transformer\" unless #{opt} = options[:#{opt[1..-1]}]\n" end end.join("\n") end def compile_validator(str) Regexp.new('\A' + str.gsub(/\w+/) do '(' + TRANSFORMER.select { |_, v| v.first.to_s == $& }.map { |v| ":#{v.first}" }.join('|') + ')' end.gsub(/\s+/, '') + '\Z') end # Returned compiled transformer code string def compile_transformer(transformer, var, idx = 
2) value, options = var, [] transformer.each do |name| raise ArgumentError, "Unknown transformer #{name}" unless t = TRANSFORMER[name] require t[3] if t[3] code = t[idx] code ||= compile_prefix(name: name, transformer: t, value: value) if idx == 4 && var == 'key' raise "Undefined command for transformer #{name}" unless code options += code.scan(/options\[:(\w+)\]/).flatten value = if t[0] == :serialize && var == 'key' && idx == 4 "(tmp = #{value}; (false === tmp || '' === tmp) ? false : #{code % 'tmp'})" elsif t[0] == :serialize && var == 'key' "(tmp = #{value}; String === tmp ? tmp : #{code % 'tmp'})" else code % value end end [value, options] end def class_name(keys, values) camel_case = lambda { |sym| sym.to_s.split('_').map(&:capitalize).join } (keys.empty? ? '' : keys.map(&camel_case).join + 'Key') + (values.empty? ? '' : values.map(&camel_case).join + 'Value') end def compile_prefix(name:, transformer:, value:) return unless [:encode, :serialize].include?(transformer[0]) load_val, = compile_transformer([name], value, 1) "(#{load_val} rescue '')" end end end end require 'moneta/transformer/helper' require 'moneta/transformer/config' moneta-1.5.2/lib/moneta/transformer/000077500000000000000000000000001433316074200174005ustar00rootroot00000000000000moneta-1.5.2/lib/moneta/transformer/config.rb000066400000000000000000000124611433316074200211760ustar00rootroot00000000000000module Moneta class Transformer # Available key/value transformers TRANSFORMER = { # Name: [ Type, Load, Dump, Library Test ], bencode: [ :serialize, '::BEncode.load(%s)', '::BEncode.dump(%s)', 'bencode' ], bert: [ :serialize, '::BERT.decode(%s)', '::BERT.encode(%s)', 'bert' ], bson: [ :serialize, 'Helper::BSON.load(%s)', 'Helper::BSON.dump(%s)', 'bson' ], json: [ :serialize, '::MultiJson.load(%s)', '::MultiJson.dump(%s)', 'multi_json' ], marshal: [ :serialize, '::Marshal.load(%s)', '::Marshal.dump(%s)', nil ], msgpack: [ :serialize, '::MessagePack.unpack(%s)', '::MessagePack.pack(%s)', 
'msgpack' ], ox: [ :serialize, '::Ox.parse_obj(%s)', '::Ox.dump(%s)', 'ox' ], php: [ :serialize, '::PHP.unserialize(%s)', '::PHP.serialize(%s)', 'php_serialize'], tnet: [ :serialize, '::TNetstring.parse(%s).first', '::TNetstring.dump(%s)', 'tnetstring' ], yaml: [ :serialize, '::YAML.load(%s)', '::YAML.dump(%s)', 'yaml' ], bzip2: [ :compress, 'Helper.bunzip2(%s)', 'Helper.bzip2(%s)', 'rbzip2' ], lz4: [ :compress, '::LZ4.uncompress(%s)', '::LZ4.compress(%s)', 'lz4-ruby' ], lzma: [ :compress, '::LZMA.decompress(%s)', '::LZMA.compress(%s)', 'lzma' ], lzo: [ :compress, '::LZO.decompress(%s)', '::LZO.compress(%s)', 'lzoruby' ], snappy: [ :compress, '::Snappy.inflate(%s)', '::Snappy.deflate(%s)', 'snappy' ], quicklz: [ :compress, '::QuickLZ.decompress(%s)', '::QuickLZ.compress(%s)', 'qlzruby' ], zlib: [ :compress, '::Zlib::Inflate.inflate(%s)', '::Zlib::Deflate.deflate(%s)', 'zlib' ], base64: [ :encode, "%s.unpack('m0').first", "[%s].pack('m0')" ], urlsafe_base64: [ :encode, 'Base64.urlsafe_decode64(%s)', 'Base64.urlsafe_encode64(%s)', 'base64' ], escape: [ :encode, 'Helper.unescape(%s)', 'Helper.escape(%s)' ], hex: [ :encode, "[%s].pack('H*')", "%s.unpack('H*').first" ], qp: [ :encode, "%s.unpack('M').first", "[%s].pack('M')" ], uuencode: [ :encode, "%s.unpack('u').first", "[%s].pack('u')" ], hmac: [ :hmac, 'Helper.hmacverify(%s, options[:secret] || @secret)', 'Helper.hmacsign(%s, options[:secret] || @secret)', 'openssl' ], prefix: [ :prefix, "%s.sub(@prefix, '')", '(options[:prefix] || @prefix)+%s', nil, "%s.start_with?(@prefix)" ], truncate: [ :truncate, nil, 'Helper.truncate(%s, @maxlen)', 'digest/md5' ], md5: [ :digest, nil, '::Digest::MD5.hexdigest(%s)', 'digest/md5' ], rmd160: [ :digest, nil, '::Digest::RMD160.hexdigest(%s)', 'digest/rmd160'], sha1: [ :digest, nil, '::Digest::SHA1.hexdigest(%s)', 'digest/sha1' ], sha256: [ :digest, nil, '::Digest::SHA256.hexdigest(%s)', 'digest/sha2' ], sha384: [ :digest, nil, '::Digest::SHA384.hexdigest(%s)', 'digest/sha2' ], 
sha512: [ :digest, nil, '::Digest::SHA512.hexdigest(%s)', 'digest/sha2' ], city32: [ :digest, nil, '::CityHash.hash32(%s).to_s(16)', 'cityhash' ], city64: [ :digest, nil, '::CityHash.hash64(%s).to_s(16)', 'cityhash' ], city128: [ :digest, nil, '::CityHash.hash128(%s).to_s(16)', 'cityhash' ], spread: [ :spread, nil, 'Helper.spread(%s)' ], to_s: [ :string, nil, '%s.to_s' ], inspect: [ :string, nil, '%s.inspect' ] }.freeze # Allowed value transformers (Read it like a regular expression!) VALUE_TRANSFORMER = 'serialize? compress? hmac? encode?'.freeze # Allowed key transformers (Read it like a regular expression!) KEY_TRANSFORMER = '(serialize | string)? prefix? ((encode? truncate?) | (digest spread?))?'.freeze # Key transformers that can be "loaded" (e.g. reversed) and can be used by the key enumeration feature LOAD_KEY_TRANSFORMER = 'serialize? prefix? encode?'.freeze # Key transformers that can be "tested for success" with a dumped key and can be used by the key enumeration feature TEST_KEY_TRANSFORMER = 'serialize? prefix? 
encode?'.freeze end end moneta-1.5.2/lib/moneta/transformer/helper.rb000066400000000000000000000024661433316074200212140ustar00rootroot00000000000000module Moneta class Transformer # @api private module Helper extend self def escape(value) value.gsub(/[^a-zA-Z0-9_-]+/) { |match| '%' + match.unpack('H2' * match.bytesize).join('%').upcase } end def unescape(value) value.gsub(/(?:%[0-9a-fA-F]{2})+/) { |match| [match.delete('%')].pack('H*') } end def hmacverify(value, secret) hash, value = value[0..31], value[32..-1] value if hash == OpenSSL::HMAC.digest(OpenSSL::Digest.new('sha256'), secret, value) end def hmacsign(value, secret) OpenSSL::HMAC.digest(OpenSSL::Digest.new('sha256'), secret, value) << value end def truncate(value, maxlen) if value.size >= maxlen digest = Digest::MD5.hexdigest(value) value = value[0, maxlen - digest.size] << digest end value end def spread(value) ::File.join(value[0..1], value[2..-1]) end def bzip2(value) io = ::StringIO.new bz = ::RBzip2.default_adapter::Compressor.new(io) bz.write(value) bz.close io.string end def bunzip2(value) ::RBzip2.default_adapter::Decompressor.new(::StringIO.new(value)).read end autoload :BSON, 'moneta/transformer/helper/bson' end end end moneta-1.5.2/lib/moneta/transformer/helper/000077500000000000000000000000001433316074200206575ustar00rootroot00000000000000moneta-1.5.2/lib/moneta/transformer/helper/bson.rb000066400000000000000000000005241433316074200221460ustar00rootroot00000000000000module Moneta class Transformer module Helper # @api private module BSON extend self def load(value) ::BSON::Document.from_bson(::BSON::ByteBuffer.new(value))['v'] end def dump(value) ::BSON::Document['v' => value].to_bson.to_s end end end end end moneta-1.5.2/lib/moneta/utils.rb000066400000000000000000000006271433316074200165300ustar00rootroot00000000000000module Moneta # @api private module Utils extend self def without(hash, *keys) return hash if hash.empty? if keys.any? 
{ |k| hash.include?(k) } hash = hash.dup keys.each { |k| hash.delete(k) } end hash end def only(hash, *keys) return hash if hash.empty? ret = {} keys.each { |k| ret[k] = hash[k] } ret end end end moneta-1.5.2/lib/moneta/version.rb000066400000000000000000000001271433316074200170500ustar00rootroot00000000000000module Moneta # Moneta version number # @api public VERSION = '1.5.2'.freeze end moneta-1.5.2/lib/moneta/weak.rb000066400000000000000000000016651433316074200163220ustar00rootroot00000000000000module Moneta # Adds weak create support to the underlying store # # @note The create method will not be thread or multi-process safe (this is meant by weak) # @api public class WeakCreate < Proxy include CreateSupport # @param [Moneta store] adapter The underlying store # @param [Hash] options def initialize(adapter, options = {}) raise 'Store already supports feature :create' if adapter.supports?(:create) super end end # Adds weak increment support to the underlying store # # @note The increment method will not be thread or multi-process safe (this is meant by weak) # @api public class WeakIncrement < Proxy include IncrementSupport # @param [Moneta store] adapter The underlying store # @param [Hash] options def initialize(adapter, options = {}) raise 'Store already supports feature :increment' if adapter.supports?(:increment) super end end end moneta-1.5.2/lib/moneta/weak_each_key.rb000066400000000000000000000034731433316074200201510ustar00rootroot00000000000000require 'set' module Moneta # Adds weak key enumeration support to the underlying store # # @note This class wraps methods that store and retrieve entries in order to # track which keys are in the store, and uses this list when doing key # traversal. This means that {#each_key each_key} will only yield keys # which have been accessed previously via the present store object. This # wrapper is therefore best suited to adapters which are not persistent, and # which cannot be shared. 
# # @api public class WeakEachKey < Wrapper supports :each_key # @param [Moneta store] adapter The underlying store # @param [Hash] options def initialize(adapter, options = {}) raise 'Store already supports feature :each_key' if adapter.supports?(:each_key) @all_keys = Set.new super end # (see Proxy#each_key) def each_key return enum_for(:each_key) { all_keys.size } unless block_given? all_keys.each { |key| yield key } self end protected attr_reader :all_keys def wrap(name, *args) case name when :create, :store, :increment, :create each_key_save(args[0]) yield when :key? if found = yield each_key_save(args[0]) else all_keys.delete(args[0]) end found when :load key?(*args) yield when :delete all_keys.delete(args[0]) yield when :clear, :close all_keys.clear yield when :values_at, :fetch_values, :slice args[0].each { |key| key?(key) } yield when :merge! args[0].each { |key, _| each_key_save(key) } yield else yield end end def each_key_save(key) @all_keys = Set.new(@all_keys).add(key) end end end moneta-1.5.2/lib/moneta/wrapper.rb000066400000000000000000000033151433316074200170450ustar00rootroot00000000000000module Moneta # Wraps the calls to the adapter # @api public class Wrapper < Proxy # (see Proxy#key?) 
def key?(key, options = {}) wrap(:key?, key, options) { super } end # (see Proxy#load) def load(key, options = {}) wrap(:load, key, options) { super } end # (see Proxy#store) def store(key, value, options = {}) wrap(:store, key, value, options) { super } end # (see Proxy#delete) def delete(key, options = {}) wrap(:delete, key, options) { super } end # (see Proxy#increment) def increment(key, amount = 1, options = {}) wrap(:increment, key, amount, options) { super } end # (see Proxy#create) def create(key, value, options = {}) wrap(:create, key, value, options) { super } end # (see Proxy#clear) def clear(options = {}) wrap(:clear, options) { super } end # (see Proxy#close) def close wrap(:close) { super } end # (see Proxy#features) def features wrap(:features) { super } end # (see Proxy#each_key) def each_key(&block) wrap(:each_key) { super } end # (see Proxy#values_at) def values_at(*keys, **options) wrap(:values_at, keys, options) { super } end # (see Proxy#fetch_values) def fetch_values(*keys, **options, &defaults) wrap(:fetch_values, keys, options, defaults) { super } end # (see Proxy#slice) def slice(*keys, **options) wrap(:slice, keys, options) { super } end # (see Proxy#merge!) 
def merge!(pairs, options = {}) wrap(:merge!, pairs, options) { super } end # (see Proxy#config) def config wrap(:config) { super } end end end moneta-1.5.2/lib/rack/000077500000000000000000000000001433316074200144735ustar00rootroot00000000000000moneta-1.5.2/lib/rack/cache/000077500000000000000000000000001433316074200155365ustar00rootroot00000000000000moneta-1.5.2/lib/rack/cache/moneta.rb000066400000000000000000000036661433316074200173610ustar00rootroot00000000000000require 'moneta' require 'rack/cache/key' require 'rack/cache/meta_store' require 'rack/cache/entity_store' module Rack module Cache # @api public Moneta = {} # @api private module MonetaResolver include Rack::Utils def resolve(uri) cache = Rack::Cache::Moneta[uri.to_s.sub(%r{^moneta://}, '')] ||= begin options = parse_query(uri.query).map do |key, value| [key.to_sym, case value when 'true' true when 'false' false else value end] end ::Moneta.new(uri.host.to_sym, options.to_h) end new(cache) end end class MetaStore # @api public class Moneta < MetaStore extend MonetaResolver def initialize(cache) @cache = cache end def read(key) @cache[key] || [] end def write(key, entries) @cache[key] = entries end def purge(key) @cache.delete(key) nil end end # @api public MONETA = Moneta end class EntityStore # @api public class Moneta < EntityStore extend MonetaResolver def initialize(cache) @cache = cache end def open(key) data = read(key) data && [data] end def exist?(key) @cache.key?(key) end def read(key) @cache[key] end def write(body, ttl = 0) buf = StringIO.new key, size = slurp(body) { |part| buf.write(part) } @cache.store(key, buf.string, ttl == 0 ? 
{} : { expires: ttl }) [key, size] end def purge(key) @cache.delete(key) nil end end # @api public MONETA = Moneta end end end moneta-1.5.2/lib/rack/moneta_cookies.rb000066400000000000000000000037611433316074200200260ustar00rootroot00000000000000require 'moneta' require 'rack/utils' module Rack # A Rack middleware that was made to reuse all moneta transformers # on the cookie hash. # # @example config.ru # # Add Rack::MonetaCookies somewhere in your rack stack # use Rack::MonetaCookies # # run lambda { |env| [200, {}, []] } # # But this doesn't do much # # @example config.ru # # Give it some options # use Rack::MonetaCookies, domain: 'example.com', path: '/path' # # @example config.ru # # Pass it a block like the one passed to Moneta.build # use Rack::MonetaCookies do # use :Transformer, key: :prefix, prefix: 'moneta.' # adapter :Cookie # end # # run lambda { |env| # req = Rack::Request.new(env) # req.cookies #=> is now a Moneta store! # env['rack.request.cookie_hash'] #=> is now a Moneta store! # req.cookies['key'] #=> retrieves 'moneta.key' # req.cookies['key'] = 'value' #=> sets 'moneta.key' # req.cookies.delete('key') #=> removes 'moneta.key' # [200, {}, []] # } # # @api public class MonetaCookies def initialize(app, options = {}, &block) @app, @pool = app, [] if block raise ArgumentError, 'Use either block or options' unless options.empty? 
@builder = Moneta::Builder.new(&block) else @builder = Moneta::Builder.new { adapter :Cookie, options } end end def call(env) stores = @pool.pop || @builder.build env['rack.moneta_cookies'] = env['rack.request.cookie_hash'] = stores.last env['rack.request.cookie_string'] = env['HTTP_COOKIE'] stores.first.reset(Rack::Utils.parse_query(env['HTTP_COOKIE'])) status, headers, body = @app.call(env) stores.first.cookies.each do |key, cookie| if cookie == nil Rack::Utils.delete_cookie_header!(headers, key) else Rack::Utils.set_cookie_header!(headers, key, cookie) end end @pool << stores [status, headers, body] end end end moneta-1.5.2/lib/rack/moneta_rest.rb000066400000000000000000000037451433316074200173510ustar00rootroot00000000000000require 'moneta' module Rack # A Rack application which provides a REST interface to a Moneta store. # # @example config.ru # map '/moneta' do # run Rack::MonetaRest.new(:Memory) # end # # @example config.ru # # Pass it a block like the one passed to Moneta.build # run Rack::MonetaRest.new do # use :Transformer, value: :zlib # adapter :Memory # end # # @api public class MonetaRest def initialize(store = nil, options = {}, &block) if block raise ArgumentError, 'Use either block or options' unless options.empty? @store = ::Moneta.build(&block) else raise ArgumentError, 'Block or argument store is required' unless @store = store @store = ::Moneta.new(@store, options) if Symbol === @store end end def call(env) key = env['PATH_INFO'][1..-1].to_s case env['REQUEST_METHOD'] when 'HEAD' if key.empty? respond(400, 'Empty key') elsif @store.key?(key) empty(200) else empty(404) end when 'GET' if key.empty? respond(400, 'Empty key') elsif value = @store[key] respond(200, value) else empty(404) end when 'POST', 'PUT' if key.empty? respond(400, 'Empty key') else respond(200, @store[key] = env['rack.input'].read) end when 'DELETE' if key.empty? 
@store.clear empty(200) else respond(200, @store.delete(key)) end else respond(400, 'Bad method') end rescue => ex respond(500, "Exception: #{ex.message}") end private def empty(status) [status, { 'Content-Type' => 'application/octet-stream', 'Content-Length' => '0' }, []] end def respond(status, value) [status, { 'Content-Type' => 'application/octet-stream', 'Content-Length' => value.bytesize.to_s }, [value]] end end end moneta-1.5.2/lib/rack/moneta_store.rb000066400000000000000000000025431433316074200175230ustar00rootroot00000000000000require 'moneta' module Rack # A Rack middleware that inserts a Moneta store in the environment # and supports per-request caching via the the option `cache: true`. # # @example config.ru # # Add Rack::MonetaStore somewhere in your rack stack # use Rack::MonetaStore, :Memory, cache: true # # run lambda { |env| # env['rack.moneta_store'] # is a Moneta store with per-request caching # } # # @example config.ru # # Pass it a block like the one passed to Moneta.build # use Rack::MonetaStore do # use :Transformer, value: :zlib # adapter :Cookie # end # # run lambda { |env| # env['rack.moneta_store'] # is a Moneta store without caching # } # # @api public class MonetaStore def initialize(app, store = nil, options = {}, &block) @app = app @cache = options.delete(:cache) if block raise ArgumentError, 'Use either block or options' unless options.empty? @store = ::Moneta.build(&block) else raise ArgumentError, 'Block or argument store is required' unless @store = store @store = ::Moneta.new(@store, options) if Symbol === @store end end def call(env) env['rack.moneta_store'] = @cache ? 
::Moneta::Cache.new(cache: ::Moneta::Adapters::Memory.new, adapter: @store) : @store @app.call(env) end end end moneta-1.5.2/lib/rack/session/000077500000000000000000000000001433316074200161565ustar00rootroot00000000000000moneta-1.5.2/lib/rack/session/moneta.rb000066400000000000000000000040251433316074200177670ustar00rootroot00000000000000require 'moneta' require 'rack/session/abstract/id' module Rack module Session # Rack::Session::Moneta stores sessions in a Moneta store. # # Example: # # use Rack::Session::Moneta, key: 'rack.session', # domain: 'foo.com', # path: '/', # expire_after: 2592000, # store: Moneta.new(...) # # You can use all options supported by `Rack::Session::Abstract::ID`. # # @api public class Moneta < Abstract::ID attr_reader :mutex, :pool def initialize(app, options = {}, &block) super if block raise ArgumentError, 'Use either block or option :store' if options[:store] @pool = ::Moneta.build(&block) else raise ArgumentError, 'Block or option :store is required' unless @pool = options[:store] @pool = ::Moneta.new(@pool, expires: true) if Symbol === @pool end @pool = ::Moneta::WeakCreate.new(@pool) unless @pool.supports?(:create) @mutex = ::Mutex.new end def generate_sid loop do sid = super break sid unless @pool.key?(sid) end end def get_session(env, sid) with_lock(env) do unless sid && session = @pool[sid] session = {} loop do sid = generate_sid break if @pool.create(sid, session) end end [sid, session] end end def set_session(env, session_id, new_session, options) with_lock(env) do @pool.store(session_id, new_session, options[:expire_after] ? { expires: options[:expire_after] } : {}) session_id end end def destroy_session(env, session_id, options) with_lock(env) do @pool.delete(session_id) generate_sid unless options[:drop] end end def with_lock(env) @mutex.lock if env['rack.multithread'] yield ensure @mutex.unlock if @mutex.locked? 
end end end end moneta-1.5.2/moneta.gemspec000066400000000000000000000033541433316074200156420ustar00rootroot00000000000000require File.dirname(__FILE__) + '/lib/moneta/version' require 'date' Gem::Specification.new do |s| s.name = 'moneta' s.version = Moneta::VERSION s.date = Date.today.to_s s.authors = ['Daniel Mendler', 'Yehuda Katz', 'Hannes Georg', 'Alastair Pharo'] s.email = %w{mail@daniel-mendler.de wycats@gmail.com hannes.georg@googlemail.com me@asph.dev} s.description = 'A unified interface to key/value stores' s.extra_rdoc_files = %w{README.md SPEC.md LICENSE} s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } s.homepage = 'https://github.com/moneta-rb/moneta' s.licenses = %w(MIT) s.require_paths = %w(lib) s.summary = %{A unified interface to key/value stores, including Redis, Memcached, TokyoCabinet, ActiveRecord and many more} s.metadata = { 'bug_tracker_uri' => 'https://github.com/moneta-rb/moneta/issues', 'changelog_uri' => "https://github.com/moneta-rb/moneta/blob/v#{s.version}/CHANGES", 'documentation_uri' => "https://www.rubydoc.info/gems/moneta/#{s.version}", 'source_code_uri' => "https://github.com/moneta-rb/moneta/tree/v#{s.version}", } s.required_ruby_version = '>= 2.3.0' s.add_development_dependency 'multi_json', '~> 1.15.0' s.add_development_dependency 'parallel_tests', '~> 2.29.2' s.add_development_dependency 'rantly', '~> 1.2.0' s.add_development_dependency 'rspec', '~> 3.0' s.add_development_dependency 'rspec-retry', '~> 0.6.1' s.add_development_dependency 'rubocop', '~> 0.81.0' s.add_development_dependency 'timecop', '~> 0.9.1' end moneta-1.5.2/script/000077500000000000000000000000001433316074200143115ustar00rootroot00000000000000moneta-1.5.2/script/benchmarks000077500000000000000000000400671433316074200163630ustar00rootroot00000000000000#!/usr/bin/env ruby $: << File.join(File.dirname(__FILE__), '..', 
'lib') require 'benchmark' require 'moneta' require 'fileutils' require 'active_support' require 'active_support/cache/moneta_store' require_relative '../spec/restserver.rb' class String def random(n) (1..n).map { self[rand(size),1] }.join end end def mean arr arr.sum / arr.length end def stddev arr m = mean(arr) Math.sqrt(mean(arr.map {|s| (s - m) ** 2 })) end class MonetaBenchmarks DIR = __FILE__ + '.tmp' mysql_username = ENV['MONETA_MYSQL_USERNAME'] || 'root' mysql_password = ENV['MONETA_MYSQL_PASSWORD'] mysql_database1 = ENV['MONETA_MYSQL_DATABSASE1'] || 'moneta' mysql_database2 = ENV['MONETA_MYSQL_DATABSASE2'] || 'moneta2' postgres_username = ENV['MONETA_POSTGRES_USERNAME'] || 'postgres' postgres_database1 = ENV['MONETA_POSTGRES_DATABSASE1'] || 'moneta1' postgres_database2 = ENV['MONETA_POSTGRES_DATABSASE1'] || 'moneta2' couch_login = ENV['COUCH_LOGIN'] || 'admin' couch_password = ENV['COUCH_PASSWORD'] || 'password' STORES = [ # SDBM accepts only very short key/value pairs (1k for both) {name: "SDBM", sizes: [:small], options: {file: "#{DIR}/sdbm"}}, # YAML is too slow #{name: "YAML", options: {file: "#{DIR}/yaml"}}, { name: "ActiveRecord (MySQL)", adapter: :ActiveRecord, options: { table: 'activerecord', connection: { adapter: (defined?(JRUBY_VERSION) ? 'jdbcmysql' : 'mysql2'), username: mysql_username, database: mysql_database1 } } }, { name: "ActiveRecord (Postgres)", adapter: :ActiveRecord, options: { table: 'activerecord', connection: { adapter: (defined?(JRUBY_VERSION) ? 'jdbcpostgresql' : 'postgresql'), database: postgres_database1, username: postgres_username } } }, { name: "ActiveRecord (Sqlite)", adapter: :ActiveRecord, options: { table: 'activerecord', connection: { adapter: (defined?(JRUBY_VERSION) ? 
'jdbcsqlite3' : 'sqlite3'), database: "#{DIR}/activerecord_sqlite.db" } } }, { name: "ActiveSupportCache (Memory)", adapter: :ActiveSupportCache, options: { backend: ::ActiveSupport::Cache::MemoryStore.new } }, { name: "ActiveSupportCache (Redis)", adapter: :ActiveSupportCache, options: { backend: ::ActiveSupport::Cache::RedisCacheStore.new } }, { name: "ActiveSupportCache (Moneta Memory)", adapter: :ActiveSupportCache, options: { backend: ::ActiveSupport::Cache::MonetaStore.new(store: Moneta.new(:Memory)) } }, { name: "ActiveSupportCache (Moneta Redis)", adapter: :ActiveSupportCache, options: { backend: ::ActiveSupport::Cache::MonetaStore.new(store: Moneta.new(:Redis)) } }, {name: "Cassandra"}, {name: "Client (Memory)", adapter: :Client}, { name: "Couch", options: { backend: if defined?(JRUBY_VERSION) require 'faraday/adapter/manticore' ::Faraday.new("http://127.0.0.1:5984/moneta") { |f| f.adapter :manticore } end, login: couch_login, password: couch_password }, clear_options: { compact: true, await_compact: true }, }, { name: "DBM", options: {file: "#{DIR}/dbm"} }, { name: "DataMapper", options: { setup: "mysql://#{mysql_username}:@localhost/#{mysql_database1}", table: 'datamapper' } }, { name: "Daybreak", options: { file: "#{DIR}/daybreak" }, }, { name: "File", options: { dir: "#{DIR}/file" } }, {name: "GDBM", options: {file: "#{DIR}/gdbm"}}, {name: "HBase"}, {name: "HashFile", options: { dir: "#{DIR}/hashfile" }}, {name: "KyotoCabinet", options: { file: "#{DIR}/kyotocabinet.kch" }}, {name: "LRUHash"}, {name: "LevelDB", options: { dir: "#{DIR}/leveldb" }}, {name: "LocalMemCache", options: { file: "#{DIR}/lmc" }}, { name: "LMDB", options: { dir: "#{DIR}/lmdb", writemap: true, mapasync: true, nometasync: true, mapsize: 4096 * 3e2 } }, {name: "MemcachedDalli"}, unless defined?(JRUBY_VERSION) {name: "MemcachedNative"} end, {name: "Memory"}, {name: "Mongo"}, {name: "PStore", options: { file: "#{DIR}/pstore" }}, {name: "Redis"}, { name: "RestClient (Memory)", adapter: 
:RestClient, options: { url: 'http://127.0.0.1:8808/moneta', backend: if defined?(JRUBY_VERSION) require 'faraday/adapter/manticore' ::Faraday.new("http://127.0.0.1:8808/moneta") { |f| f.adapter :manticore } end } }, {name: "Riak"}, { name: "Sequel (MySQL)", adapter: :Sequel, options: { table: 'sequel', db: (defined?(JRUBY_VERSION) ? "jdbc:mysql://localhost/#{mysql_database1}?user=#{mysql_username}" : "mysql2://#{mysql_username}:@localhost/#{mysql_database1}") } }, { name: "Sequel (Postgres)", adapter: :Sequel, options: if defined?(JRUBY_VERSION) {db: "jdbc:postgresql://localhost/#{postgres_database1}?user=#{postgres_username}"} else { db: "postgres://localhost/#{postgres_database1}", user: postgres_username } end.merge(table: 'sequel') }, { name: "Sequel (HStore)", adapter: :Sequel, options: if defined?(JRUBY_VERSION) {db: "jdbc:postgresql://localhost/#{postgres_database1}?user=#{postgres_username}"} else { db: "postgres://localhost/#{postgres_database1}", user: postgres_username } end.merge(table: 'sequel_hstore', hstore: 'row') }, { name: "Sequel (Sqlite)", adapter: :Sequel, options: { table: 'sequel', db: "#{defined?(JRUBY_VERSION) && 'jdbc:'}sqlite://#{DIR}/sequel" } }, { name: "Sqlite (Memory)", adapter: :Sqlite, options: { file: ':memory:' } }, { name: "Sqlite (File)", adapter: :Sqlite, options: { file: "#{DIR}/sqlite" } }, {name: "TDB", options: { file: "#{DIR}/tdb" }}, {name: "TokyoCabinet", options: { file: "#{DIR}/tokyocabinet" }}, {name: "TokyoTyrant", options: {port: 10431}}, ].compact CONFIGS = { test: { runs: 2, keys: 10, min_key_len: 1, max_key_len: 32, key_dist: :uniform, min_val_len: 0, max_val_len: 256, val_dist: :uniform }, uniform_small: { runs: 3, keys: 1000, min_key_len: 1, max_key_len: 32, key_dist: :uniform, min_val_len: 0, max_val_len: 256, val_dist: :uniform }, uniform_medium: { runs: 3, keys: 1000, min_key_len: 3, max_key_len: 128, key_dist: :uniform, min_val_len: 0, max_val_len: 1024, val_dist: :uniform }, uniform_large: { runs: 3, 
keys: 100, min_key_len: 3, max_key_len: 128, key_dist: :uniform, min_val_len: 0, max_val_len: 10240, val_dist: :uniform }, normal_small: { runs: 3, keys: 1000, min_key_len: 1, max_key_len: 32, key_dist: :normal, min_val_len: 0, max_val_len: 256, val_dist: :normal }, normal_medium: { runs: 3, keys: 1000, min_key_len: 3, max_key_len: 128, key_dist: :normal, min_val_len: 0, max_val_len: 1024, val_dist: :normal }, normal_large: { runs: 3, keys: 100, min_key_len: 3, max_key_len: 128, key_dist: :normal, min_val_len: 0, max_val_len: 10240, val_dist: :normal }, } DICT = 'ABCDEFGHIJKLNOPQRSTUVWXYZabcdefghijklnopqrstuvwxyz123456789'.freeze module Rand extend self def normal_rand(mean, stddev) # Box-Muller transform theta = 2 * Math::PI * (rand(1e10) / 1e10) scale = stddev * Math.sqrt(-2 * Math.log(1 - (rand(1e10) / 1e10))) [mean + scale * Math.cos(theta), mean + scale * Math.sin(theta)] end def uniform(min, max) rand(max - min) + min end def normal(min, max) mean = (min + max) / 2 stddev = (max - min) / 4 loop do val = normal_rand(mean, stddev) return val.first if val.first >= min && val.first <= max return val.last if val.last >= min && val.last <= max end end end def header (" " * @name_len) + " Minimum Maximum Total Mean Stddev Ops/s" end def separator "=" * header.length end def parallel(&block) if defined?(JRUBY_VERSION) Thread.new(&block) else Process.fork(&block) end end def write_histogram(file, sizes) min = sizes.min delta = sizes.max - min histogram = [] sizes.each do |s| s = 10 * (s - min) / delta histogram[s] ||= 0 histogram[s] += 1 end File.open(file, 'w') do |f| histogram.each_with_index { |n,i| f.puts "#{i*delta/10+min} #{n}" } end end def start_servers @moneta_server_handle = parallel do begin Moneta::Server.new(Moneta.new(:Memory)).run rescue => ex puts "\e[31mFailed to start Moneta server - #{ex.message}\e[0m" end end @restserver_handle = start_restserver(8808) @tokyotyrant_handle = spawn("ttserver -port 10431 -le -log #{DIR}/tokyotyrant.log 
#{DIR}/tokyotyrant.tch") Signal.trap "INT" do stop_servers exit end sleep 1 # Wait for servers end def stop_servers if @restserver_handle stop_restserver(@restserver_handle) @restserver_handle = nil end case @moneta_server_handle when Thread Thread.kill @moneta_server_handle when Integer Process.kill "TERM", @moneta_server_handle Process.wait @moneta_server_handle end Process.kill "TERM", @tokyotyrant_handle end def test_stores @stores.select! do |spec| adapter = spec[:adapter] || spec[:name].to_sym options = spec[:options] || {} begin if adapter == :DataMapper begin require 'dm-core' DataMapper.setup(:default, adapter: :in_memory) rescue LoadError => ex puts "\e[31mFailed to load DataMapper - #{ex.message}\e[0m" end elsif adapter == :Riak require 'riak' Riak.disable_list_keys_warnings = true end cache = Moneta.new(adapter, options.dup) cache['test'] = 'test' true rescue Exception => ex puts "\e[31m#{spec[:name]} not benchmarked - #{ex.message}\e[0m" false ensure (cache.close rescue nil) if cache end end end def generate_data until @data.size == @config[:keys] key = DICT.random(Rand.send(@config[:key_dist], @config[:min_key_len], @config[:max_key_len])) @data[key] = DICT.random(Rand.send(@config[:val_dist], @config[:min_val_len], @config[:max_val_len])) end key_lens, val_lens = @data.keys.map(&:size), @data.values.map(&:size) @data = @data.to_a write_histogram("#{DIR}/key.histogram", key_lens) write_histogram("#{DIR}/value.histogram", val_lens) puts "\n\e[1m\e[34m#{separator}\n\e[34mComputing keys and values...\n\e[34m#{separator}\e[0m" puts " " * @name_len + %{ Minimum Maximum Total Mean Stddev} puts 'Key Length'.ljust(@name_len) + ' % 8d % 8d % 8d % 8d % 8d' % [key_lens.min, key_lens.max, key_lens.sum, mean(key_lens), stddev(key_lens)] puts 'Value Length'.ljust(@name_len) + ' % 8d % 8d % 8d % 8d % 8d' % [val_lens.min, val_lens.max, val_lens.sum, mean(val_lens), stddev(val_lens)] end def print_config puts "\e[1m\e[36m#{separator}\n\e[36mConfig 
#{@config_name}\n\e[36m#{separator}\e[0m" @config.each do |k,v| puts '%-16s = %-10s' % [k,v] end end def print_store_stats(name) puts "\n" + header [:write, :read, :sum].each do |i| ops = (1000 * @config[:runs] * @data.size) / @stats[name][i].sum line = "%-#{@name_len-1}.#{@name_len-1}s %-5s % 8d % 8d % 8d % 8d % 8d % 8d" % [name, i, @stats[name][i].min, @stats[name][i].max, @stats[name][i].sum, mean(@stats[name][i]), stddev(@stats[name][i]), ops] @summary << [-ops, line << "\n"] if i == :sum puts line end errors = @stats[name][:error].sum if errors > 0 puts "\e[31m%-23.23s % 8d % 8d % 8d % 8d\e[0m" % ['Read errors', @stats[name][:error].min, @stats[name][:error].max, errors, errors / @config[:runs]] else puts "\e[32mNo read errors" end end def benchmark_store(spec) name = spec[:name] adapter = spec[:adapter] || spec[:name].to_sym options = spec[:options] || {} puts "\n\e[1m\e[34m#{separator}\n\e[34m#{name}\n\e[34m#{separator}\e[0m" store = Moneta.new(adapter, options.dup) @stats[name] = { write: [], read: [], sum: [], error: [] } %w(Rehearse Measure).each do |type| state = '' print "%s [%#{2 * @config[:runs]}s] " % [type, state] @config[:runs].times do |run| store.clear(spec[:clear_options] || {}) @data.shuffle! m1 = Benchmark.measure do @data.each {|k,v| store[k] = v } end print "%s[%-#{2 * @config[:runs]}s] " % ["\b" * (2 * @config[:runs] + 3), state << 'W'] @data.shuffle! 
error = 0 m2 = Benchmark.measure do @data.each do |k, v| error += 1 if v != store[k] end end print "%s[%-#{2 * @config[:runs]}s] " % ["\b" * (2 * @config[:runs] + 3), state << 'R'] if type == 'Measure' @stats[name][:write] << m1.real * 1000 @stats[name][:error] << error @stats[name][:read] << m2.real * 1000 @stats[name][:sum] << (m1.real + m2.real) * 1000 end end end print_store_stats(name) rescue StandardError => ex puts "\n\e[31mFailed to benchmark #{name} - #{ex.message}\e[0m\n" ensure store.close if store end def run_benchmarks @stores.each do |spec| benchmark_store(spec) sleep 1 end end def print_summary puts "\n\e[1m\e[36m#{separator}\n\e[36mSummary #{@config_name}: #{@config[:runs]} runs, #{@data.size} keys\n\e[36m#{separator}\e[0m\n#{header}\n" @summary.sort_by(&:first).each do |entry| puts entry.last end end def initialize(args) @config_name = args.size == 1 ? args.first.to_sym : :uniform_medium unless @config = CONFIGS[@config_name] puts "Configuration #{@config_name} not found" exit end @size = @config_name.to_s.split('_').last.to_sym @stores = if ENV['MONETA_STORES'] store_names = ENV['MONETA_STORES'].split(/,\s*/) STORES.select { |spec| store_names.any? 
{ |name| name == spec[:name] } } elsif ENV['MONETA_STORES_MATCHING'] r = Regexp.new(ENV['MONETA_STORES_MATCHING']) STORES.select { |spec| spec[:name].match(r) } else STORES end.select { |spec| !spec.key?(:sizes) || spec[:sizes].include?(@size) } @name_len = (@stores.map { |spec| spec[:name] }.map(&:length) + ["Value Length".length]).max + 2 # Disable jruby stdout pollution by memcached if defined?(JRUBY_VERSION) require 'java' properties = java.lang.System.getProperties(); properties.put('net.spy.log.LoggerImpl', 'net.spy.memcached.compat.log.SunLogger'); java.lang.System.setProperties(properties); java.util.logging.Logger.getLogger('').setLevel(java.util.logging.Level::OFF) end @stats, @data, @summary = {}, {}, [] end def run FileUtils.rm_rf(DIR) FileUtils.mkpath(DIR) start_servers test_stores print_config generate_data run_benchmarks print_summary stop_servers FileUtils.rm_rf(DIR) end end MonetaBenchmarks.new(ARGV).run moneta-1.5.2/script/contributors000077500000000000000000000005671433316074200170040ustar00rootroot00000000000000#!/usr/bin/env ruby contributors = `git log --format='%aN <%aE>'` .gsub(/hiddenbek/, 'Scott Wadden') .gsub(/Asmod4n/, 'Hendrik Beskow') .lines .uniq .reject { |line| line.match? /asppsa@gmail.com|hannes.georg@xing.com|spotapov|yehuda-katzs-mac|wycats / } .sort { |str1, str2| str1.casecmp(str2) } file = File.open('CONTRIBUTORS', 'w') file << contributors.join moneta-1.5.2/script/memusage000077500000000000000000000011571433316074200160460ustar00rootroot00000000000000#!/usr/bin/env ruby $: << File.join(File.dirname(__FILE__), '..', 'lib') def memusage `pmap #{$$} | tail -1`[10..-1].strip.to_i end def shrink last = memusage loop do GC.start sleep 1 m = memusage break if m == last last = m end end $last_memusage = 0 def stats shrink m = memusage delta = m - $last_memusage $last_memusage = m puts "# #{m}K #{delta >= 0 ? 
'+' : ''}#{delta}K" end stats %q{require 'moneta' Moneta.new(:Memory) Moneta.new(:File, dir: 'filestore') Moneta.new(:MemcachedNative) Moneta.new(:MemcachedDalli)}.each_line do |line| puts line eval(line) stats end moneta-1.5.2/script/parallel-tests000077500000000000000000000041401433316074200171720ustar00rootroot00000000000000#!/usr/bin/env ruby ENV['PARALLEL_TESTS'] = 'yes' require 'multi_json' def tag_args tags tags.flat_map{ |tag| ['--tag', tag] } end def example_ids tags, specs json = `bundle exec rspec -f j --dry-run #{tag_args(tags).join(' ')} -- #{specs.join(' ')}` data = MultiJson.load(json) data['examples'].map{ |example| example['id'] } end def run(*args) pid = spawn(*args) Signal.trap("INT") { Process.kill("INT", pid) } Process.wait(pid) $? == 0 ensure Signal.trap("INT", "DEFAULT") end tags = ARGV.take_while { |arg| arg[0] != '-' } ARGV.shift(tags.length) opts = [] files = nil while arg = ARGV.shift case arg when '--' files = ARGV break when '--remainder' files = Dir['spec/**/*_spec.rb', 'test/**/*_test.rb'] existing = File.open('.travis.yml').each_line.flat_map do |line| next unless matches = line.match(%r{((?:test|spec)/(?:[\w\.]+/?)*)}) path = matches[1] path[-3..-1] == '.rb' ? path : path + '/**/*.rb' end.compact files -= Dir[*existing] else opts << arg end end files ||= Dir['spec', 'test/**/*_test.rb'] specs, tests = files.partition { |file| file.match /^spec/ } puts "The following specs will be executed:\n\t#{specs.join "\n\t"}\n\n" unless specs.empty? puts "The following tests will be executed:\n\t#{tests.join "\n\t"}\n\n" unless tests.empty? results = [] unless specs.empty? # run all non :isolate examples in parallel results << run(*%w{bundle exec parallel_rspec --}, *opts, *tag_args(tags | %w{~isolate}), '--', *specs) # find the example IDs of the isolate examples to be run in serial ids = example_ids(tags, specs) - example_ids(tags | %w{~isolate}, specs) unless ids.empty? 
results << run(*%w{bundle exec rspec}, *opts, '--', *ids) end end tests.each do |test| results << run(*%w{bundle exec ruby}, test) end if results.any?{ |result| !result } puts "\e[31m########## MONETA TESTSUITE FAILED ##########\e[0m" exit 1 end puts "\e[32m########## MONETA TESTSUITE SUCCEDED ##########\e[0m" moneta-1.5.2/script/start-couchdb000077500000000000000000000016551433316074200170100ustar00rootroot00000000000000#!/bin/bash set -e # Copied from https://github.com/apache/couchdb-pkg/blob/master/debian/README.Debian COUCHDB_PASSWORD=password echo "couchdb couchdb/mode select standalone couchdb couchdb/mode seen true couchdb couchdb/bindaddress string 127.0.0.1 couchdb couchdb/bindaddress seen true couchdb couchdb/adminpass password ${COUCHDB_PASSWORD} couchdb couchdb/adminpass seen true couchdb couchdb/adminpass_again password ${COUCHDB_PASSWORD} couchdb couchdb/adminpass_again seen true" | sudo debconf-set-selections DEBIAN_FRONTEND=noninteractive sudo apt-get install -y --force-yes couchdb # Reconfigure CouchDB to use delayed commits for speed: http://guide.couchdb.org/draft/performance.html sudo sed -i '/\[couchdb\]/a delayed_commits = true' /etc/couchdb/local.ini # (Re)start couchdb sudo systemctl restart couchdb # Display some info about CouchDB sudo systemctl status couchdb until curl http://localhost:5984/; do sleep 1 done moneta-1.5.2/script/start-hbase000077500000000000000000000022061433316074200164540ustar00rootroot00000000000000#!/bin/sh set -e cd $(dirname $(dirname $0)) mkdir -p hbase mkdir -p zookeeper mkdir -p downloads root=$(pwd) version=$(curl -sS https://downloads.apache.org/hbase/stable/RELEASENOTES.md | grep -oP '(?<=# HBASE\s\s)(\d+\.?)+' | head -n1) echo HBase stable version is $version if [ ! -f downloads/hbase-$version-bin.tar.gz ]; then echo Downloading HBase ... wget -P downloads https://downloads.apache.org/hbase/stable/hbase-$version-bin.tar.gz fi echo Extracting HBase ... 
cd hbase tar -zxf $root/downloads/hbase-$version-bin.tar.gz echo Configuring HBase ... echo "export JAVA_HOME=/usr" >> hbase-$version/conf/hbase-env.sh tee <<-EOF > hbase-$version/conf/hbase-site.xml hbase.rootdir file://$root/hbase hbase.zookeeper.property.dataDir $root/zookeeper hbase.unsafe.stream.capability.enforce false EOF echo Launching HBase ... ./hbase-$version/bin/start-hbase.sh ./hbase-$version/bin/hbase-daemon.sh start thrift cd $root moneta-1.5.2/script/start-services000077500000000000000000000002421433316074200172130ustar00rootroot00000000000000#!/bin/bash # Install and start CouchDB $(dirname $0)/start-couchdb # Install and start HBase $(dirname $0)/start-hbase # Waiting for servers to start sleep 3 moneta-1.5.2/script/travis-logs000077500000000000000000000007371433316074200165200ustar00rootroot00000000000000#! /usr/bin/env ruby require 'travis' require 'cgi' require 'fileutils' FileUtils.mkpath('logs') `git remote -v | grep origin | head -n1` =~ /github\.com:(.*?)\.git/ repo = Travis::Repository.find($1) build = repo.recent_builds.select {|b| b.finished_at }.first build.jobs.each do |j| c = j.config id = "logs/#{j.allow_failures? ? 'allowed-' : ''}#{j.state}-#{c['rvm']}-#{CGI.escape c['env']}" puts "Downloading #{j.id}" File.open(id, 'w') {|f| f.write(j.log.body) } end moneta-1.5.2/script/update-feature-matrix000077500000000000000000000122161433316074200204560ustar00rootroot00000000000000#!/usr/bin/env ruby # coding: utf-8 require 'rubygems' require 'bundler/setup' Bundler.require(:doc) require 'yaml' output = "\n" output << <<-TAB.lines.map(&:strip).join TAB output << "\n\n" footnotes = { "platform" => <<-EOF, Indicates that the adapter is expected to work on this platform. Most adapters will at least work on MRI, but some are curently considered unstable, in which case they are not supported on any platform. 
EOF "multi-thread safe" => <<-EOF, Make adapters thread-safe by using `Moneta::Lock` or by passing the option `threadsafe: true` to `Moneta#new`. There is also `Moneta::Pool` which can be used to share a store between multiple threads if the store is multi-process safe. I recommend to add the option `:threadsafe` to ensure thread-safety since for example under JRuby and Rubinius even the basic datastructures are not thread safe due to the lack of a global interpreter lock (GIL). This differs from MRI where some adapters might appear thread safe already but only due to the GIL. EOF "multi-process safe" => <<-EOF, Share a Moneta store between multiple processes using `Moneta::Shared` (See below). EOF "atomic increment" => <<-EOF, If a store provides atomic increment it can be used with `Moneta::Semaphore`. You can add weak `#increment` support using the `Moneta::WeakIncrement` proxy. EOF "atomic create" => <<-EOF, If a store provides atomic creation it can be used with `Moneta::Mutex`. You can add weak `#create` support using the `Moneta::WeakCreate` proxy. EOF "native expires" => <<-EOF, Add expiration support by using `Moneta::Expires` or by passing the option `expires: true` to `Moneta#new`. EOF "bulk read" => <<-EOF, This indicates that there is some performance gain when fetching multiple values at once using `#values_at`/`#fetch_values` or `#slice`. For instance, the `MGET` instruction in Redis, or the ability to retrieve several rows in one query in SQL. EOF "bulk write" => <<-EOF This indicates that there is some performance gain when storing multiple key/value pairs at once using `#merge!`/`#update`. 
EOF } YAML.parse_stream(File.read(File.join(File.dirname(File.dirname(__FILE__)), 'feature_matrix.yaml'))) do |document| feature_group = document.to_ruby output << %{\n\n} feature_group['notes'].each do |k,v| footnotes[k] = v end feature_group['backends'].each do |backend| output << '' output << "" output << "" features = backend['features'] | (feature_group['features'] || []) features += backend['platforms'] || [] %w{MRI JRuby threadsafe multiprocess increment create expires persist each_key bulk_read bulk_write}.each do |feature| supported = if features.include? feature "yes" elsif backend['unknown'] && backend['unknown'].include?(feature) "unknown" else "no" end note = if backend['notes'] && backend['notes'][feature] "#{footnotes.keys.index(backend['notes'][feature]) + 1}" else '' end mark = case supported when "yes" "✓" when "no" "✗" when "unknown" '?' end colour = case supported when "yes" '#5F5' when "no" '#F44' when "unknown" '#55F' end output << %{} end html_description = if backend['description'] Kramdown::Document.new(backend['description']).to_html.match('

(.*)

')[1] else '' end output << "" output << '' output << "\n\n" end end output << "
AdapterRequired gems MRI support1 JRuby support1 Multi-thread safe2 Multi-process safe3 Atomic increment4 Atomic create5 Native expires6 Persistent Key Traversal Bulk read7 Bulk write8 Description
#{feature_group['group']}
#{backend['adapter']}#{backend['gems'] || '-'}#{mark}#{note}#{html_description}
\n\n" footnotes.each_value.each_with_index do |note, idx| output << "#{idx+1}. #{note.lines.map(&:strip).join(" ")}\n" end readme = File.open('README.md', 'r+') new_readme = readme.read.sub(/(name="backend-matrix".*?\n).*?(------)/m, "\\1\n#{output}\n\\2") readme.rewind readme << new_readme readme.truncate(readme.tell) moneta-1.5.2/spec/000077500000000000000000000000001433316074200137375ustar00rootroot00000000000000moneta-1.5.2/spec/active_support/000077500000000000000000000000001433316074200170065ustar00rootroot00000000000000moneta-1.5.2/spec/active_support/cache_moneta_store_spec.rb000066400000000000000000000210721433316074200241710ustar00rootroot00000000000000require 'moneta' require 'active_support' require 'active_support/cache/moneta_store' require 'ostruct' require_relative '../moneta/adapters/memcached_helper.rb' describe "cache_moneta_store" do before(:all) do @events = [] ActiveSupport::Notifications.subscribe(/^cache_(.*)\.active_support$/) do |*args| @events << ActiveSupport::Notifications::Event.new(*args) end end before(:each) do @events.clear end # All stores should implement this basic behavior. 
shared_examples :basic_store do let(:rabbit) { OpenStruct.new name: 'bunny' } let(:white_rabbit) { OpenStruct.new color: 'white' } before(:each) do store.clear store.write 'rabbit', rabbit @events.clear end it 'reads the data' do expect(store.read('rabbit')).to eq rabbit end it 'writes the data' do store.write 'rabbit', white_rabbit expect(store.read('rabbit')).to eq white_rabbit end it 'deletes data' do store.delete 'rabbit' expect(store.read('rabbit')).to be_nil end it 'verifies existence of an object in the store' do expect(store.exist?('rabbit')).to be true expect(!!store.exist?('rab-a-dub')).to be false end it 'fetches data' do expect(store.fetch('rabbit')).to eq rabbit expect(store.fetch('rub-a-dub')).to be_nil store.fetch('rub-a-dub') { 'Flora de Cana' } expect(store.fetch('rub-a-dub')).to eq 'Flora de Cana' end it 'reads multiple keys' do store.write 'irish whisky', 'Jameson' result = store.read_multi 'rabbit', 'irish whisky' expect(result['rabbit']).to eq rabbit expect(result['irish whisky']).to eq 'Jameson' end it 'reads multiple keys and returns only the matched ones' do store.delete 'irish whisky' result = store.read_multi 'rabbit', 'irish whisky' expect(result).not_to include('irish whisky') expect(result).to include('rabbit') end it 'fetches multiple keys and fills in the missing ones' do store.delete 'irish whisky' result = store.fetch_multi('rabbit', 'irish whisky') do |k| k + ' was missing' end expect(result['rabbit']).to eq rabbit expect(result['irish whisky']).to eq 'irish whisky was missing' expect(store.fetch('irish whisky')).to eq 'irish whisky was missing' end end shared_examples :expiry do it 'writes the data with expiration time' do store.write 'rabbit', white_rabbit, expires_in: 0.2.second expect(store.read('rabbit')).to eq white_rabbit sleep 0.3 expect(store.read('rabbit')).to be_nil end it 'writes multiple values with expiration time' do store.write_multi({ 'rabbit' => white_rabbit, 'irish whisky' => 'Jameson' }, expires_in: 0.2.second) 
expect(store.read_multi('rabbit', 'irish whisky')).to eq \ 'rabbit' => white_rabbit, 'irish whisky' => 'Jameson' sleep 0.3 expect(store.read_multi('rabbit', 'irish whisky')).to be_empty end it "sets expiry on cache miss" do store.fetch('rabbit', force: true, expires_in: 0.2.second) { white_rabbit } expect(store.fetch('rabbit')).to eq white_rabbit sleep 0.3 expect(store.fetch('rabbit')).to be_nil end it 'does not set expiry on cache hit' do expect(store.fetch('rabbit', expires_in: 0.2.second) { white_rabbit }).to eq rabbit sleep 0.3 expect(store.fetch('rabbit')).to eq rabbit end end # A store *may* implement this shared_examples :increment_decrement do it 'increments a key' do store.write 'counter', 0, raw: true (1..3).each do |i| expect(store.increment('counter')).to eq i end expect(store.read('counter', raw: true).to_i).to eq 3 end it 'decrements a key' do store.write 'counter', 0, raw: true 3.times { store.increment 'counter' } 2.times { store.decrement 'counter' } expect(store.read('counter', raw: true).to_i).to eq 1 end it 'increments a key by given value' do store.write 'counter', 0, raw: true store.increment 'counter', 3 expect(store.read('counter', raw: true).to_i).to eq 3 end it 'decrements a key by given value' do store.write 'counter', 0, raw: true 3.times { store.increment 'counter' } store.decrement 'counter', 2 expect(store.read('counter', raw: true).to_i).to eq 1 end end shared_examples :basic_instrumentation do it 'notifies on #fetch' do store.fetch('radiohead') { 'House Of Cards' } read = @events.shift expect(read.name).to eq 'cache_read.active_support' expect(read.payload).to include(key: 'radiohead', super_operation: :fetch, hit: false) generate = @events.shift expect(generate.name).to eq 'cache_generate.active_support' expect(generate.payload).to include(key: 'radiohead') write = @events.shift expect(write.name).to eq 'cache_write.active_support' expect(write.payload).to include(key: 'radiohead') end it 'notifies on #read' do store.read 
'metallica' read = @events.shift expect(read.name).to eq 'cache_read.active_support' expect(read.payload).to include(key: 'metallica', hit: false) end it 'notifies on #write' do store.write 'depeche mode', 'Enjoy The Silence' write = @events.shift expect(write.name).to eq 'cache_write.active_support' expect(write.payload).to include(key: 'depeche mode') end it 'notifies on #delete' do store.delete 'the new cardigans' delete = @events.shift expect(delete.name).to eq 'cache_delete.active_support' expect(delete.payload).to include(key: 'the new cardigans') end it 'notifies on #exist?' do store.exist? 'the smiths' exist = @events.shift expect(exist.name).to eq 'cache_exist?.active_support' expect(exist.payload).to include(key: 'the smiths') end end # This doesn't seem to be documented at all, so we follow the # behavior of MemCacheStore. shared_examples :increment_decrement_instrumentation do it 'notifies on #increment' do store.increment 'pearl jam' increment = @events.shift expect(increment.name).to eq 'cache_increment.active_support' expect(increment.payload).to eq(key: 'pearl jam', amount: 1) end it 'notifies on #decrement' do store.decrement 'placebo' decrement = @events.shift expect(decrement.name).to eq 'cache_decrement.active_support' expect(decrement.payload).to eq(key: 'placebo', amount: 1) end end describe ActiveSupport::Cache::MonetaStore do shared_examples :moneta_store do include_examples :basic_store include_examples :expiry include_examples :increment_decrement include_examples :basic_instrumentation include_examples :increment_decrement_instrumentation # FIXME: no other store does this -- perhaps this should be # removed. 
it 'notifies on #clear' do store.clear clear = @events.shift expect(clear.name).to eq 'cache_clear.active_support' expect(clear.payload).to eq(key: nil) end end context "with :Memory store" do let(:store) { described_class.new(store: :Memory) } include_examples :moneta_store end context "with existing :Memory store" do let(:store) { described_class.new(store: ::Moneta.new(:Memory)) } include_examples :moneta_store end context "with Redis store", adapter: :Redis do let(:store) { described_class.new(store: :Redis) } include_examples :moneta_store end end describe ActiveSupport::Cache::MemoryStore do let(:store) { described_class.new } include_examples :basic_store include_examples :expiry include_examples :increment_decrement include_examples :basic_instrumentation end describe ActiveSupport::Cache::MemCacheStore, memcached: true do let(:store) { described_class.new('127.0.0.1:11213') } include_context :start_memcached, 11213 include_examples :basic_store include_examples :expiry include_examples :increment_decrement include_examples :basic_instrumentation include_examples :increment_decrement_instrumentation end describe ActiveSupport::Cache::RedisCacheStore, redis: true do let(:store) { described_class.new(url: "redis:///3") } include_examples :basic_store include_examples :expiry include_examples :increment_decrement include_examples :basic_instrumentation include_examples :increment_decrement_instrumentation end end moneta-1.5.2/spec/features/000077500000000000000000000000001433316074200155555ustar00rootroot00000000000000moneta-1.5.2/spec/features/concurrent_create.rb000066400000000000000000000023201433316074200216040ustar00rootroot00000000000000shared_examples :concurrent_create do # Each thread attempts to create def create_thread(name) Thread.new do s = new_store begin (0...1000).map do |i| s.create(i.to_s, name, expires: false).tap do Thread.pass if rand(100) >= 99 end end ensure s.close end end end it 'have atomic create across multiple threads', isolate: 
true do names = %w{a b c} # Spawn threads and then group results (lists of true/false values) by # store index (0...1000) results = names .map { |name| create_thread(name) } .map(&:value) .transpose.each_with_index .map { |created_values, i| [i.to_s, created_values] } .to_h # Just a quick sanity check expect(results.length).to eq 1000 # Ensure that for each index, one and only one created value is true expect(results.map { |_, created_values| created_values.inject(:^) }).to all(be true) # Check that the when a call to create returned true, that the store # contains the correct value as a result expect(store.slice(*results.keys).to_h).to eq(results.map do |i, values| [i, names[values.index(true)]] end.to_h) end end moneta-1.5.2/spec/features/concurrent_increment.rb000066400000000000000000000022531433316074200223320ustar00rootroot00000000000000shared_examples :concurrent_increment do def increment_thread(name) Thread.new do s = new_store begin # Create an array where each entry is a list of all the return values # from calling increment for a particular key. 
increments = (0...100).map { [] } 100.times do 100.times do |j| increments[j] << s.increment(j.to_s, 1, expires: false) Thread.pass if rand(1000) >= 995 end end increments ensure s.close end end end it 'have atomic increment across multiple threads', isolate: true do results = %w{a b c} .map { |name| increment_thread(name) } .map(&:value) .transpose # Now the array is indexed by store key instead of thread # Sanity check expect(results.length).to eq 100 results.each do |ith_values| # ensure that for each pair in the triple there are no overlapping values expect(ith_values.combination(2).map { |a, b| a & b }).to all be_empty # ensure that when joined together they cover the full 1..300 range expect(ith_values.inject(:+)).to contain_exactly(*1..300) end end end moneta-1.5.2/spec/features/create.rb000066400000000000000000000012311433316074200173420ustar00rootroot00000000000000shared_examples :create do it 'creates the given key' do store.create('key','value').should be true store['key'].should == 'value' end it 'creates raw value with the given key' do store.raw.create('key','value').should be true store.raw['key'].should == 'value' end it 'does not create a key if it exists' do store['key'] = 'value' store.create('key','another value').should be false store['key'].should == 'value' end it 'supports Mutex' do a = Moneta::Mutex.new(store, 'mutex') b = Moneta::Mutex.new(store, 'mutex') a.lock.should be true b.try_lock.should be false a.unlock.should be_nil end end moneta-1.5.2/spec/features/create_expires.rb000066400000000000000000000012041433316074200211010ustar00rootroot00000000000000shared_examples :create_expires do it 'creates the given key and expires it' do store.create('key','value', expires: min_ttl).should be true store['key'].should == 'value' advance min_ttl 2.times { advance_next_tick } store.key?('key').should be false end it 'does not change expires if the key exists' do store.store('key', 'value', expires: false).should == 'value' 
store.create('key','another value', expires: min_ttl).should be false store['key'].should == 'value' advance min_ttl 2.times { advance_next_tick } store['key'].should == 'value' store.key?('key').should be true end end moneta-1.5.2/spec/features/default_expires.rb000066400000000000000000000007511433316074200212700ustar00rootroot00000000000000shared_examples :default_expires do it 'sets the default expiration time', default_expires: true do store['key1'] = 'val1' advance(t_res / 4.0) # sleep less than a single time-space store.key?('key1').should be true store.fetch('key1').should == 'val1' store.load('key1').should == 'val1' advance min_ttl 2.times { advance_next_tick } store.key?('key1').should be false store.fetch('key1').should be_nil store.load('key1').should be_nil end end moneta-1.5.2/spec/features/each_key.rb000066400000000000000000000061321433316074200176540ustar00rootroot00000000000000shared_examples :each_key do shared_examples "enumerable" do it 'returns an empty enum when there are no keys' do expect(each_key.call.count).to eq(0) end it 'returns collection with the stored key/s' do expect { store.store('1st_key', 'value') } .to change { each_key.call.to_a } .from([]) .to(['1st_key']) expect { store.store('2nd_key', 'value') } .to change { each_key.call.to_a.sort } .from(['1st_key']) .to(['1st_key', '2nd_key'].sort) end it 'when a lazy size implementation exist it returns the size of the collection or nil' do expect(store.each_key.size).to eq(nil) | eq(0) if !store.each_key.size.nil? && store.each_key.size.zero? 
expect { store.store('1st_key', 'value') } .to change { store.each_key.size } .from(0) .to(1) expect { store.store('2nd_key', 'value') } .to change { store.each_key.size } .from(1) .to(2) expect { store.delete('1st_key') } .to change { store.each_key.size } .from(2) .to(1) end end it 'doesn\'t duplicate keys' do expect { 2.times { |i| store.store('a_key', "#{i}_val") } } .to change { each_key.call.to_a } .from([]) .to(['a_key']) end it 'doesn\'t return deleted keys' do store.store('a_key', "a_val") store.store('b_key', "b_val") expect { store.delete('a_key') } .to change { each_key.call.to_a.sort } .from(['a_key', 'b_key'].sort) .to(['b_key']) end it 'allows checking and retrieving entries while enumerating' do store['a'] = 'b' store['c'] = 'd' each_key.call do |k| val = if k == 'a' then 'b' else 'd' end expect(store.key?(k)).to be true expect(store[k]).to eq val expect(store.fetch(k)).to eq val end end end context "when a block is not given" do let(:each_key) do store.method(:each_key) end include_examples 'enumerable' it "returns the store if a block is given to #each" do expect(store.each_key.each.each.each{}).to eq store end end context "when a block is given" do let :each_key do proc do |&block| if block store.each_key(&block) else Enumerator.new do |y| store.each_key(&y.method(:<<)) end end end end include_examples 'enumerable' it 'yields the keys to the block' do # Make a list of keys that we expect to find in the store keys = [] 2.times do |i| key = "key_#{i}" keys << key store.store(key, "#{i}_val") end # Enumerate the store, making store that at each iteration we find one of # the keys we are looking for expect(store.each_key do |k| expect(keys.delete(k)).not_to be_nil end).to eq(store) # To assert that all keys were seen by the block expect(keys).to be_empty end it "returns the store" do expect(store.each_key{}).to eq store end end end 
moneta-1.5.2/spec/features/expires.rb000066400000000000000000000203061433316074200175620ustar00rootroot00000000000000shared_examples :expires do before do raise "t_res must be <= min_ttl" unless t_res <= min_ttl end # All methods that are used for updating that include an :expires parameter shared_examples :updater_expiry do context "with a positive numeric :expires parameter" do before do updater.call(expires: min_ttl) end it 'causes the value to expire after the given number of seconds' do keys.zip(values).each do |key, value| expect(store.load(key)).to eq value expect(store[key]).to eq value end advance min_ttl 2.times { advance_next_tick } keys.each do |key, value| expect(store.load(key)).to be_nil expect(store[key]).to be_nil end end end shared_examples :updater_no_expires do it 'causes the value not to expire after the given number of seconds' do updater.call(expires: expires) keys.zip(values).each do |key, value| expect(store.load(key)).to eq value expect(store[key]).to eq value end advance min_ttl 2.times { advance_next_tick } keys.zip(values).each do |key, value| expect(store.load(key)).to eq value expect(store[key]).to eq value end end end context "with a zero :expires parameter" do let(:expires) { 0 } include_examples :updater_no_expires end context "with a false :expires parameter" do let(:expires) { false } include_examples :updater_no_expires end end # All methods that are used to for loading, and that include an expire parameter shared_examples :loader_expiry do it "does not affect expiry if the value is not present" do expect(loader.call(expires: min_ttl)).to be_absent expect(loader.call).to be_absent end shared_examples :loader_expires do context 'when passed a positive numeric :expires parameter' do it 'changes the expiry of the value(s) to the given number of seconds' do expect(loader.call).to be_present expect(loader.call(expires: min_ttl + 2 * t_res)).to be_present advance min_ttl advance_next_tick expect(loader.call).to be_present 2.times { 
advance_next_tick } expect(loader.call).to be_absent end end end context 'with previously stored expiring value(s)' do before do keys.zip(values).each do |key, value| store.store(key, value, expires: min_ttl) end end include_examples :loader_expires shared_examples :loader_no_expires do it "changes the expiry of the value(s) so that they don't expire" do expect(loader.call(expires: expires)).to be_present advance min_ttl advance_next_tick expect(loader.call).to be_present end end context "when passed a zero :expires parameter" do let(:expires) { 0 } include_examples :loader_no_expires end context "when passed false :expires parameter" do let(:expires) { false } include_examples :loader_no_expires end shared_examples :loader_no_effect do it 'does not affect the expiry time' do expect(loader_no_effect.call).to be_present advance min_ttl advance_next_tick expect(loader.call).to be_absent end end context 'when passed a nil :expires parameter' do let(:loader_no_effect) { lambda { loader.call(expires: nil) } } include_examples :loader_no_effect end context 'when not passed an :expires parameter' do let(:loader_no_effect) { loader } include_examples :loader_no_effect end end context "with previously stored not expiring value(s)" do before do keys.zip(values).each do |key, value| store.store(key, value, expires: false) end end include_examples :loader_expires end end describe '#store' do let(:keys) { ['key1'] } let(:values) { ['value1'] } let(:updater) do lambda do |**options| expect(store.store(keys[0], values[0], options)).to eq values[0] end end include_examples :updater_expiry end describe '#load' do let(:keys) { ['key1'] } let(:values) { ['value1'] } let(:loader) do lambda { |**options| store.load('key1', options) } end let(:be_present) { eq 'value1' } let(:be_absent) { be_nil } include_examples :loader_expiry end describe '#key?' 
do let(:keys) { ['key1'] } let(:values) { ['value1'] } let(:loader) do lambda { |**options| store.key?('key1', options) } end let(:be_present) { be true } let(:be_absent) { be false } include_examples :loader_expiry end describe '#fetch' do let(:keys) { ['key1'] } let(:values) { ['value1'] } let(:be_present) { eq 'value1' } context "with default given as second parameter" do let(:loader) do lambda { |**options| store.fetch('key1', 'missing', options) } end let(:be_absent) { eq 'missing' } include_examples :loader_expiry end context "with default given as a block" do let(:loader) do lambda { |**options| store.fetch('key1', options) { 'missing' } } end let(:be_absent) { eq 'missing' } include_examples :loader_expiry end context "with nil default given" do let(:loader) do lambda { |**options| store.fetch('key1', nil, options) } end let(:be_absent) { be_nil } include_examples :loader_expiry end end describe '#delete' do context 'with an already expired value' do before do store.store('key2', 'val2', expires: min_ttl) expect(store['key2']).to eq 'val2' advance min_ttl advance_next_tick end it 'does not return the expired value' do expect(store.delete('key2')).to be_nil end end end describe '#expires' do it "returns a store with access to the same items" do store.store('persistent_key', 'persistent_value', expires: false) store_expires = store.expires(min_ttl) expect(store_expires['persistent_key']).to eq 'persistent_value' end it "returns a store with default expiry set" do store_expires = store.expires(min_ttl) expect(store_expires.store('key1', 'val1')).to eq 'val1' expect(store_expires['key1']).to eq 'val1' advance min_ttl advance_next_tick expect(store['key1']).to be_nil end end describe '#merge!' 
do let(:keys) { ['key1', 'key2'] } let(:values) { ['value1', 'value2'] } let(:updater) do lambda do |**options| expect(store.merge!(keys.zip(values), options)).to eq store end end include_examples :updater_expiry end describe '#values_at' do let(:keys) { ['key1', 'key2'] } let(:values) { ['value1', 'value2'] } let(:loader) do lambda { |**options| store.values_at('key1', 'key2', **options) } end let(:be_present) { eq ['value1', 'value2'] } let(:be_absent) { eq [nil, nil] } include_examples :loader_expiry end describe '#fetch_values' do let(:keys) { ['key1', 'key2'] } let(:values) { ['value1', 'value2'] } let(:be_present) { eq ['value1', 'value2'] } context 'with default values given via a block' do let(:loader) do lambda do |**options| store.fetch_values('key1', 'key2', **options) { |k| "#{k} missing" } end end let(:be_absent) { eq ['key1 missing', 'key2 missing'] } include_examples :loader_expiry end context 'without default values given' do let(:loader) do lambda do |**options| store.fetch_values('key1', 'key2', **options) end end let(:be_absent) { eq [nil, nil] } include_examples :loader_expiry end end describe '#slice' do let(:keys) { ['key1', 'key2'] } let(:values) { ['value1', 'value2'] } let(:loader) do lambda { |**options| store.slice('key1', 'key2', **options) } end let(:be_present) { contain_exactly(['key1', 'value1'], ['key2', 'value2']) } let(:be_absent) { be_empty } include_examples :loader_expiry end end moneta-1.5.2/spec/features/features.rb000066400000000000000000000005771433316074200177310ustar00rootroot00000000000000shared_examples :features do it 'should report correct features' do expect(store.features).to contain_exactly(*features) end it 'should have frozen features' do store.features.frozen?.should be true end it 'should have #supports?' 
do features.each do |f| store.supports?(f).should be true end store.supports?(:unknown).should be false end end moneta-1.5.2/spec/features/increment.rb000066400000000000000000000063641433316074200200770ustar00rootroot00000000000000shared_examples :increment do it 'initializes in #increment with 1' do expect(store.key?('inckey')).to be false expect(store.increment('inckey')).to eq 1 expect(store.key?('inckey')).to be true expect(store.raw['inckey'].to_s).to match(/^1\b/) expect(store.raw.load('inckey').to_s).to match(/^1\b/) expect(store.load('inckey', raw: true).to_s).to match(/^1\b/) expect(store.delete('inckey', raw: true).to_s).to match(/^1\b/) expect(store.key?('inckey')).to be false end it 'initializes in #increment with higher value' do expect(store.increment('inckey', 42)).to eq 42 expect(store.key?('inckey')).to be true expect(store.raw['inckey'].to_s).to match(/^42\b/) expect(store.delete('inckey', raw: true).to_s).to match(/^42\b/) end it 'initializes in #increment with 0' do expect(store.increment('inckey', 0)).to eq 0 expect(store.key?('inckey')).to be true expect(store.raw['inckey'].to_s).to match(/^0\b/) expect(store.delete('inckey', raw: true).to_s).to match(/^0\b/) end it 'initializes in #decrement with 0' do expect(store.decrement('inckey', 0)).to eq 0 expect(store.raw['inckey'].to_s).to match(/^0\b/) end it 'initializes in #decrement with negative value' do expect(store.decrement('inckey', -42)).to eq 42 expect(store.raw['inckey'].to_s).to match(/^42\b/) end it 'supports incrementing existing value by value' do expect(store.increment('inckey')).to eq 1 expect(store.increment('inckey', 42)).to eq 43 expect(store.raw['inckey'].to_s).to match(/^43\b/) end it 'supports decrementing existing value by value' do expect(store.increment('inckey')).to eq 1 expect(store.decrement('inckey')).to eq 0 expect(store.increment('inckey', 42)).to eq 42 expect(store.decrement('inckey', 2)).to eq 40 expect(store.raw['inckey'].to_s).to match(/^40\b/) end it 'supports 
incrementing existing value by 0' do expect(store.increment('inckey')).to eq 1 expect(store.increment('inckey', 0)).to eq 1 expect(store.raw['inckey'].to_s).to match(/^1\b/) end it 'supports decrementing existing value' do expect(store.increment('inckey', 10)).to eq 10 expect(store.increment('inckey', -5)).to eq 5 expect(store.raw['inckey'].to_s).to match(/^5\b/) expect(store.increment('inckey', -5)).to eq 0 expect(store.raw['inckey'].to_s).to match(/^0\b/) end it 'interprets raw value as integer' do store.store('inckey', '42', raw: true) expect(store.increment('inckey')).to eq 43 expect(store.raw['inckey'].to_s).to match(/^43\b/) end it 'raises error in #increment on non integer value' do store['strkey'] = 'value' expect do store.increment('strkey') end.to raise_error end it 'raises error in #decrement on non integer value' do store['strkey'] = 'value' expect do store.decrement('strkey') end.to raise_error end it 'supports Semaphore' do a = Moneta::Semaphore.new(store, 'semaphore', 2) b = Moneta::Semaphore.new(store, 'semaphore', 2) c = Moneta::Semaphore.new(store, 'semaphore', 2) a.synchronize do expect(a.locked?).to be true b.synchronize do expect(b.locked?).to be true expect(c.try_lock).to be false end end end end moneta-1.5.2/spec/features/marshallable_key.rb000066400000000000000000000023121433316074200213770ustar00rootroot00000000000000shared_examples :marshallable_key do it 'refuses to #[] from keys that cannot be marshalled' do expect do store[Struct.new(:foo).new(:bar)] end.to raise_error(marshal_error) end it 'refuses to load from keys that cannot be marshalled' do expect do store.load(Struct.new(:foo).new(:bar)) end.to raise_error(marshal_error) end it 'refuses to fetch from keys that cannot be marshalled' do expect do store.fetch(Struct.new(:foo).new(:bar), true) end.to raise_error(marshal_error) end it 'refuses to #[]= to keys that cannot be marshalled' do expect do store[Struct.new(:foo).new(:bar)] = 'value' end.to raise_error(marshal_error) end it 
'refuses to store to keys that cannot be marshalled' do expect do store.store Struct.new(:foo).new(:bar), 'value' end.to raise_error(marshal_error) end it 'refuses to check for #key? if the key cannot be marshalled' do expect do store.key? Struct.new(:foo).new(:bar) end.to raise_error(marshal_error) end it 'refuses to delete a key if the key cannot be marshalled' do expect do store.delete Struct.new(:foo).new(:bar) end.to raise_error(marshal_error) end end moneta-1.5.2/spec/features/marshallable_value.rb000066400000000000000000000003251433316074200217250ustar00rootroot00000000000000shared_examples :marshallable_value do it 'refuses to store values that cannot be marshalled' do expect do store.store 'key', Struct.new(:foo).new(:bar) end.to raise_error(marshal_error) end end moneta-1.5.2/spec/features/multiprocess.rb000066400000000000000000000003101433316074200206250ustar00rootroot00000000000000shared_examples :multiprocess do it 'supports access by multiple instances/processes' do store['key'] = 'val' store2 = new_store store2['key'].should == 'val' store2.close end end moneta-1.5.2/spec/features/not_create.rb000066400000000000000000000002501433316074200202220ustar00rootroot00000000000000shared_examples :not_create do it 'does not support #create' do expect do store.create('key','value') end.to raise_error(NotImplementedError) end end moneta-1.5.2/spec/features/not_each_key.rb000066400000000000000000000002371433316074200205340ustar00rootroot00000000000000shared_examples :not_each_key do it 'does not support #each_key' do expect do store.each_key end.to raise_error(NotImplementedError) end end moneta-1.5.2/spec/features/not_increment.rb000066400000000000000000000004631433316074200207510ustar00rootroot00000000000000shared_examples :not_increment do it 'does not support #increment' do expect do store.increment('inckey') end.to raise_error(NotImplementedError) end it 'does not support #decrement' do expect do store.increment('inckey') end.to 
raise_error(NotImplementedError) end end moneta-1.5.2/spec/features/not_persist.rb000066400000000000000000000002461433316074200204550ustar00rootroot00000000000000shared_examples :not_persist do it 'does not persist values' do store['key'] = 'val' store.close @store = nil store['key'].should be_nil end end moneta-1.5.2/spec/features/null.rb000066400000000000000000000041701433316074200170560ustar00rootroot00000000000000shared_examples :null do it 'reads from keys like a Hash' do moneta_property_of(keys: 1).check do |m| store[m.keys[0]].should be_nil store.load(m.keys[0]).should be_nil end end it 'guarantees that the same value is returned when setting a key' do moneta_property_of(keys: 1, values: 1).check do |m| (store[m.keys[0]] = m.values[0]).should equal(m.values[0]) end end it 'returns false from #key? if a key is not available' do moneta_property_of(keys: 1).check(1) do |m| store.key?(m.keys[0]).should be false end end it 'returns nil from delete if a value for a key does not exist' do moneta_property_of(keys: 1).check do |m| store.delete(m.keys[0]).should be_nil end end it 'removes all keys from the store with clear' do moneta_property_of(keys: 2, values: 2).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] = m.values[1] store.clear.should equal(store) store.key?(m.keys[0]).should be false store.key?(m.keys[1]).should be false end end it 'fetches a key with a default value with fetch, if the key is not available' do moneta_property_of(keys: 1, values: 1).check do |m| store.fetch(m.keys[0], m.values[0]).should == m.values[0] end end it 'fetches a key with a block with fetch, if the key is not available' do moneta_property_of(keys: 1, values: 1).check do |m| store.fetch(m.keys[0]) do |k| k.should equal(m.keys[0]) m.values[0] end.should equal(m.values[0]) end end it 'accepts frozen options' do moneta_property_of(keys: 1, values: 1).check do |m| options = {option1: 1, options2: 2} options.freeze store.clear.should equal(store) store.key?(m.keys[0], 
options).should be false store.load(m.keys[0], options).should be_nil store.fetch(m.keys[0], 42, options).should == 42 store.fetch(m.keys[0], options) { 42 }.should == 42 store.delete(m.keys[0], options).should be_nil store.clear(options).should equal(store) store.store(m.keys[0], m.values[0], options).should == m.values[0] end end end moneta-1.5.2/spec/features/persist.rb000066400000000000000000000005201433316074200175700ustar00rootroot00000000000000shared_examples :persist do it 'persists values' do moneta_property_of(keys: 1, values: 1).check do |m| new_store.tap do |store| store[m.keys[0]] = m.values[0] store.close end new_store.tap do |store| store[m.keys[0]].should == m.values[0] store.close end end end end moneta-1.5.2/spec/features/returndifferent.rb000066400000000000000000000005341433316074200213120ustar00rootroot00000000000000shared_examples :returndifferent do it 'guarantees that a different value is retrieved' do moneta_property_of(keys: 1, values: 1).check do |m| next if [TrueClass,FalseClass,NilClass,Numeric].any?(&m.values[0].method(:is_a?)) store[m.keys[0]] = m.values[0] store[m.keys[0]].should_not be_equal(m.values[0]) end end end moneta-1.5.2/spec/features/returnsame.rb000066400000000000000000000005111433316074200202640ustar00rootroot00000000000000shared_examples :returnsame do it 'guarantees that the same value is retrieved' do moneta_property_of(keys: 1, values: 1).check do |m| next if [TrueClass, FalseClass, Numeric].any?(&m.values[0].method(:is_a?)) store[m.keys[0]] = m.values[0] store[m.keys[0]].should be_equal(m.values[0]) end end end moneta-1.5.2/spec/features/store.rb000066400000000000000000000204351433316074200172420ustar00rootroot00000000000000shared_examples :store do it 'writes values to keys that like a Hash' do moneta_property_of(keys: 1, values: 1).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[0]].should == m.values[0] store.load(m.keys[0]).should == m.values[0] end end it 'returns true from #key? 
if a key is available' do moneta_property_of(keys: 1, values: 1).check do |m| store[m.keys[0]] = m.values[0] store.key?(m.keys[0]).should be true end end it 'stores values with #store' do moneta_property_of(keys: 1, values: 1).check do |m| value = m.values[0] store.store(m.keys[0], value).should equal(value) store[m.keys[0]].should == m.values[0] store.load(m.keys[0]).should == m.values[0] end end it 'stores values after clear' do moneta_property_of(keys: 2, values: 2).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] = m.values[1] store.clear.should equal(store) store[m.keys[0]] = m.values[0] store[m.keys[0]].should == m.values[0] store[m.keys[1]].should be_nil end end it 'removes and returns a value from the backing store via delete if it exists' do moneta_property_of(keys: 1, values: 1).check do |m| store[m.keys[0]] = m.values[0] store.delete(m.keys[0]).should == m.values[0] store.key?(m.keys[0]).should be false end end it 'overwrites existing values' do moneta_property_of(keys: 1, values: 2).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[0]].should == m.values[0] store[m.keys[0]] = m.values[1] store[m.keys[0]].should == m.values[1] end end it 'stores frozen values' do moneta_property_of(keys: 1, values: 1).check do |m| value = m.values[0].freeze (store[m.keys[0]] = value).should equal(value) store[m.keys[0]].should == m.values[0] end end it 'stores frozen keys' do moneta_property_of(keys: 1, values: 1).check do |m| key = m.keys[0].freeze store[key] = m.values[0] store[m.keys[0]].should == m.values[0] end end it 'fetches a key with a default value with fetch, if the key is available' do moneta_property_of(keys: 1, values: 2).check do |m| next if m.values[0].nil? store[m.keys[0]] = m.values[0] store.fetch(m.keys[0], m.values[1]).should == m.values[0] end end it 'does not run the block in fetch if the key is available' do moneta_property_of(keys: 1, values: 1).check do |m| next if m.values[0].nil? 
store[m.keys[0]] = m.values[0] unaltered = 'unaltered' store.fetch(m.keys[0]) { unaltered = 'altered' } unaltered.should == 'unaltered' end end shared_examples :values_at do |name| it 'retrieves stored values' do moneta_property_of(keys: 3, values: 3).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] = m.values[1] store[m.keys[2]] = m.values[2] expect(store.public_send(name, m.keys[1], m.keys[2], m.keys[0])).to eq [m.values[1], m.values[2], m.values[0]] store.clear end end it 'returns nil in place of missing values' do moneta_property_of(keys: 3, values: 2).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] = m.values[1] expect(store.public_send(name, m.keys[1], m.keys[2], m.keys[0])).to eq [m.values[1], nil, m.values[0]] store.clear end end end describe '#values_at' do include_examples :values_at, :values_at end describe '#fetch_values' do include_examples :values_at, :fetch_values it 'yields to the block, if given, for keys that are not in the store' do moneta_property_of(keys: 4, values: 3).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] = m.values[1] store[m.keys[2]] = m.values[2] expect do |b| store.fetch_values(m.keys[0], m.keys[1], m.keys[2], m.keys[3], &b) end.to yield_with_args(m.keys[3]) store.clear end end it 'uses the value of the block, if given, for keys that are not in the store' do moneta_property_of(keys: 4, values: 4).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] = m.values[1] store[m.keys[2]] = m.values[2] expect(store.fetch_values(m.keys[0], m.keys[1], m.keys[2], m.keys[3]) do |key| expect(key).to eq m.keys[3] m.values[3] end).to eq [m.values[0], m.values[1], m.values[2], m.values[3]] store.clear end end it 'raises any error raised in the block' do expect { store.fetch_values('key') { raise 'yarg' } }.to raise_error 'yarg' end end describe '#slice' do it 'returns pairs of stored keys and values' do moneta_property_of(keys: 3, values: 3).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] 
= m.values[1] store[m.keys[2]] = m.values[2] expect(store.slice(*[m.keys[0], m.keys[1], m.keys[2]].shuffle).to_a).to \ contain_exactly([m.keys[0], m.values[0]], [m.keys[1], m.values[1]], [m.keys[2], m.values[2]]) store.clear end end it 'does not return pairs for any keys absent from the store' do moneta_property_of(keys: 4, values: 3).check do |m| store[m.keys[0]] = m.values[0] store[m.keys[1]] = m.values[1] store[m.keys[2]] = m.values[2] expect(store.slice(*[m.keys[0], m.keys[1], m.keys[2], m.keys[3]].shuffle).to_a).to \ contain_exactly([m.keys[0], m.values[0]], [m.keys[1], m.values[1]], [m.keys[2], m.values[2]]) store.clear end end end shared_examples :merge! do it 'stores values' do moneta_property_of(keys: 3, values: 3).check do |m| expect(store.public_send(method, pairs.call({ m.keys[0] => m.values[0], m.keys[1] => m.values[1], m.keys[2] => m.values[2] }))).to be store expect(store.key?(m.keys[0])).to be true expect(store[m.keys[0]]).to eq m.values[0] expect(store.key?(m.keys[1])).to be true expect(store[m.keys[1]]).to eq m.values[1] expect(store.key?(m.keys[2])).to be true expect(store[m.keys[2]]).to eq m.values[2] store.clear end end it 'overwrites existing values' do moneta_property_of(keys: 2, values: 3).check do |m| expect(store[m.keys[0]] = m.values[0]).to eq m.values[0] expect(store.public_send(method, pairs.call({ m.keys[0] => m.values[1], m.keys[1] => m.values[2] }))).to be store expect(store.key?(m.keys[0])).to be true expect(store[m.keys[0]]).to eq m.values[1] expect(store.key?(m.keys[1])).to be true expect(store[m.keys[1]]).to eq m.values[2] store.clear end end it 'stores the return value of the block, if given, for keys that will be overwritten' do moneta_property_of(keys: 2, values: 4).check do |m| expect(store[m.keys[0]] = m.values[0]).to eq m.values[0] expect(store.public_send(method, pairs.call({ m.keys[0] => m.values[1], m.keys[1] => m.values[2] })) do |key, old_val, new_val| expect(key).to eq m.keys[0] expect(old_val).to eq m.values[0] 
expect(new_val).to eq m.values[1] m.values[3] end).to be store expect(store.key?(m.keys[0])).to be true expect(store[m.keys[0]]).to eq m.values[3] expect(store.key?(m.keys[1])).to be true expect(store[m.keys[1]]).to eq m.values[2] store.clear end end it 'raises any error raised in the block' do store['x'] = 'y' expect { store.public_send(method, 'x' => 'v') { raise 'yarg' } }.to raise_error 'yarg' end end shared_examples :merge_or_update do context 'when passed a hash' do let(:pairs) { :itself.to_proc } include_examples :merge! end context 'when passed an array' do let(:pairs) { :to_a.to_proc } include_examples :merge! end context 'when passed a lazy enumerator' do let :pairs do lambda do |hash| Enumerator.new do |y| hash.each(&y.method(:<<)) end.lazy end end include_examples :merge! end end describe '#merge!' do let(:method) { :merge! } include_examples :merge_or_update end describe '#update' do let(:method) { :update } include_examples :merge_or_update end end moneta-1.5.2/spec/features/store_large.rb000066400000000000000000000004571433316074200204160ustar00rootroot00000000000000shared_examples :store_large do it 'should store values up to 32k' do value = 'x' * (32 * 1024) store['large'] = value store['large'].should == value end it 'should store keys up to 128 bytes' do key = 'x' * 128 store[key] = 'value' store[key].should == 'value' end end moneta-1.5.2/spec/features/transform_value.rb000066400000000000000000000024361433316074200213160ustar00rootroot00000000000000shared_examples :transform_value do it 'allows to bypass transformer with :raw' do store['key'] = 'value' load_value(store.load('key', raw: true)).should == 'value' store.store('key', 'value', raw: true) store.load('key', raw: true).should == 'value' store.delete('key', raw: true).should == 'value' end it 'allows to bypass transformer with raw syntactic sugar' do store['key'] = 'value' load_value(store.raw.load('key')).should == 'value' store.raw.store('key', 'value') store.raw['key'].should == 
'value' store.raw.load('key').should == 'value' store.raw.delete('key').should == 'value' store.raw['key'] = 'value2' store.raw['key'].should == 'value2' end it 'returns unmarshalled value' do store.store('key', 'unmarshalled value', raw: true) store.load('key', raw: true).should == 'unmarshalled value' end it 'might raise exception on invalid value' do store.store('key', 'unmarshalled value', raw: true) begin store['key'].should == load_value('unmarshalled value') store.delete('key').should == load_value('unmarshalled value') rescue Exception => ex expect do store['key'] end.to raise_error expect do store.delete('key') end.to raise_error end end end moneta-1.5.2/spec/features/transform_value_expires.rb000066400000000000000000000025431433316074200230540ustar00rootroot00000000000000shared_examples :transform_value_expires do it 'allows to bypass transformer with :raw' do store['key'] = 'value' load_value(store.load('key', raw: true)).should == 'value' store['key'] = [1,2,3] load_value(store.load('key', raw: true)).should == [[1,2,3]] store['key'] = nil load_value(store.load('key', raw: true)).should == [nil] store['key'] = false load_value(store.load('key', raw: true)).should be false store.store('key', 'value', expires: 10) load_value(store.load('key', raw: true)).first.should == 'value' load_value(store.load('key', raw: true)).last.should respond_to(:to_int) store.store('key', 'value', raw: true) store.load('key', raw: true).should == 'value' store.delete('key', raw: true).should == 'value' end it 'returns unmarshalled value' do store.store('key', 'unmarshalled value', raw: true) store.load('key', raw: true).should == 'unmarshalled value' end it 'might raise exception on invalid value' do store.store('key', 'unmarshalled value', raw: true) begin store['key'].should == load_value('unmarshalled value') store.delete('key').should == load_value('unmarshalled value') rescue Exception => ex expect do store['key'] end.to raise_error expect do store.delete('key') end.to 
raise_error end end end moneta-1.5.2/spec/helper.rb000066400000000000000000000273121433316074200155500ustar00rootroot00000000000000require 'rspec/core/formatters/base_text_formatter' require 'moneta' require 'fileutils' require 'tmpdir' ENV['RANTLY_VERBOSE'] ||= '0' require 'rspec/retry' require 'rantly' require 'rantly/rspec_extensions' # rantly/shrinks require 'timecop' class MonetaParallelFormatter < RSpec::Core::Formatters::BaseTextFormatter def start(*args) output.puts colorise_summary("STARTING #{ARGV.join(' ')}") @stopped = false @passed_count = 0 @heartbeat = Thread.new do count = 0 until @stopped if (count += 1) % 60 == 0 output.puts(color("RUNNING #{ARGV.join(' ')} - #{@passed_count} passed, #{failed_examples.size} failures", failed_examples.empty? ? RSpec.configuration.success_color : RSpec.configuration.failure_color)) end sleep 0.5 end end end def example_passed(example) super @passed_count += 1 end def stop @stopped = true @heartbeat.join end def dump_summary(duration, example_count, failure_count, pending_count) @duration = duration @example_count = example_count @failure_count = failure_count @pending_count = pending_count output.puts colorise_summary(summary_line(example_count, failure_count, pending_count)) dump_commands_to_rerun_failed_examples end def summary_line(example_count, failure_count, pending_count) "FINISHED #{ARGV.join(' ')} in #{format_duration(duration)} - #{super}" end end class MonetaSpecs KEYS = { 'nil' => [:choose, nil, 0], 'integer' => :integer, 'float' => :float, 'boolean' => :boolean, 'string' => proc{ sized(range 5, 10){ string(:alnum) } }, 'path' => proc{ array(range 2, 3){ sized(range 5, 10){ string(:alpha) } }.join('/') }, 'binary' => [:string, :cntrl], 'object' => proc{ choose Value.new(:objkey1), Value.new(:objkey2) }, 'hash' => proc{ dict(2){ sized(range 5, 10){ [string(:alnum), string(:alnum)] } } } } VALUES = { 'nil' => [:literal, nil], 'integer' => :integer, 'float' => :float, 'boolean' => :boolean, 'string' => 
[:string, :alnum], 'binary' => [:string, :cntrl], 'object' => proc{ choose Value.new(:objval1), Value.new(:objval2) }, 'hash' => proc{ dict{ [string(:alnum), array(2){ choose(string(:alnum), integer, dict{ [string(:alnum), integer] }) }] } }, 'smallhash' => proc{ dict(2){ sized(range 5, 10){ [string(:alnum), string(:alnum)] } } } } attr_reader :key, :value, :specs, :features def initialize(options = {}) @specs = options.delete(:specs).to_a @features = @specs & [:expires, :expires_native, :increment, :each_key, :create] @key = options.delete(:key) || %w(object string binary hash boolean nil integer float) @value = options.delete(:value) || %w(object string binary hash boolean nil integer float) end def new(options) self.class.new({specs: specs, key: key, value: value}.merge(options)) end def with_keys(*keys) new(key: self.key | keys.map(&:to_s)) end def without_keys(*keys) new(key: self.key - keys.map(&:to_s)) end def with_values(*values) new(value: self.value | values.map(&:to_s)) end def without_values(*values) new(value: self.value - values.map(&:to_s)) end def without_keys_or_values(*types) without_keys(*types).without_values(*types) end def without_path new(key: key - %w(path)) end def stringvalues_only new(value: %w(string)) end def simplekeys_only new(key: %w(string hash integer)) end def simplevalues_only new(value: %w(string hash integer)) end def without_increment new(specs: specs - [:increment, :concurrent_increment] + [:not_increment]) end def without_large new(specs: specs - [:store_large]).instance_exec do if value.include? 
'hash' without_values(:hash).with_values(:smallhash) else self end end end def without_concurrent new(specs: specs - [:concurrent_increment, :concurrent_create]) end def without_persist new(specs: specs - [:persist, :multiprocess, :concurrent_increment, :concurrent_create] + [:not_persist]) end def without_multiprocess new(specs: specs - [:multiprocess, :concurrent_increment, :concurrent_create]) end def with_expires a = specs.dup if a.include?(:transform_value) a.delete(:transform_value) a << :transform_value_expires end a << :create_expires if a.include?(:create) a << :expires new(specs: a) end def with_native_expires a = specs.dup a << :create_expires if a.include?(:create) new(specs: a + [:expires]) end def without_marshallable new(specs: specs - [:marshallable_value, :marshallable_key]) end def without_transform new(specs: specs - [:marshallable_value, :marshallable_key, :transform_value]) end def returnsame new(specs: specs - [:returndifferent] + [:returnsame]) end def without_marshallable_key new(specs: specs - [:marshallable_key]) end def without_marshallable_value new(specs: specs - [:marshallable_value]) end def without_store new(specs: specs - [:store, :store_large, :transform_value, :marshallable_value]) end def with_default_expires new(specs: specs + [:default_expires]) end def with_each_key new(specs: specs - [:not_each_key] | [:each_key]) end def without_create new(specs: specs - [:create, :concurrent_create, :create_expires] + [:not_create]) end end ADAPTER_SPECS = MonetaSpecs.new( specs: [:null, :store, :returndifferent, :increment, :concurrent_increment, :concurrent_create, :persist, :multiprocess, :create, :features, :store_large, :not_each_key], key: %w(string path), value: %w(string path binary)) NATIVE_EXPIRY_SPECS = MonetaSpecs.new( specs: [:create, :expires, :create_expires], key: %w(string path), value: %w(string path binary)) STANDARD_SPECS = MonetaSpecs.new( specs: [:null, :store, :returndifferent, :marshallable_key, :marshallable_value, 
:transform_value, :increment, :concurrent_increment, :concurrent_create, :persist, :multiprocess, :create, :features, :store_large, :not_each_key]) TRANSFORMER_SPECS = MonetaSpecs.new( specs: [:null, :store, :returndifferent, :transform_value, :increment, :create, :features, :store_large, :not_each_key]) module MonetaHelpers module ClassMethods def moneta_store store_name, options={}, &block name = self.description builder = proc do if block options = instance_exec(&block) end Moneta.new(store_name, options.merge(logger: {file: File.join(tempdir, "#{name}.log")})) end include_context :setup_moneta_store, builder end def moneta_build &block include_context :setup_moneta_store, block end def moneta_loader &block before do @moneta_value_loader = block end end def moneta_specs specs let(:features){ specs.features } let(:keys_meta) do [:branch, *specs.key.map{ |k| MonetaSpecs::KEYS[k] }.compact] end let(:values_meta) do [:branch, *specs.value.map{ |k| MonetaSpecs::VALUES[k] }.compact] end # Used by tests that rely on MySQL. 
These env vars can be used if you # want to run the tests but don't want to grant root access to moneta let(:mysql_host) { ENV['MYSQL_HOST'] || 'localhost' } let(:mysql_port) { ENV['MYSQL_TCP_PORT'] || '3306' } let(:mysql_socket) { ENV['MYSQL_SOCKET'] } let(:mysql_username) { ENV['MONETA_MYSQL_USERNAME'] || 'root' } let(:mysql_password) { ENV['MONETA_MYSQL_PASSWORD'] } let(:mysql_database1) { ENV['MONETA_MYSQL_DATABASE1'] || 'moneta' } let(:mysql_database2) { ENV['MONETA_MYSQL_DATABASE2'] || 'moneta2' } let(:postgres_username) { ENV['PGUSER'] || 'postgres' } let(:postgres_password) { ENV['PGPASSWORD'] } let(:postgres_database1) { ENV['MONETA_POSTGRES_DATABSASE1'] || 'moneta1' } let(:postgres_database2) { ENV['MONETA_POSTGRES_DATABSASE1'] || 'moneta2' } let(:couch_login) { ENV['COUCH_LOGIN'] || 'admin' } let(:couch_password) { ENV['COUCH_PASSWORD'] || 'password' } let(:redis_host) { ENV.fetch('REDIS_HOST', 'localhost') } let(:redis_port) { ENV.fetch('REDIS_PORT', '6379') } before do store = new_store store.clear store.close end specs.specs.sort.each do |s| context "#{s} feature" do include_examples(s) end end end def use_timecop before { @timecop = true } after { Timecop.return } end end module InstanceMethods def tempdir @moneta_tempdir ||= Dir.mktmpdir end def new_store instance_eval(&@moneta_store_builder) end def store @store ||= new_store end def load_value value if @moneta_value_loader @moneta_value_loader.call value else Marshal.load(value) end end def moneta_property_of(keys: 0, values: 0) keys_meta = self.keys_meta values_meta = self.values_meta property_of do key_values = keys.times.map { call(keys_meta) } guard key_values.uniq.length == key_values.length value_values = values.times.map { call(values_meta) } guard value_values.uniq.length == value_values.length Struct.new(:keys, :values).new(key_values, value_values) end end def advance(seconds) return if seconds < 0 if @timecop Timecop.freeze(Time.now + seconds) else sleep seconds end end def 
time_till_next(tick) now = Time.now.to_f tick - (now % tick) end # advance to the moment just after a tick. 1e-2 is needed in some # environments (JRuby) to be able to pass the "not in earlier half" test. def advance_next_tick tick = t_res offset = time_till_next(tick) + 1e-2 advance offset raise "not in earlier half of tick" unless Time.now.to_f % tick < tick / 2.0 end end end RSpec.configure do |config| config.verbose_retry = true config.color = true #config.tty = true #config.formatter = ENV['PARALLEL_TESTS'] ? MonetaParallelFormatter : :progress config.silence_filter_announcements = true if ENV['PARALLEL_TESTS'] # Allow "should" syntax as well as "expect" config.expect_with(:rspec) { |c| c.syntax = [:should, :expect] } config.extend MonetaHelpers::ClassMethods config.include MonetaHelpers::InstanceMethods end # FIXME: Get rid of this once raise_error expectations no longer generate # warnings RSpec::Expectations.configuration.on_potential_false_positives = :nothing # Disable jruby stdout pollution by memcached if defined?(JRUBY_VERSION) require 'java' properties = java.lang.System.getProperties(); properties.put('net.spy.log.LoggerImpl', 'net.spy.memcached.compat.log.SunLogger'); java.lang.System.setProperties(properties); java.util.logging.Logger.getLogger('').setLevel(java.util.logging.Level::OFF) end class Value attr_accessor :x def initialize(x) @x = x end def ==(other) Value === other && other.x == x end def eql?(other) Value === other && other.x == x end def hash x.hash end end def marshal_error # HACK: Marshalling structs in rubinius without class name throws # NoMethodError (to_sym). TODO: Create an issue for rubinius! if defined?(RUBY_ENGINE) && RUBY_ENGINE == 'rbx' RUBY_VERSION < '1.9' ? 
ArgumentError : NoMethodError else TypeError end end RSpec.shared_context :setup_moneta_store do |builder| before do @moneta_store_builder = builder end after do if @store @store.close.should == nil @store = nil end end after :all do if @moneta_tempdir FileUtils.remove_dir(@moneta_tempdir) end end end Dir['./spec/features/*.rb'].each{ |rb| require rb } moneta-1.5.2/spec/moneta/000077500000000000000000000000001433316074200152225ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/000077500000000000000000000000001433316074200170255ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/activerecord/000077500000000000000000000000001433316074200214775ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/activerecord/adapter_activerecord_existing_connection_spec.rb000066400000000000000000000036341433316074200333470ustar00rootroot00000000000000describe 'adapter_activerecord_existing_connection', adapter: :ActiveRecord, mysql: true, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('3.0.0') do before :all do require 'active_record' end before do default_env = ActiveRecord::ConnectionHandling::DEFAULT_ENV.call ActiveRecord::Base.configurations = { default_env => { 'adapter' => (defined?(JRUBY_VERSION) ? 
'jdbcmysql' : 'mysql2'), 'socket' => mysql_socket, 'host' => mysql_host, 'port' => mysql_port, 'database' => mysql_database1, 'username' => mysql_username, 'password' => mysql_password } } ActiveRecord::Base.establish_connection end moneta_build do Moneta::Adapters::ActiveRecord.new(table: 'adapter_activerecord_existing_connection') end moneta_specs ADAPTER_SPECS.with_each_key # This is based on # https://github.com/jjb/rails/blob/ar-connection-management-guide/guides/source/active_record_connection_management.md it "supports use on a forking web server", unsupported: !Process.respond_to?(:fork) do store['a'] = 'b' # Before forking, the connection pool is disconnected so that the # forked processes don't use the same connections. ActiveRecord::Base.connection_pool.disconnect! pids = 8.times.map do Process.fork do # Connection is then reestablished in the forked process ActiveRecord::Base.establish_connection exit 1 unless store['a'] == 'b' store[Process.pid.to_s] = '1' exit 1 unless store[Process.pid.to_s] == '1' end end pids.each do |pid| pid2, status = Process.wait2(pid) expect(pid2).to eq pid expect(status.exitstatus).to eq 0 end # Check that the stores were all operating on the same DB ActiveRecord::Base.establish_connection pids.each do |pid| expect(store[pid.to_s]).to eq '1' end end end moneta-1.5.2/spec/moneta/adapters/activerecord/adapter_activerecord_spec.rb000066400000000000000000000067561433316074200272260ustar00rootroot00000000000000describe 'adapter_activerecord', adapter: :ActiveRecord, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('3.0.0') do activerecord_specs = ADAPTER_SPECS.with_values(:nil).with_each_key shared_examples :adapter_activerecord do |specs| moneta_build do Moneta::Adapters::ActiveRecord.new( table: 'adapter_activerecord', connection: connection1) end moneta_specs specs it 'updates an existing key/value' do store['foo/bar'] = '1' store['foo/bar'] = '2' store.with_connection do |conn| count = conn.select_value \ 
store.table. where(store.table[:k].eq('foo/bar')). project(store.table[:k].count) expect(count).to eq 1 end end it 'supports different tables same database' do store1 = Moneta::Adapters::ActiveRecord.new( table: 'adapter_activerecord1', connection: connection1) store2 = Moneta::Adapters::ActiveRecord.new( table: 'adapter_activerecord2', connection: connection1) store1['key'] = 'value1' store2['key'] = 'value2' store1['key'].should == 'value1' store2['key'].should == 'value2' store1.close store2.close end it 'supports different databases same table' do store1 = Moneta::Adapters::ActiveRecord.new( table: 'adapter_activerecord', connection: connection1) store2 = Moneta::Adapters::ActiveRecord.new( table: 'adapter_activerecord', connection: connection2) store1['key'] = 'value1' store2['key'] = 'value2' store1['key'].should == 'value1' store2['key'].should == 'value2' store1.close store2.close end end context "with MySQL", mysql: true do let(:connection1) do { adapter: (defined?(JRUBY_VERSION) ? 'jdbcmysql' : 'mysql2'), host: mysql_host, port: mysql_port, socket: mysql_socket, database: mysql_database1, username: mysql_username, password: mysql_password } end let(:connection2) do { adapter: (defined?(JRUBY_VERSION) ? 'jdbcmysql' : 'mysql2'), host: mysql_host, port: mysql_port, socket: mysql_socket, database: mysql_database2, username: mysql_username, password: mysql_password } end include_examples :adapter_activerecord, activerecord_specs end context "with PostgreSQL", postgres: true do let(:connection1) do { adapter: (defined?(JRUBY_VERSION) ? 'jdbcpostgresql' : 'postgresql'), database: postgres_database1, username: postgres_username, password: postgres_password } end let(:connection2) do { adapter: (defined?(JRUBY_VERSION) ? 
'jdbcpostgresql' : 'postgresql'), database: postgres_database2, username: postgres_username, password: postgres_password } end include_examples :adapter_activerecord, activerecord_specs end context "with SQLite", sqlite: true do let(:connection1) do { adapter: (defined?(JRUBY_VERSION) ? 'jdbcsqlite3' : 'sqlite3'), database: File.join(tempdir, 'adapter_activerecord1.db') } end let(:connection2) do { adapter: (defined?(JRUBY_VERSION) ? 'jdbcsqlite3' : 'sqlite3'), database: File.join(tempdir, 'adapter_activerecord2.db') } end include_examples :adapter_activerecord, activerecord_specs.without_concurrent end end moneta-1.5.2/spec/moneta/adapters/activerecord/standard_activerecord_spec.rb000066400000000000000000000012111433316074200273630ustar00rootroot00000000000000describe "standard_activerecord", adapter: :ActiveRecord, mysql: true, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('3.0.0') do moneta_store :ActiveRecord do { table: 'standard_activerecord', connection: { adapter: (defined?(JRUBY_VERSION) ? 'jdbcmysql' : 'mysql2'), host: mysql_host, port: mysql_port, socket: mysql_socket, database: mysql_database1, username: mysql_username, password: mysql_password } } end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.with_each_key end moneta-1.5.2/spec/moneta/adapters/activerecord/standard_activerecord_with_expires_spec.rb000066400000000000000000000014071433316074200321640ustar00rootroot00000000000000describe "standard_activerecord_with_expires", adapter: :ActiveRecord, mysql: true, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('3.0.0') do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :ActiveRecord do { table: 'standard_activerecord_with_expires', connection: { adapter: (defined?(JRUBY_VERSION) ? 
'jdbcmysql' : 'mysql2'), host: mysql_host, port: mysql_port, socket: mysql_socket, database: mysql_database1, username: mysql_username, password: mysql_password }, expires: true } end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/activesupportcache/000077500000000000000000000000001433316074200227215ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/activesupportcache/adapter_activesupportcache_spec.rb000066400000000000000000000030551433316074200316570ustar00rootroot00000000000000require_relative '../memcached_helper.rb' describe 'adapter_activesupportcache', adapter: :ActiveSupportCache, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('3.0.0') do before :all do require 'active_support' require 'active_support/cache/moneta_store' end shared_examples :adapter_activesupportcache do moneta_build do Moneta::Adapters::ActiveSupportCache.new(backend: backend) end moneta_specs ADAPTER_SPECS.without_concurrent.without_create.with_native_expires end context 'using MemoryStore' do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop let(:backend) { ActiveSupport::Cache::MemoryStore.new } include_examples :adapter_activesupportcache end context 'using MemCacheStore', memcached: true do let(:t_res) { 1 } let(:min_ttl) { 2 } use_timecop include_context :start_memcached, 11215 let(:backend) { ActiveSupport::Cache::MemCacheStore.new('127.0.0.1:11215') } include_examples :adapter_activesupportcache end context 'using RedisCacheStore', redis: true do let(:t_res) { 1 } let(:min_ttl) { t_res } use_timecop let(:backend) { ActiveSupport::Cache::RedisCacheStore.new(url: "redis://#{redis_host}:#{redis_port}/1") } include_examples :adapter_activesupportcache end context 'using MonetaStore' do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop let(:backend) { ActiveSupport::Cache::MonetaStore.new(store: Moneta.new(:Memory)) } 
include_examples :adapter_activesupportcache end end adapter_activesupportcache_with_default_expires_spec.rb000066400000000000000000000033021433316074200360710ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/activesupportcacherequire_relative '../memcached_helper' describe 'adapter_activesupportcache_with_default_expires', adapter: :ActiveSupportCache, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('3.0.0') do before :all do require 'active_support' require 'active_support/cache/moneta_store' end shared_examples :adapter_activesupportcache_with_default_expires do moneta_build do Moneta::Adapters::ActiveSupportCache.new(backend: backend, expires: min_ttl) end moneta_specs ADAPTER_SPECS.without_concurrent.without_create.with_native_expires.with_default_expires end context 'using MemoryStore' do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop let(:backend) { ActiveSupport::Cache::MemoryStore.new } include_examples :adapter_activesupportcache_with_default_expires end context 'using MemCacheStore', memcached: true do let(:t_res) { 1 } let(:min_ttl) { 2 } use_timecop include_context :start_memcached, 11223 let(:backend) { ActiveSupport::Cache::MemCacheStore.new('127.0.0.1:11223') } include_examples :adapter_activesupportcache_with_default_expires end context 'using RedisCacheStore' do let(:t_res) { 1 } let(:min_ttl) { t_res } use_timecop let(:backend) { ActiveSupport::Cache::RedisCacheStore.new(url: "redis://#{redis_host}:#{redis_port}/2") } include_examples :adapter_activesupportcache_with_default_expires end context 'using MonetaStore' do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop let(:backend) { ActiveSupport::Cache::MonetaStore.new(store: Moneta.new(:Memory)) } include_examples :adapter_activesupportcache_with_default_expires end end 
moneta-1.5.2/spec/moneta/adapters/activesupportcache/standard_activesupportcache_spec.rb000066400000000000000000000006711433316074200320400ustar00rootroot00000000000000describe 'standard_activesupportcache', adapter: :ActiveSupportCache, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('3.0.0') do before :context do require 'active_support' end let(:t_res) { 0.1 } let(:min_ttl) { 0.1 } moneta_store :ActiveSupportCache do { backend: ActiveSupport::Cache::MemoryStore.new } end moneta_specs STANDARD_SPECS.without_create.without_persist.with_native_expires end moneta-1.5.2/spec/moneta/adapters/cassandra/000077500000000000000000000000001433316074200207645ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/cassandra/adapter_cassandra_spec.rb000066400000000000000000000010751433316074200257650ustar00rootroot00000000000000require_relative './helper.rb' describe 'adapter_cassandra', retry: 3, adapter: :Cassandra, unsupported: RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0') do let(:t_res) { 1 } let(:min_ttl) { 2 } include_context :global_cassandra_cluster moneta_build do Moneta::Adapters::Cassandra.new( cluster: cluster, keyspace: 'adapter_cassandra', create_keyspace: { durable_writes: false }) end moneta_specs ADAPTER_SPECS.without_increment.without_create.with_native_expires.with_values(:nil).with_each_key end moneta-1.5.2/spec/moneta/adapters/cassandra/adapter_cassandra_with_default_expires_spec.rb000066400000000000000000000012431433316074200322600ustar00rootroot00000000000000require_relative './helper.rb' describe 'adapter_cassandra_with_default_expires', isolate: true, retry: 3, adapter: :Cassandra, unsupported: RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0') do let(:t_res) { 1 } let(:min_ttl) { 2 } include_context :global_cassandra_cluster moneta_build do Moneta::Adapters::Cassandra.new( cluster: cluster, keyspace: 'adapter_cassandra_with_default_expires', expires: 
min_ttl, create_keyspace: { durable_writes: false }) end moneta_specs ADAPTER_SPECS.without_increment.without_create.with_native_expires.with_default_expires.with_values(:nil).with_each_key end moneta-1.5.2/spec/moneta/adapters/cassandra/helper.rb000066400000000000000000000006631433316074200225750ustar00rootroot00000000000000# This is used in order to speed up cassandra specs RSpec.shared_context :global_cassandra_cluster do before :all do require 'cassandra' $moneta_cassandra_cluster ||= ::Cassandra.cluster end let(:cluster) { $moneta_cassandra_cluster } end RSpec.configure do |config| config.after :suite do if $moneta_cassandra_cluster $moneta_cassandra_cluster.close $moneta_cassandra_cluster = nil end end end moneta-1.5.2/spec/moneta/adapters/cassandra/standard_cassandra_spec.rb000066400000000000000000000010371433316074200261430ustar00rootroot00000000000000require_relative './helper.rb' describe "standard_cassandra", retry: 3, adapter: :Cassandra, unsupported: RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0') do let(:t_res) { 1 } let(:min_ttl) { 2 } include_context :global_cassandra_cluster moneta_store :Cassandra do { cluster: cluster, keyspace: "standard_cassandra", create_keyspace: { durable_writes: false } } end moneta_specs STANDARD_SPECS.without_increment.without_create.with_native_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/client/000077500000000000000000000000001433316074200203035ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/client/adapter_client_spec.rb000066400000000000000000000004451433316074200246230ustar00rootroot00000000000000require_relative './client_helper.rb' describe 'adapter_client', adapter: :Client do include_context :start_server, port: 9002, backend: ->{ Moneta::Adapters::Memory.new } moneta_build do Moneta::Adapters::Client.new(port: 9002) end moneta_specs ADAPTER_SPECS.with_each_key end 
moneta-1.5.2/spec/moneta/adapters/client/client_helper.rb000066400000000000000000000012651433316074200234510ustar00rootroot00000000000000RSpec.shared_context :start_server do |**options| before :context do begin options.each do |key, value| options[key] = instance_exec(&value) if value.respond_to? :call end backend = options.delete(:backend) @server = Moneta::Server.new(backend, options) @thread = Thread.new { @server.run } sleep 0.1 until @server.running? rescue Exception => ex puts "Failed to start server - #{ex.message}" tries ||= 0 tries += 1 timeout = options[:timeout] || Moneta::Server.config_defaults[:timeout] sleep 1 tries < 3 ? retry : raise end end after :context do @server&.stop @thread&.join end end moneta-1.5.2/spec/moneta/adapters/client/standard_client_tcp_spec.rb000066400000000000000000000012521433316074200256460ustar00rootroot00000000000000require_relative './client_helper.rb' describe "standard_client_tcp", adapter: :Client do include_context :start_server, port: 9003, backend: ->{ Moneta::Adapters::Memory.new } moneta_store :Client, port: 9003 moneta_specs STANDARD_SPECS.with_each_key it 'supports multiple clients' do store['shared_key'] = 'shared_val' threads = (1..32).map do |i| Thread.new do client = new_store (1..31).each do |j| client['shared_key'].should == 'shared_val' client["key-\#{j}-\#{i}"] = "val-\#{j}-\#{i}" client["key-\#{j}-\#{i}"].should == "val-\#{j}-\#{i}" end end end threads.map(&:join) end end moneta-1.5.2/spec/moneta/adapters/client/standard_client_unix_spec.rb000066400000000000000000000014661433316074200260520ustar00rootroot00000000000000require_relative './client_helper.rb' describe "standard_client_unix", adapter: :Client do include_context :start_server, backend: ->{ Moneta::Adapters::Memory.new }, socket: ->{ File.join(tempdir, 'standard_client_unix') } moneta_store :Client do { socket: File.join(tempdir, 'standard_client_unix') } end moneta_specs STANDARD_SPECS.with_each_key it 'supports multiple clients' do 
store['shared_key'] = 'shared_val' threads = (1..32).map do |i| Thread.new do client = new_store (1..31).each do |j| client['shared_key'].should == 'shared_val' client["key-\#{j}-\#{i}"] = "val-\#{j}-\#{i}" client["key-\#{j}-\#{i}"].should == "val-\#{j}-\#{i}" end end end threads.map(&:join) end end moneta-1.5.2/spec/moneta/adapters/cookie/000077500000000000000000000000001433316074200202765ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/cookie/adapter_cookie_spec.rb000066400000000000000000000002631433316074200246070ustar00rootroot00000000000000describe 'adapter_cookie', adapter: :Cookie do moneta_build do Moneta::Adapters::Cookie.new end moneta_specs ADAPTER_SPECS.with_each_key.without_persist.returnsame end moneta-1.5.2/spec/moneta/adapters/couch/000077500000000000000000000000001433316074200201265ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/couch/adapter_couch_spec.rb000066400000000000000000000141121433316074200242650ustar00rootroot00000000000000require_relative '../faraday_helper.rb' describe 'adapter_couch', adapter: :Couch do include_context :faraday_adapter moneta_build do Moneta::Adapters::Couch.new(db: 'adapter_couch', adapter: faraday_adapter, login: couch_login, password: couch_password) end moneta_specs ADAPTER_SPECS.without_increment.simplevalues_only.without_path.with_each_key shared_examples :batch do shared_examples :no_batch do it "does not add 'batch=ok' to the query'" do expect(store).not_to receive(:request).with(any_args, hash_including(query: hash_including(batch: 'ok'))) expect(store).to receive(:request) store.public_send(m, *args, options) end end context 'without a :batch option' do let(:options) { {} } include_examples(:no_batch) end context 'with batch: false' do let(:options) { { batch: false } } include_examples(:no_batch) end context 'with batch: true' do let(:options) { { batch: true } } it "adds 'batch=ok' to the query'" do expect(store).to receive(:request).with(instance_of(Symbol), 
instance_of(String), any_args, hash_including(expect: 202, query: hash_including(batch: 'ok'))) store.public_send(m, *args, options) end end end shared_examples :full_commit do context 'without a :full_commit option' do let(:options) { {} } it "does not add a 'X-Couch-Full-Commit' header'" do expect(store).not_to receive(:request) .with(any_args, hash_including(headers: hash_including('X-Couch-Full-Commit' => instance_of(String)))) expect(store).to receive(:request).ordered store.public_send(m, *args, options) end end context 'with full_commit: true' do let(:options) { { full_commit: true } } it "adds 'X-Couch-Full-Commit: true' to the headers'" do expect(store).to receive(:request) .with(instance_of(Symbol), instance_of(String), any_args, hash_including(headers: hash_including('X-Couch-Full-Commit' => 'true'))) .ordered store.public_send(m, *args, options) end end context 'with full_commit: false' do let(:options) { { full_commit: false } } it "adds 'X-Couch-Full-Commit: false' to the headers'" do expect(store).to receive(:request) .with(instance_of(Symbol), instance_of(String), any_args, hash_including(headers: hash_including('X-Couch-Full-Commit' => 'false'))) .ordered store.public_send(m, *args, options) end end end describe '#store' do let(:m) { :store } let(:args) { ['a', 'b'] } include_examples :batch include_examples :full_commit end describe '#delete' do let(:m) { :delete } let(:args) { ['a'] } before do expect(store).to receive(:request).with(:get, 'a', any_args).ordered do Faraday::Response.new( Faraday::Env.from(status: 200, body: '{"type":"Hash","test":1}', response_headers: { 'ETag' => '"testrev"' })) end end include_examples :batch include_examples :full_commit end describe '#merge!' do let(:m) { :merge! 
} let(:args) { [{'a' => '1'}] } before do expect(store).to receive(:request).with(:post, '_all_docs', any_args).ordered do { 'rows' => [] } end allow(store).to receive(:request).with(:post, '_bulk_docs', any_args) do [ { "ok" => true, "id" => 'a', "rev" => 'testrev' } ] end end include_examples :full_commit end describe '#clear' do context 'changing full commit behaviour' do let(:m) { :clear } let(:args) { [] } # This will make the clear method proceed to deletion before do responses = [ { 'rows' => [ { 'id' => 'test', 'value' => { 'rev' => 'testrev' } } ] }, { 'rows' => [] } ] expect(store).to receive(:request).at_least(:once).with(:get, '_all_docs', any_args).ordered do responses.shift end end include_examples :full_commit end shared_examples :no_compact do it 'does not post to the _compact endpoint' do expect(store).not_to receive(:post).with('_compact', any_args) store.clear(options) end end context 'without a :compact option' do let(:options) { {} } include_examples :no_compact end context 'with compact: true' do it 'posts to the _compact endpoint' do expect(store).to receive(:post).with('_compact', any_args) store.clear(compact: true) end end context 'with compact: false' do let(:options) { { compact: false } } include_examples :no_compact end context 'with await_compact: true' do it "waits for compaction to complete" do # This simulates an empty DB, so no deletes expect(store).to receive(:get).with('_all_docs', any_args).ordered { { 'rows' => [] } } # Next, compact is called. expect(store).to receive(:post).with('_compact', any_args).ordered # We expect the method to call get the DB info as many times as the true value is returned. 
expect(store).to receive(:get).twice.with('', any_args).ordered { { 'compact_running' => true } } expect(store).to receive(:get).once.with('', any_args).ordered { { 'compact_running' => false } } store.clear(compact: true, await_compact: true) end end context 'with await_compact: false' do it "does not wait for compaction to complete" do expect(store).to receive(:get).with('_all_docs', any_args).ordered { { 'rows' => [] } } expect(store).to receive(:post).with('_compact', any_args).ordered expect(store).not_to receive(:get).with('', any_args).ordered store.clear(compact: true, await_compact: false) end end end end moneta-1.5.2/spec/moneta/adapters/couch/standard_couch_spec.rb000066400000000000000000000006201433316074200244440ustar00rootroot00000000000000require_relative '../faraday_helper.rb' describe "standard_couch", adapter: :Couch do include_context :faraday_adapter moneta_store :Couch do { db: 'standard_couch', adapter: faraday_adapter, login: couch_login, password: couch_password } end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.without_increment.with_each_key end moneta-1.5.2/spec/moneta/adapters/couch/standard_couch_with_expires_spec.rb000066400000000000000000000010071433316074200272360ustar00rootroot00000000000000require_relative '../faraday_helper.rb' describe "standard_couch_with_expires", adapter: :Couch do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop include_context :faraday_adapter moneta_store :Couch do { db: 'standard_couch_with_expires', adapter: faraday_adapter, expires: true, login: couch_login, password: couch_password } end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.without_increment.with_expires.with_each_key end 
moneta-1.5.2/spec/moneta/adapters/datamapper/000077500000000000000000000000001433316074200211435ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/datamapper/adapter_datamapper_spec.rb000066400000000000000000000033111433316074200263160ustar00rootroot00000000000000describe 'adapter_datamapper', unsupported: defined?(JRUBY_VERSION) || RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0'), adapter: :DataMapper, mysql: true do before :all do require 'dm-core' # DataMapper needs default repository to be setup DataMapper.setup(:default, adapter: :in_memory) end let :database_uri do "mysql://#{mysql_username}:#{mysql_password}@#{mysql_host}:#{mysql_port}/#{mysql_database1}" + (mysql_socket ? "?socket=#{mysql_socket}" : "") end moneta_build do Moneta::Adapters::DataMapper.new( setup: database_uri, table: "adapter_datamapper" ) end moneta_specs ADAPTER_SPECS.without_increment.with_values(:nil).without_values(:binary) it 'does not cross contaminate when storing' do first = Moneta::Adapters::DataMapper.new( setup: database_uri, table: "datamapper_first" ) first.clear second = Moneta::Adapters::DataMapper.new( repository: :sample, setup: database_uri, table: "datamapper_second" ) second.clear first['key'] = 'value' second['key'] = 'value2' first['key'].should == 'value' second['key'].should == 'value2' end it 'does not cross contaminate when deleting' do first = Moneta::Adapters::DataMapper.new( setup: database_uri, table: "datamapper_first" ) first.clear second = Moneta::Adapters::DataMapper.new( repository: :sample, setup: database_uri, table: "datamapper_second" ) second.clear first['key'] = 'value' second['key'] = 'value2' first.delete('key').should == 'value' first.key?('key').should be false second['key'].should == 'value2' end end moneta-1.5.2/spec/moneta/adapters/datamapper/standard_datamapper_spec.rb000066400000000000000000000013151433316074200265000ustar00rootroot00000000000000describe "standard_datamapper", unsupported: 
defined?(JRUBY_VERSION) || RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0'), adapter: :DataMapper, mysql: true do before :all do require 'dm-core' # DataMapper needs default repository to be setup DataMapper.setup(:default, adapter: :in_memory) end moneta_store :DataMapper do { setup: "mysql://#{mysql_username}:#{mysql_password}@#{mysql_host}:#{mysql_port}/#{mysql_database1}" + (mysql_socket ? "?socket=#{mysql_socket}" : ""), table: "simple_datamapper" } end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.without_increment end moneta-1.5.2/spec/moneta/adapters/datamapper/standard_datamapper_with_expires_spec.rb000066400000000000000000000015121433316074200312710ustar00rootroot00000000000000describe "standard_datamapper_with_expires", unsupported: defined?(JRUBY_VERSION) || RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0'), adapter: :DataMapper, mysql: true do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop before :all do require 'dm-core' # DataMapper needs default repository to be setup DataMapper.setup(:default, adapter: :in_memory) end moneta_store :DataMapper do { setup: "mysql://#{mysql_username}:#{mysql_password}@#{mysql_host}:#{mysql_port}/#{mysql_database1}" + (mysql_socket ? 
"?socket=#{mysql_socket}" : ""), table: "simple_datamapper_with_expires", expires: true } end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.without_increment.with_expires end moneta-1.5.2/spec/moneta/adapters/datamapper/standard_datamapper_with_repository_spec.rb000066400000000000000000000014061433316074200320330ustar00rootroot00000000000000describe 'standard_datamapper_with_repository', unsupported: defined?(JRUBY_VERSION) || RUBY_ENGINE == 'ruby' && Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0'), adapter: :DataMapper, mysql: true do before :all do require 'dm-core' # DataMapper needs default repository to be setup DataMapper.setup(:default, adapter: :in_memory) end moneta_store :DataMapper do { repository: :repo, setup: "mysql://#{mysql_username}:#{mysql_password}@#{mysql_host}:#{mysql_port}/#{mysql_database1}" + (mysql_socket ? "?socket=#{mysql_socket}" : ""), table: "simple_datamapper_with_repository" } end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.without_increment end moneta-1.5.2/spec/moneta/adapters/daybreak/000077500000000000000000000000001433316074200206075ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/daybreak/adapter_daybreak_spec.rb000066400000000000000000000003541433316074200254320ustar00rootroot00000000000000describe 'adapter_daybreak', adapter: :Daybreak do moneta_build do Moneta::Adapters::Daybreak.new(file: File.join(tempdir, "adapter_daybreak")) end moneta_specs ADAPTER_SPECS.without_multiprocess.returnsame.with_each_key end moneta-1.5.2/spec/moneta/adapters/daybreak/standard_daybreak_spec.rb000066400000000000000000000003161433316074200256100ustar00rootroot00000000000000describe 'standard_daybreak', adapter: :Daybreak do moneta_store :Daybreak do {file: File.join(tempdir, "simple_daybreak")} end moneta_specs STANDARD_SPECS.without_multiprocess.with_each_key end 
moneta-1.5.2/spec/moneta/adapters/daybreak/standard_daybreak_with_expires_spec.rb000066400000000000000000000005051433316074200304020ustar00rootroot00000000000000describe 'standard_daybreak_with_expires', adapter: :Daybreak do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :Daybreak do {file: File.join(tempdir, "simple_daybreak_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/dbm/000077500000000000000000000000001433316074200175675ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/dbm/adapter_dbm_spec.rb000066400000000000000000000003631433316074200233720ustar00rootroot00000000000000describe 'adapter_dbm', unsupported: defined?(JRUBY_VERSION), adapter: :DBM do moneta_build do Moneta::Adapters::DBM.new(file: File.join(tempdir, "adapter_dbm")) end moneta_specs ADAPTER_SPECS.with_each_key.without_multiprocess end moneta-1.5.2/spec/moneta/adapters/dbm/standard_dbm_spec.rb000066400000000000000000000003401433316074200235450ustar00rootroot00000000000000describe 'standard_dbm', unsupported: defined?(JRUBY_VERSION), adapter: :DBM do moneta_store :DBM do {file: File.join(tempdir, "simple_dbm")} end moneta_specs STANDARD_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/dbm/standard_dbm_with_expires_spec.rb000066400000000000000000000005371433316074200263470ustar00rootroot00000000000000describe 'standard_dbm_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :DBM do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :DBM do { file: File.join(tempdir, "simple_dbm_with_expires"), expires: true } end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/faraday_helper.rb000066400000000000000000000003621433316074200223210ustar00rootroot00000000000000RSpec.shared_context :faraday_adapter do before :context do #require 
'faraday/adapter/manticore' if defined?(JRUBY_VERSION) end let(:faraday_adapter) do #defined?(JRUBY_VERSION) ? :manticore : :net_http :net_http end end moneta-1.5.2/spec/moneta/adapters/file/000077500000000000000000000000001433316074200177445ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/file/adapter_file_spec.rb000066400000000000000000000002731433316074200237240ustar00rootroot00000000000000describe 'adapter_file', adapter: :File do moneta_build do Moneta::Adapters::File.new(dir: File.join(tempdir, "adapter_file")) end moneta_specs ADAPTER_SPECS.with_each_key end moneta-1.5.2/spec/moneta/adapters/file/standard_file_spec.rb000066400000000000000000000002501433316074200240770ustar00rootroot00000000000000describe 'standard_file', adapter: :File do moneta_store :File do {dir: File.join(tempdir, "simple_file")} end moneta_specs STANDARD_SPECS.with_each_key end moneta-1.5.2/spec/moneta/adapters/file/standard_file_with_expires_spec.rb000066400000000000000000000004371433316074200267000ustar00rootroot00000000000000describe 'standard_file_with_expires', adapter: :File do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :File do {dir: File.join(tempdir, "simple_file_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/fog/000077500000000000000000000000001433316074200176005ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/fog/adapter_fog_spec.rb000066400000000000000000000010421433316074200234070ustar00rootroot00000000000000describe 'adapter_fog', adapter: :Fog do before :all do require 'fog/aws' Fog.mock! 
end moneta_build do Moneta::Adapters::Fog.new(aws_access_key_id: 'fake_access_key_id', aws_secret_access_key: 'fake_secret_access_key', provider: 'AWS', dir: 'adapter_fog') end # Fog returns same object in mocking mode (in-memory store) moneta_specs ADAPTER_SPECS.without_increment.without_create.returnsame end moneta-1.5.2/spec/moneta/adapters/fog/standard_fog_spec.rb000066400000000000000000000007261433316074200235770ustar00rootroot00000000000000describe 'standard_fog', adapter: :Fog do before :all do require 'fog/aws' # Put Fog into testing mode Fog.mock! end moneta_store :Fog, {aws_access_key_id: 'fake_access_key_id', aws_secret_access_key: 'fake_secret_access_key', provider: 'AWS', dir: 'standard_fog'} moneta_specs STANDARD_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/adapters/fog/standard_fog_with_expires_spec.rb000066400000000000000000000011661433316074200263700ustar00rootroot00000000000000describe 'standard_fog_with_expires', adapter: :Fog do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop before :all do require 'fog/aws' # Put Fog into testing mode Fog.mock! 
end moneta_store :Fog, {aws_access_key_id: 'fake_access_key_id', aws_secret_access_key: 'fake_secret_access_key', provider: 'AWS', dir: 'standard_fog_with_expires', expires: true} moneta_specs STANDARD_SPECS.without_increment.without_create.with_expires end moneta-1.5.2/spec/moneta/adapters/gdbm/000077500000000000000000000000001433316074200177365ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/gdbm/adapter_gdbm_spec.rb000066400000000000000000000003211433316074200237020ustar00rootroot00000000000000describe 'adapter_gdbm', adapter: :GDBM do moneta_build do Moneta::Adapters::GDBM.new(file: File.join(tempdir, "adapter_gdbm")) end moneta_specs ADAPTER_SPECS.with_each_key.without_multiprocess end moneta-1.5.2/spec/moneta/adapters/gdbm/standard_gdbm_spec.rb000066400000000000000000000002761433316074200240730ustar00rootroot00000000000000describe 'standard_gdbm', adapter: :GDBM do moneta_store :GDBM do {file: File.join(tempdir, "simple_gdbm")} end moneta_specs STANDARD_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/gdbm/standard_gdbm_with_expires_spec.rb000066400000000000000000000004651433316074200266650ustar00rootroot00000000000000describe 'standard_gdbm_with_expires', adapter: :GDBM do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :GDBM do {file: File.join(tempdir, "simple_gdbm_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/hashfile/000077500000000000000000000000001433316074200206105ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/hashfile/standard_hashfile_spec.rb000066400000000000000000000002521433316074200256110ustar00rootroot00000000000000describe 'standard_hashfile', adapter: :HashFile do moneta_store :HashFile do {dir: File.join(tempdir, "simple_hashfile")} end moneta_specs STANDARD_SPECS end 
moneta-1.5.2/spec/moneta/adapters/hashfile/standard_hashfile_with_expires_spec.rb000066400000000000000000000004461433316074200304100ustar00rootroot00000000000000describe 'standard_hashfile_with_expires', adapter: :HashFile do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :HashFile do {dir: File.join(tempdir, "simple_hashfile_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.with_expires end moneta-1.5.2/spec/moneta/adapters/hbase/000077500000000000000000000000001433316074200201075ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/hbase/adapter_hbase_spec.rb000066400000000000000000000002761433316074200242350ustar00rootroot00000000000000describe 'adapter_hbase', unstable: true, adapter: :HBase do moneta_build do Moneta::Adapters::HBase.new(table: 'adapter_hbase') end moneta_specs ADAPTER_SPECS.without_create end moneta-1.5.2/spec/moneta/adapters/hbase/standard_hbase_spec.rb000066400000000000000000000002361433316074200244110ustar00rootroot00000000000000describe 'standard_hbase', unstable: true, adapter: :HBase do moneta_store :HBase, {table: "simple_hbase"} moneta_specs STANDARD_SPECS.without_create end moneta-1.5.2/spec/moneta/adapters/hbase/standard_hbase_with_expires_spec.rb000066400000000000000000000003711433316074200272030ustar00rootroot00000000000000describe 'standard_hbase_with_expires', unstable: true, adapter: :HBase do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :HBase, {table: "simple_hbase", expires: true} moneta_specs STANDARD_SPECS.with_expires end moneta-1.5.2/spec/moneta/adapters/kyotocabinet/000077500000000000000000000000001433316074200215205ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/kyotocabinet/adapter_kyotocabinet_spec.rb000066400000000000000000000005451433316074200272560ustar00rootroot00000000000000describe 'adapter_kyotocabinet', unsupported: defined?(JRUBY_VERSION) || ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('2.7.0'), adapter: 
:KyotoCabinet do moneta_build do Moneta::Adapters::KyotoCabinet.new(file: File.join(tempdir, "adapter_kyotocabinet.kch")) end moneta_specs ADAPTER_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/kyotocabinet/standard_kyotocabinet_spec.rb000066400000000000000000000005221433316074200274310ustar00rootroot00000000000000describe 'standard_kyotocabinet', unsupported: defined?(JRUBY_VERSION) || ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('2.7.0'), adapter: :KyotoCabinet do moneta_store :KyotoCabinet do {file: File.join(tempdir, "simple_kyotocabinet.kch")} end moneta_specs STANDARD_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/kyotocabinet/standard_kyotocabinet_with_expires_spec.rb000066400000000000000000000007321433316074200322260ustar00rootroot00000000000000describe 'standard_kyotocabinet_with_expires', unsupported: defined?(JRUBY_VERSION)|| ::Gem::Version.new(RUBY_ENGINE_VERSION) >= ::Gem::Version.new('2.7.0'), adapter: :KyotoCabinet do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :KyotoCabinet do { file: File.join(tempdir, "simple_kyotocabinet_with_expires.kch"), expires: true } end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/leveldb/000077500000000000000000000000001433316074200204425ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/leveldb/adapter_leveldb_spec.rb000066400000000000000000000004201433316074200251120ustar00rootroot00000000000000describe 'adapter_leveldb', unsupported: defined?(JRUBY_VERSION), adapter: :LevelDB do moneta_build do Moneta::Adapters::LevelDB.new(dir: File.join(tempdir, "adapter_leveldb")) end moneta_specs ADAPTER_SPECS.with_each_key.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/leveldb/standard_leveldb_spec.rb000066400000000000000000000003611433316074200252760ustar00rootroot00000000000000describe 'standard_leveldb', unsupported: 
defined?(JRUBY_VERSION), adapter: :LevelDB do moneta_store :LevelDB do {dir: File.join(tempdir, "standard_leveldb")} end moneta_specs STANDARD_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/leveldb/standard_leveldb_with_expires_spec.rb000066400000000000000000000005501433316074200300700ustar00rootroot00000000000000describe 'standard_leveldb_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :LevelDB do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :LevelDB do {dir: File.join(tempdir, "standard_leveldb_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/lmdb/000077500000000000000000000000001433316074200177435ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/lmdb/adapter_lmdb_spec.rb000066400000000000000000000003641433316074200237230ustar00rootroot00000000000000describe 'adapter_lmdb', unsupported: defined?(JRUBY_VERSION), adapter: :LMDB do moneta_build do Moneta::Adapters::LMDB.new(dir: File.join(tempdir, "adapter_lmdb")) end moneta_specs ADAPTER_SPECS.without_concurrent.with_each_key end moneta-1.5.2/spec/moneta/adapters/lmdb/adapter_lmdb_with_db_spec.rb000066400000000000000000000004301433316074200254150ustar00rootroot00000000000000describe 'adapter_lmdb_with_db', unsupported: defined?(JRUBY_VERSION), adapter: :LMDB do moneta_build do Moneta::Adapters::LMDB.new(dir: File.join(tempdir, "adapter_lmdb"), db: "adapter_lmdb_with_db") end moneta_specs ADAPTER_SPECS.without_concurrent.with_each_key end moneta-1.5.2/spec/moneta/adapters/lmdb/standard_lmdb_spec.rb000066400000000000000000000003411433316074200240760ustar00rootroot00000000000000describe 'standard_lmdb', unsupported: defined?(JRUBY_VERSION), adapter: :LMDB do moneta_store :LMDB do {dir: File.join(tempdir, "simple_lmdb")} end moneta_specs STANDARD_SPECS.without_concurrent.with_each_key end 
moneta-1.5.2/spec/moneta/adapters/lmdb/standard_lmdb_with_expires_spec.rb000066400000000000000000000005241433316074200266730ustar00rootroot00000000000000describe 'standard_lmdb_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :LMDB do let(:t_res) { 1 } let(:min_ttl) { t_res } use_timecop moneta_store :LMDB do {dir: File.join(tempdir, "simple_lmdb_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_concurrent.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/localmemcache/000077500000000000000000000000001433316074200216025ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/localmemcache/adapter_localmemcache_spec.rb000066400000000000000000000004311433316074200274140ustar00rootroot00000000000000describe 'adapter_localmemcache', unsupported: defined?(JRUBY_VERSION), adapter: :LocalMemCache do moneta_build do Moneta::Adapters::LocalMemCache.new(file: File.join(tempdir, "adapter_localmemcache")) end moneta_specs ADAPTER_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/adapters/localmemcache/standard_localmemcache_spec.rb000066400000000000000000000004061433316074200275760ustar00rootroot00000000000000describe 'standard_localmemcache', unsupported: defined?(JRUBY_VERSION), adapter: :LocalMemCache do moneta_store :LocalMemCache do {file: File.join(tempdir, "simple_localmemcache")} end moneta_specs STANDARD_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/adapters/localmemcache/standard_localmemcache_with_expires_spec.rb000066400000000000000000000005751433316074200323770ustar00rootroot00000000000000describe 'standard_localmemcache_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :LocalMemCache do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :LocalMemCache do {file: File.join(tempdir, "simple_localmemcache_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_increment.without_create.with_expires end 
moneta-1.5.2/spec/moneta/adapters/lruhash/000077500000000000000000000000001433316074200204735ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/lruhash/adapter_lruhash_spec.rb000066400000000000000000000107741433316074200252110ustar00rootroot00000000000000describe 'adapter_lruhash', adapter: :LRUHash do moneta_build do Moneta::Adapters::LRUHash.new end moneta_specs ADAPTER_SPECS.with_each_key.without_persist.returnsame it 'deletes oldest' do store = Moneta::Adapters::LRUHash.new(max_size: 10) store[0] = 'y' (1..1000).each do |i| store[i] = 'x' store[0].should == 'y' store.instance_variable_get(:@backend).size.should == [10, i+1].min (0...[9, i-1].min).each do |j| store.instance_variable_get(:@backend)[i-j].should_not be_nil end store.key?(i-9).should be false if i > 9 end end it 'adds a value that is the same as max_size' do store = Moneta::Adapters::LRUHash.new(max_size: 21) store[:a_key] = 'This is 21 bytes long' store[:a_key].should eq('This is 21 bytes long') end it 'does not add a value that is larger than max_size' do store = Moneta::Adapters::LRUHash.new(max_size: 20) store[:too_long] = 'This is 21 bytes long' store[:too_long].should be_nil end it 'removes an existing key that is replaced by an item that is larger than max_size' do store = Moneta::Adapters::LRUHash.new(max_size: 20) store[:a_key] = 'This will fit' store[:a_key] = 'This is 21 bytes long' store[:a_key].should be_nil end it 'does not add a value that is larger than max_size, when max_value is explicitly missing' do store = Moneta::Adapters::LRUHash.new(max_size: 20, max_value: nil) store[:too_long] = 'This is 21 bytes long' store[:too_long].should be_nil end it 'does not add a value that is larger than max_size, even if max_value is larger than max_size' do store = Moneta::Adapters::LRUHash.new(max_size: 20, max_value: 25) store[:too_long] = 'This is 21 bytes long' store[:too_long].should be_nil end it 'adds a value that is as large as the default max_size when max_size is missing' 
do store = Moneta::Adapters::LRUHash.new expect(store.config.max_size).to eq Moneta::Adapters::LRUHash.config_defaults[:max_size] large_item = 'Really big' allow(large_item).to receive(:bytesize).and_return(Moneta::Adapters::LRUHash.config_defaults[:max_size]) store[:really_big] = large_item store[:really_big].should eq(large_item) end it 'does not add values that are larger than the default max_size when max_size is missing' do store = Moneta::Adapters::LRUHash.new large_item = 'Really big' allow(large_item).to receive(:bytesize).and_return(Moneta::Adapters::LRUHash.config_defaults[:max_size] + 1) store[:really_big] = large_item store[:really_big].should be_nil end it 'adds values that are larger than the default max_size when max_size is nil' do store = Moneta::Adapters::LRUHash.new(max_size: nil) large_item = 'Really big' allow(large_item).to receive(:bytesize).and_return(Moneta::Adapters::LRUHash.config_defaults[:max_size] + 1) store[:really_big] = large_item store[:really_big].should eq(large_item) end it 'adds an individual value that is equal to max_value' do store = Moneta::Adapters::LRUHash.new(max_value: 13) store[:a_key] = '13 bytes long' store[:a_key].should eq('13 bytes long') end it 'does not add a value that is larger than max_value' do store = Moneta::Adapters::LRUHash.new(max_value: 20) store[:too_long] = 'This is 21 bytes long' store[:too_long].should be_nil end it 'removes keys that are replaced by values larger than max_value' do store = Moneta::Adapters::LRUHash.new(max_value: 20) store[:too_long] = 'This will fit' store[:too_long] = 'This is 21 bytes long' store[:too_long].should be_nil end it 'only allows the default number of items when max_count is missing' do defaults = Moneta::Adapters::LRUHash.config_defaults allow(Moneta::Adapters::LRUHash).to receive(:config_defaults).and_return(defaults.merge(max_count: 5)) store = Moneta::Adapters::LRUHash.new(max_value: nil, max_size: nil) (1..6).each { |n| store[n] = n } store.key?(1).should be 
false store[1].should be_nil store[2].should eq(2) store[6].should eq(6) end it 'adds more values than the default max_count allows when max_count is nil' do defaults = Moneta::Adapters::LRUHash.config_defaults allow(Moneta::Adapters::LRUHash).to receive(:config_defaults).and_return(defaults.merge(max_count: 5)) store = Moneta::Adapters::LRUHash.new(max_count: nil, max_value: nil, max_size: nil) (1..6).each { |n| store[n] = n } store[1].should eq(1) store[2].should eq(2) store[6].should eq(6) end end moneta-1.5.2/spec/moneta/adapters/lruhash/standard_lruhash_spec.rb000066400000000000000000000002121433316074200253530ustar00rootroot00000000000000describe 'standard_lruhash', adapter: :LRUHash do moneta_store :LRUHash moneta_specs STANDARD_SPECS.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/lruhash/standard_lruhash_with_expires_spec.rb000066400000000000000000000003661433316074200301570ustar00rootroot00000000000000describe 'standard_lruhash_with_expires', adapter: :LRUHash do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :LRUHash, {expires: true} moneta_specs STANDARD_SPECS.with_expires.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/memcached/000077500000000000000000000000001433316074200207335ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/memcached/adapter_memcached_spec.rb000066400000000000000000000004721433316074200257030ustar00rootroot00000000000000require_relative '../memcached_helper.rb' describe 'adapter_memcached', adapter: :Memcached do include_context :start_memcached, 11216 moneta_build do Moneta::Adapters::Memcached.new(server: "127.0.0.1:11216") end it "is a Memcached adapter" do expect(store).to be_a_memcached_adapter end end 
moneta-1.5.2/spec/moneta/adapters/memcached/dalli/000077500000000000000000000000001433316074200220205ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/memcached/dalli/adapter_memcached_dalli_spec.rb000066400000000000000000000012461433316074200301350ustar00rootroot00000000000000require_relative '../../memcached_helper.rb' describe 'adapter_memcached_dalli', retry: 3, adapter: :Memcached do # See https://github.com/memcached/memcached/issues/307 let(:t_res) { 1 } let(:min_ttl) { 2 } include_context :start_memcached, 11212 describe 'without default expires' do moneta_build do Moneta::Adapters::MemcachedDalli.new(server: "127.0.0.1:11212") end moneta_specs ADAPTER_SPECS.with_native_expires end describe 'with default expires' do moneta_build do Moneta::Adapters::MemcachedDalli.new(server: '127.0.0.1:11212', expires: min_ttl) end moneta_specs NATIVE_EXPIRY_SPECS.with_default_expires end end moneta-1.5.2/spec/moneta/adapters/memcached/dalli/standard_memcached_dalli_spec.rb000066400000000000000000000004721433316074200303150ustar00rootroot00000000000000require_relative '../../memcached_helper.rb' describe 'standard_memcached_dalli', retry: 3, adapter: :Memcached do let(:t_res) { 1 } let(:min_ttl) { 2 } include_context :start_memcached, 11218 moneta_store :MemcachedDalli, server: "127.0.0.1:11218" moneta_specs STANDARD_SPECS.with_native_expires end moneta-1.5.2/spec/moneta/adapters/memcached/native/000077500000000000000000000000001433316074200222215ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/memcached/native/adapter_memcached_native_spec.rb000066400000000000000000000013331433316074200305340ustar00rootroot00000000000000require_relative '../../memcached_helper.rb' describe 'adapter_memcached_native', isolate: true, unstable: defined?(JRUBY_VERSION), retry: 3, adapter: :Memcached do # See https://github.com/memcached/memcached/issues/307 let(:t_res) { 1 } let(:min_ttl) { 2 } include_context :start_memcached, 11214 describe 'without default 
expires' do moneta_build do Moneta::Adapters::MemcachedNative.new(server: "127.0.0.1:11214") end moneta_specs ADAPTER_SPECS.with_native_expires end describe 'with default expires' do moneta_build do Moneta::Adapters::MemcachedNative.new(server: '127.0.0.1:11214', expires: min_ttl) end moneta_specs NATIVE_EXPIRY_SPECS.with_default_expires end end moneta-1.5.2/spec/moneta/adapters/memcached/native/standard_memcached_native_spec.rb000066400000000000000000000005371433316074200307210ustar00rootroot00000000000000require_relative '../../memcached_helper.rb' describe 'standard_memcached_native', unstable: defined?(JRUBY_VERSION), retry: 3, adapter: :Memcached do let(:t_res) { 1 } let(:min_ttl) { 2 } include_context :start_memcached, 11219 moneta_store :MemcachedNative, server: "127.0.0.1:11219" moneta_specs STANDARD_SPECS.with_native_expires end moneta-1.5.2/spec/moneta/adapters/memcached/standard_memcached_spec.rb000066400000000000000000000006701433316074200260630ustar00rootroot00000000000000require_relative '../memcached_helper.rb' describe 'standard_memcached', adapter: :Memcached do include_context :start_memcached, 11220 moneta_store :Memcached, server: "127.0.0.1:11220" it "uses one of the Memcached adapters" do # recurse down through adapters adapter = store.adapter while adapter.respond_to?(:adapter) adapter = adapter.adapter end expect(adapter).to be_a_memcached_adapter end end moneta-1.5.2/spec/moneta/adapters/memcached_helper.rb000066400000000000000000000011011433316074200226100ustar00rootroot00000000000000RSpec.shared_context :start_memcached do |port| before :context do @memcached = spawn("memcached -p #{port}") sleep 0.5 end after :context do Process.kill("TERM", @memcached) Process.wait(@memcached) @memcached = nil end let :be_a_memcached_adapter do klasses = [ defined?(::Moneta::Adapters::MemcachedDalli) ? ::Moneta::Adapters::MemcachedDalli : nil, defined?(::Moneta::Adapters::MemcachedNative) ? 
::Moneta::Adapters::MemcachedNative : nil ].compact klasses.map { |klass| be_instance_of(klass) }.inject(:or) end end moneta-1.5.2/spec/moneta/adapters/memory/000077500000000000000000000000001433316074200203355ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/memory/adapter_memory_spec.rb000066400000000000000000000003061433316074200247030ustar00rootroot00000000000000describe 'adapter_memory', adapter: :Memory do moneta_build do Moneta::Adapters::Memory.new end moneta_specs STANDARD_SPECS.with_each_key.without_transform.returnsame.without_persist end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_spec.rb000066400000000000000000000002071433316074200250630ustar00rootroot00000000000000describe 'standard_memory', adapter: :Memory do moneta_store :Memory moneta_specs STANDARD_SPECS.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_with_compress_spec.rb000066400000000000000000000003731433316074200300350ustar00rootroot00000000000000describe 'standard_memory_with_compress', adapter: :Memory do moneta_store :Memory, {compress: true} moneta_loader do |value| Marshal.load(::Zlib::Inflate.inflate(value)) end moneta_specs STANDARD_SPECS.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_with_expires_spec.rb000066400000000000000000000003631433316074200276600ustar00rootroot00000000000000describe 'standard_memory_with_expires', adapter: :Memory do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :Memory, {expires: true} moneta_specs STANDARD_SPECS.with_expires.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_with_json_key_serializer_spec.rb000066400000000000000000000003421433316074200322500ustar00rootroot00000000000000describe 'standard_memory_with_json_key_serializer', adapter: :Memory do moneta_store :Memory, {key_serializer: :json} moneta_specs 
STANDARD_SPECS.without_marshallable_key.simplekeys_only.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_with_json_serializer_spec.rb000066400000000000000000000004471433316074200314060ustar00rootroot00000000000000describe 'standard_memory_with_json_serializer', adapter: :Memory do moneta_store :Memory, {serializer: :json} moneta_loader do |value| ::MultiJson.load(value) end moneta_specs STANDARD_SPECS.without_marshallable.simplekeys_only.simplevalues_only.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_with_json_value_serializer_spec.rb000066400000000000000000000004511433316074200325750ustar00rootroot00000000000000describe 'standard_memory_with_json_value_serializer', adapter: :Memory do moneta_store :Memory, {value_serializer: :json} moneta_loader do |value| ::MultiJson.load(value) end moneta_specs STANDARD_SPECS.without_marshallable_value.simplevalues_only.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_with_prefix_spec.rb000066400000000000000000000023411433316074200274740ustar00rootroot00000000000000describe 'standard_memory_with_prefix', adapter: :Memory do moneta_store :Memory, { prefix: "moneta" } moneta_specs STANDARD_SPECS.without_persist.with_each_key context 'with keys from no prefix' do before(:each) do store.adapter.adapter.backend['no_prefix'] = 'hidden' end after(:each) do expect(store.adapter.adapter.backend.keys).to include('no_prefix') end include_examples :each_key end context 'with keys from other prefixes' do before do backend = store.adapter.adapter.backend @alternative_store ||= Moneta.build do use :Transformer, key: [:marshal, :prefix], value: :marshal, prefix: 'alternative_' adapter :Memory, backend: backend end expect(@alternative_store).to be_a(Moneta::Transformer::MarshalPrefixKeyMarshalValue) end let(:alternative) { @alternative_store } before(:each) do alternative.store('with_prefix_key', 'hidden') end 
after(:each) do expect(store.adapter.adapter.backend.keys).to include('alternative_with_prefix_key') expect(alternative.each_key.to_a).to eq(['with_prefix_key']) expect(alternative['with_prefix_key']).to eq('hidden') end include_examples :each_key end end moneta-1.5.2/spec/moneta/adapters/memory/standard_memory_with_snappy_compress_spec.rb000066400000000000000000000004411433316074200314230ustar00rootroot00000000000000describe 'standard_memory_with_snappy_compress', unstable: defined?(JRUBY_VERSION), adapter: :Memory do moneta_store :Memory, {compress: :snappy} moneta_loader do |value| Marshal.load(::Snappy.inflate(value)) end moneta_specs STANDARD_SPECS.without_persist.with_each_key end moneta-1.5.2/spec/moneta/adapters/mongo/000077500000000000000000000000001433316074200201445ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/mongo/adapter_mongo_spec.rb000066400000000000000000000021041433316074200243170ustar00rootroot00000000000000describe 'adapter_mongo', adapter: :Mongo do let(:t_res) { 0.125 } let(:min_ttl) { t_res } let(:database) { File.basename(__FILE__, '.rb') } moneta_build do Moneta::Adapters::Mongo.new( database: database, collection: 'adapter_mongo' ) end moneta_specs ADAPTER_SPECS.with_each_key.with_native_expires.simplevalues_only it 'automatically deletes expired document' do store.store('key', 'val', expires: 5) i = 0 query = store.instance_variable_get(:@collection).find(_id: ::BSON::Binary.new('key')) while i < 70 && query.first i += 1 sleep 1 # Mongo needs up to 60 seconds end i.should be > 0 # Indicates that it took at least one sleep to expire query.count.should == 0 end it 'uses the database specified via the :database option' do expect(store.config.database).to eq database end it 'uses the database specified via the :db option' do store = Moneta::Adapters::Mongo.new( db: database, collection: 'adapter_mongo' ) expect(store.config.database).to eq database end end 
moneta-1.5.2/spec/moneta/adapters/mongo/adapter_mongo_with_default_expires_spec.rb000066400000000000000000000006511433316074200306220ustar00rootroot00000000000000describe 'adapter_mongo_with_default_expires', isolate: true, adapter: :Mongo do let(:t_res) { 0.125 } let(:min_ttl) { t_res * 4 } moneta_build do Moneta::Adapters::Mongo.new( database: File.basename(__FILE__, '.rb'), collection: 'adapter_mongo_with_default_expires', expires: min_ttl ) end moneta_specs ADAPTER_SPECS.with_each_key.with_expires.with_default_expires.simplevalues_only end moneta-1.5.2/spec/moneta/adapters/mongo/standard_mongo_spec.rb000066400000000000000000000004051433316074200245010ustar00rootroot00000000000000describe 'standard_mongo', adapter: :Mongo do let(:t_res) { 0.125 } let(:min_ttl) { t_res } moneta_store :Mongo, {database: File.basename(__FILE__, '.rb'), collection: 'standard_mongo'} moneta_specs STANDARD_SPECS.with_native_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/null/000077500000000000000000000000001433316074200177775ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/null/null_adapter_spec.rb000066400000000000000000000003201433316074200240030ustar00rootroot00000000000000describe "null_adapter", adapter: :Null do moneta_build do Moneta::Adapters::Null.new end moneta_specs MonetaSpecs.new(specs: [:null, :not_increment, :not_create, :not_persist, :not_each_key]) end moneta-1.5.2/spec/moneta/adapters/null/standard_null_spec.rb000066400000000000000000000005031433316074200241660ustar00rootroot00000000000000describe 'standard_null', adapter: :Null do moneta_store :Null moneta_specs STANDARD_SPECS.without_increment.without_create.without_store.without_persist it 'works when constructed with a proxy object' do store = Moneta.new(:Null, expires: 1) expect { store['moneta'] = 'test' }.not_to raise_error end end 
moneta-1.5.2/spec/moneta/adapters/pstore/000077500000000000000000000000001433316074200203415ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/pstore/adapter_pstore_spec.rb000066400000000000000000000003751433316074200247210ustar00rootroot00000000000000describe 'adapter_pstore', unsupported: defined?(JRUBY_VERSION), adapter: :PStore do moneta_build do Moneta::Adapters::PStore.new(file: File.join(tempdir, "adapter_pstore")) end moneta_specs STANDARD_SPECS.with_each_key.without_transform end moneta-1.5.2/spec/moneta/adapters/pstore/standard_pstore_spec.rb000066400000000000000000000003711433316074200250750ustar00rootroot00000000000000describe 'standard_pstore', unsupported: defined?(JRUBY_VERSION), adapter: :PStore do moneta_store :PStore do {file: File.join(tempdir, "simple_pstore")} end moneta_loader{ |value| value } moneta_specs STANDARD_SPECS.with_each_key end moneta-1.5.2/spec/moneta/adapters/pstore/standard_pstore_with_expires_spec.rb000066400000000000000000000005601433316074200276670ustar00rootroot00000000000000describe 'standard_pstore_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :PStore do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :PStore do {file: File.join(tempdir, "simple_pstore_with_expires"), expires: true} end moneta_loader{ |value| value } moneta_specs STANDARD_SPECS.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/redis/000077500000000000000000000000001433316074200201335ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/redis/adapter_redis_spec.rb000066400000000000000000000010351433316074200242770ustar00rootroot00000000000000describe 'adapter_redis', adapter: :Redis do let(:t_res) { 0.125 } let(:min_ttl) { t_res } describe 'without default expiry' do moneta_build do Moneta::Adapters::Redis.new(host: redis_host, port: redis_port, db: 6) end moneta_specs ADAPTER_SPECS.with_each_key.with_native_expires end describe 'with default expiry' do moneta_build do 
Moneta::Adapters::Redis.new(host: redis_host, port: redis_port, db: 6, expires: min_ttl) end moneta_specs NATIVE_EXPIRY_SPECS.with_default_expires end end moneta-1.5.2/spec/moneta/adapters/redis/standard_redis_spec.rb000066400000000000000000000004151433316074200244600ustar00rootroot00000000000000describe 'standard_redis', adapter: :Redis do let(:t_res) { 0.125 } let(:min_ttl) { t_res } moneta_store :Redis do { db: 4, host: redis_host, port: redis_port } end moneta_specs STANDARD_SPECS.with_native_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/restclient/000077500000000000000000000000001433316074200212015ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/restclient/adapter_restclient_spec.rb000066400000000000000000000006161433316074200264170ustar00rootroot00000000000000require_relative '../faraday_helper.rb' require_relative './helper.rb' describe 'adapter_restclient', adapter: :RestClient do include_context :faraday_adapter include_context :start_restserver, 11933 moneta_build do Moneta::Adapters::RestClient.new(url: 'http://localhost:11933/moneta', adapter: faraday_adapter) end moneta_specs ADAPTER_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/adapters/restclient/helper.rb000066400000000000000000000004131433316074200230030ustar00rootroot00000000000000require_relative '../../../restserver.rb' RSpec.shared_context :start_restserver do |port| before :context do @restserver_handle = start_restserver(port) end after :context do stop_restserver(@restserver_handle) @restserver_handle = nil end end moneta-1.5.2/spec/moneta/adapters/restclient/standard_restclient_spec.rb000066400000000000000000000005761433316074200266040ustar00rootroot00000000000000require_relative '../faraday_helper.rb' require_relative './helper.rb' describe 'standard_restclient', adapter: :RestClient do include_context :faraday_adapter include_context :start_restserver, 11934 moneta_store :RestClient do { url: 'http://localhost:11934/moneta', adapter: 
faraday_adapter } end moneta_specs STANDARD_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/adapters/riak/000077500000000000000000000000001433316074200177535ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/riak/adapter_riak_spec.rb000066400000000000000000000005411433316074200237400ustar00rootroot00000000000000describe 'adapter_riak', isolate: true, unstable: true, adapter: :Riak do before :all do require 'riak' # We don't want Riak warnings in tests Riak.disable_list_keys_warnings = true end moneta_build do Moneta::Adapters::Riak.new(:bucket => 'adapter_riak') end moneta_specs ADAPTER_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/adapters/riak/standard_riak_spec.rb000066400000000000000000000004251433316074200241210ustar00rootroot00000000000000describe 'standard_riak', isolate: true, unstable: true, adapter: :Riak do before :all do require 'riak' Riak.disable_list_keys_warnings = true end moneta_store :Riak, {bucket: 'standard_riak'} moneta_specs STANDARD_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/adapters/riak/standard_riak_with_expires_spec.rb000066400000000000000000000005751433316074200267210ustar00rootroot00000000000000describe 'standard_riak_with_expires', unstable: true, adapter: :Riak do before :all do require 'riak' Riak.disable_list_keys_warnings = true end let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :Riak, {bucket: 'standard_riak_with_expires', expires: true} moneta_specs STANDARD_SPECS.without_increment.with_expires.without_create end moneta-1.5.2/spec/moneta/adapters/sdbm/000077500000000000000000000000001433316074200177525ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/sdbm/adapter_sdbm_spec.rb000066400000000000000000000004051433316074200237350ustar00rootroot00000000000000describe 'adapter_sdbm', unsupported: defined?(JRUBY_VERSION), adapter: :SDBM do moneta_build do Moneta::Adapters::SDBM.new(file: File.join(tempdir, 
"adapter_sdbm")) end moneta_specs ADAPTER_SPECS.without_multiprocess.without_large.with_each_key end moneta-1.5.2/spec/moneta/adapters/sdbm/standard_sdbm_spec.rb000066400000000000000000000003621433316074200241170ustar00rootroot00000000000000describe 'standard_sdbm', unsupported: defined?(JRUBY_VERSION), adapter: :SDBM do moneta_store :SDBM do {file: File.join(tempdir, "simple_sdbm")} end moneta_specs STANDARD_SPECS.without_multiprocess.without_large.with_each_key end moneta-1.5.2/spec/moneta/adapters/sdbm/standard_sdbm_with_expires_spec.rb000066400000000000000000000005511433316074200267110ustar00rootroot00000000000000describe 'standard_sdbm_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :SDBM do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :SDBM do {file: File.join(tempdir, "simple_sdbm_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.without_large.with_each_key end moneta-1.5.2/spec/moneta/adapters/sequel/000077500000000000000000000000001433316074200203235ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/sequel/adapter_sequel_spec.rb000066400000000000000000000067751433316074200246770ustar00rootroot00000000000000require_relative './helper.rb' describe ':Sequel adapter', adapter: :Sequel do include_context :sequel specs = ADAPTER_SPECS.with_each_key.with_values(:nil) context 'with MySQL backend', mysql: true do moneta_build do Moneta::Adapters::Sequel.new(opts.merge(db: mysql_uri)) end include_examples :adapter_sequel, specs end context "with SQLite backend", sqlite: true do moneta_build do Moneta::Adapters::Sequel.new(opts.merge(db: sqlite_uri('adapter_sequel.db'))) end include_examples :adapter_sequel, specs.without_concurrent end context "with Postgres backend", postgres: true do moneta_build do Moneta::Adapters::Sequel.new(opts.merge(postgres_options)) end include_examples :adapter_sequel, specs end context "with H2 backend", unsupported: 
!defined?(JRUBY_VERSION) do moneta_build do Moneta::Adapters::Sequel.new(opts.merge(db: h2_uri)) end include_examples :adapter_sequel, specs, optimize: false end context "with Postgres HStore backend", postgres: true do moneta_build do Moneta::Adapters::Sequel.new(postgres_hstore_options) end # Concurrency is too slow, and binary values cannot be stored in an hstore include_examples :adapter_sequel, specs.without_values(:binary).without_concurrent, optimize: false end describe 'table creation' do let(:conn_str) do "#{defined?(JRUBY_VERSION) && 'jdbc:'}sqlite://" + File.join(tempdir, 'adapter_sequel.db') end let(:backend) do Sequel.connect(conn_str) end let(:table_name) { :adapter_sequel_table_creation } before { backend.drop_table?(table_name) } shared_examples :table_creation do shared_examples :create_table do it "creates the table" do store = new_store expect(backend.table_exists?(table_name)).to be true expect(backend[table_name].columns).to include(store.key_column, store.value_column) end end context "with :db parameter" do moneta_build do Moneta::Adapters::Sequel.new(opts.merge(db: conn_str, table: table_name)) end include_examples :create_table end context "with :backend parameter" do moneta_build do Moneta::Adapters::Sequel.new(opts.merge(backend: backend, table: table_name)) end include_examples :create_table end end context 'without :create_table option' do context 'with default columns' do let(:opts) { {} } include_examples :table_creation end context 'with :key_column option' do let(:opts) { {key_column: :some_key} } include_examples :table_creation end context 'with :value_column option' do let(:opts) { {value_column: :my_value} } include_examples :table_creation end end context 'with :create_table proc' do let :opts do { create_table: lambda do |conn| called = true conn.create_table? 
table_name do String :k, primary_key: true File :v Integer :other_col end end } end include_examples :table_creation end context 'with :create_table false' do moneta_build do Moneta::Adapters::Sequel.new(db: conn_str, table: table_name, create_table: false) end it "doesn't create the table" do new_store expect(backend.table_exists?(table_name)).to be false end end end end moneta-1.5.2/spec/moneta/adapters/sequel/helper.rb000066400000000000000000000040151433316074200221270ustar00rootroot00000000000000RSpec.shared_context :sequel do def mysql_uri database=nil database ||= mysql_database1 if defined?(JRUBY_VERSION) uri = "jdbc:mysql://#{mysql_host}:#{mysql_port}/#{database}?user=#{mysql_username}&useSSL=false" uri += "&password=#{mysql_password}" if mysql_password uri += "&socket=#{mysql_socket}" if mysql_socket uri else uri = "mysql2://#{mysql_username}:#{mysql_password}@#{mysql_host}:#{mysql_port}/#{database}" uri += "?socket=#{mysql_socket}" if mysql_socket uri end end def sqlite_uri file_name "#{defined?(JRUBY_VERSION) && 'jdbc:'}sqlite://" + File.join(tempdir, file_name) end def postgres_options database=nil database ||= postgres_database1 if defined?(JRUBY_VERSION) uri = "jdbc:postgresql://localhost/#{database}?user=#{postgres_username}" uri += "&password=#{postgres_password}" if postgres_password {db: uri} else { db: "postgres://localhost/#{database}", user: postgres_username, password: postgres_password } end end def postgres_hstore_options database=nil postgres_options(database).merge \ table: 'hstore_table1', hstore: 'row' end def h2_uri "jdbc:h2:" + tempdir end end RSpec.shared_examples :adapter_sequel do |specs, optimize: true| shared_examples :each_key_server do context "with each_key server" do let(:opts) do base_opts.merge( servers: {each_key: {}}, each_key_server: :each_key ) end moneta_specs specs end context "without each_key server" do let(:opts) { base_opts } moneta_specs specs end end if optimize context 'with backend optimizations' do 
let(:base_opts) { {table: "adapter_sequel"} } include_examples :each_key_server end end context 'without backend optimizations' do let(:base_opts) do { table: "adapter_sequel", optimize: false } end include_examples :each_key_server end end moneta-1.5.2/spec/moneta/adapters/sequel/standard_sequel_spec.rb000066400000000000000000000003701433316074200250400ustar00rootroot00000000000000require_relative './helper.rb' describe 'standard_sequel', adapter: :Sequel, postgres: true do include_context :sequel moneta_store(:Sequel) { postgres_options.merge(table: "standard_sequel") } moneta_specs STANDARD_SPECS.with_each_key end moneta-1.5.2/spec/moneta/adapters/sequel/standard_sequel_with_expires_spec.rb000066400000000000000000000006121433316074200276310ustar00rootroot00000000000000require_relative './helper.rb' describe 'standard_sequel_with_expires', adapter: :Sequel, postgres: true do include_context :sequel let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :Sequel do postgres_options.merge( table: "standard_sequel_with_expires", expires: true ) end moneta_specs STANDARD_SPECS.with_expires.with_each_key end moneta-1.5.2/spec/moneta/adapters/sqlite/000077500000000000000000000000001433316074200203265ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/sqlite/adapter_sqlite_spec.rb000066400000000000000000000004351433316074200246700ustar00rootroot00000000000000describe 'adapter_sqlite', unsupported: defined?(JRUBY_VERSION), adapter: :Sqlite, sqlite: true do moneta_build do Moneta::Adapters::Sqlite.new(file: File.join(tempdir, "adapter_sqlite")) end moneta_specs ADAPTER_SPECS.with_values(:nil).without_concurrent.with_each_key end moneta-1.5.2/spec/moneta/adapters/sqlite/standard_sqlite_spec.rb000066400000000000000000000003721433316074200250500ustar00rootroot00000000000000describe 'standard_sqlite', unsupported: defined?(JRUBY_VERSION), adapter: :Sqlite, sqlite: true do moneta_store :Sqlite do {file: File.join(tempdir, "standard_sqlite")} end 
moneta_specs STANDARD_SPECS.without_concurrent.with_each_key end moneta-1.5.2/spec/moneta/adapters/sqlite/standard_sqlite_with_expires_spec.rb000066400000000000000000000006311433316074200276400ustar00rootroot00000000000000describe 'standard_sqlite_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :Sqlite, sqlite: true do let(:t_res) { 0.125 } let(:min_ttl) { 1 } use_timecop moneta_store :Sqlite do { file: File.join(tempdir, "standard_sqlite_with_expires"), expires: true, journal_mode: :wal } end moneta_specs STANDARD_SPECS.with_expires.without_concurrent.with_each_key end moneta-1.5.2/spec/moneta/adapters/tdb/000077500000000000000000000000001433316074200175765ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/tdb/adapter_tdb_spec.rb000066400000000000000000000003631433316074200234100ustar00rootroot00000000000000describe 'adapter_tdb', unsupported: defined?(JRUBY_VERSION), adapter: :TDB do moneta_build do Moneta::Adapters::TDB.new(file: File.join(tempdir, "adapter_tdb")) end moneta_specs ADAPTER_SPECS.with_each_key.without_multiprocess end moneta-1.5.2/spec/moneta/adapters/tdb/standard_tdb_spec.rb000066400000000000000000000003401433316074200235630ustar00rootroot00000000000000describe 'standard_tdb', unsupported: defined?(JRUBY_VERSION), adapter: :TDB do moneta_store :TDB do {file: File.join(tempdir, "simple_tdb")} end moneta_specs STANDARD_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/tdb/standard_tdb_with_expires_spec.rb000066400000000000000000000005271433316074200263640ustar00rootroot00000000000000describe 'standard_tdb_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :TDB do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :TDB do {file: File.join(tempdir, "simple_tdb_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.with_each_key end 
moneta-1.5.2/spec/moneta/adapters/tokyocabinet/000077500000000000000000000000001433316074200215205ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/tokyocabinet/adapter_tokyocabinet_bdb_spec.rb000066400000000000000000000004531433316074200300630ustar00rootroot00000000000000describe 'adapter_tokyocabinet_bdb', unsupported: defined?(JRUBY_VERSION), adapter: :TokyoCabinet do moneta_build do Moneta::Adapters::TokyoCabinet.new(file: File.join(tempdir, "adapter_tokyocabinet_bdb"), type: :bdb) end moneta_specs ADAPTER_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/tokyocabinet/adapter_tokyocabinet_hdb_spec.rb000066400000000000000000000004531433316074200300710ustar00rootroot00000000000000describe 'adapter_tokyocabinet_hdb', unsupported: defined?(JRUBY_VERSION), adapter: :TokyoCabinet do moneta_build do Moneta::Adapters::TokyoCabinet.new(file: File.join(tempdir, "adapter_tokyocabinet_hdb"), type: :hdb) end moneta_specs ADAPTER_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/tokyocabinet/standard_tokyocabinet_spec.rb000066400000000000000000000004041433316074200274300ustar00rootroot00000000000000describe 'standard_tokyocabinet', unsupported: defined?(JRUBY_VERSION), adapter: :TokyoCabinet do moneta_store :TokyoCabinet do {file: File.join(tempdir, "simple_tokyocabinet")} end moneta_specs STANDARD_SPECS.without_multiprocess.with_each_key end moneta-1.5.2/spec/moneta/adapters/tokyocabinet/standard_tokyocabinet_with_expires_spec.rb000066400000000000000000000005731433316074200322310ustar00rootroot00000000000000describe 'standard_tokyocabinet_with_expires', unsupported: defined?(JRUBY_VERSION), adapter: :TokyoCabinet do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :TokyoCabinet do {file: File.join(tempdir, "simple_tokyocabinet_with_expires"), expires: true} end moneta_specs STANDARD_SPECS.without_multiprocess.with_expires.with_each_key end 
moneta-1.5.2/spec/moneta/adapters/tokyotyrant/000077500000000000000000000000001433316074200214345ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/tokyotyrant/adapter_tokyotyrant_spec.rb000066400000000000000000000003631433316074200271040ustar00rootroot00000000000000require_relative './helper.rb' describe 'adapter_tokyotyrant', adapter: :TokyoTyrant do include_context :start_tokyotyrant, 10654 moneta_build do Moneta::Adapters::TokyoTyrant.new(port: 10654) end moneta_specs ADAPTER_SPECS end moneta-1.5.2/spec/moneta/adapters/tokyotyrant/helper.rb000066400000000000000000000005271433316074200232440ustar00rootroot00000000000000RSpec.shared_context :start_tokyotyrant do |port| before :context do @tokyotyrant = spawn("ttserver -port #{port} -le -log #{tempdir}/tokyotyrant#{port}.log #{tempdir}/tokyotyrant#{port}.tch") sleep 0.5 end after :context do Process.kill("TERM", @tokyotyrant) Process.wait(@tokyotyrant) @tokyotyrant = nil end end moneta-1.5.2/spec/moneta/adapters/tokyotyrant/standard_tokyotyrant_spec.rb000066400000000000000000000003211433316074200272560ustar00rootroot00000000000000require_relative './helper.rb' describe 'standard_tokyotyrant', adapter: :TokyoTyrant do include_context :start_tokyotyrant, 10655 moneta_store :TokyoTyrant, port: 10655 moneta_specs STANDARD_SPECS end moneta-1.5.2/spec/moneta/adapters/tokyotyrant/standard_tokyotyrant_with_expires_spec.rb000066400000000000000000000004741433316074200320610ustar00rootroot00000000000000require_relative './helper.rb' describe 'standard_tokyotyrant_with_expires', adapter: :TokyoTyrant do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop include_context :start_tokyotyrant, 10656 moneta_store :TokyoTyrant, expires: true, port: 10656 moneta_specs STANDARD_SPECS.with_expires end 
moneta-1.5.2/spec/moneta/adapters/yaml/000077500000000000000000000000001433316074200177675ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/adapters/yaml/adapter_yaml_spec.rb000066400000000000000000000004041433316074200237660ustar00rootroot00000000000000describe 'adapter_yaml', adapter: :YAML do moneta_build do Moneta::Adapters::YAML.new(file: File.join(tempdir, "adapter_yaml")) end moneta_specs STANDARD_SPECS.simplevalues_only.simplekeys_only.with_each_key.without_transform.without_concurrent end moneta-1.5.2/spec/moneta/adapters/yaml/standard_yaml_spec.rb000066400000000000000000000003711433316074200241510ustar00rootroot00000000000000describe 'standard_yaml', adapter: :YAML do moneta_store :YAML do {file: File.join(tempdir, "simple_yaml")} end moneta_loader{ |value| value } moneta_specs STANDARD_SPECS.without_marshallable_value.without_concurrent.with_each_key end moneta-1.5.2/spec/moneta/adapters/yaml/standard_yaml_with_expires_spec.rb000066400000000000000000000005601433316074200267430ustar00rootroot00000000000000describe 'standard_yaml_with_expires', adapter: :YAML do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_store :YAML do {file: File.join(tempdir, "simple_yaml_with_expires"), expires: true} end moneta_loader{ |value| value } moneta_specs STANDARD_SPECS.without_marshallable_value.with_expires.without_concurrent.with_each_key end moneta-1.5.2/spec/moneta/builder_spec.rb000066400000000000000000000021711433316074200202100ustar00rootroot00000000000000describe Moneta::Builder do it 'raises an error if #use is called after #adapter' do expect do Moneta::Builder.new do adapter :Null use :Lock end.build end.to raise_error /Please check/ end it 'raises an error if #adapter called twice' do expect do Moneta::Builder.new do adapter :Null adapter :Null end.build end.to raise_error /Please check/ end it 'raises an error if no #adapter is specified' do expect do Moneta::Builder.new do use :Lock use :Lock end.build end.to raise_error /Please check/ 
end it 'dups options before passing them to each middleware' do my_adapter = Class.new do def initialize(options) throw "a is missing" unless options.delete(:a) end end my_middleware = Class.new do def initialize(backend, options) throw "a is missing" unless options.delete(:a) end end options = { a: 1 } Moneta::Builder.new do use my_middleware, options adapter my_adapter, options end.build expect(options).to include(a: 1) end end moneta-1.5.2/spec/moneta/config_spec.rb000066400000000000000000000121211433316074200200230ustar00rootroot00000000000000describe Moneta::Config do describe 'without any configuration' do it 'does not set the config attribute' do klass = Class.new do include ::Moneta::Config def initialize(**options) configure(**options) end end instance = klass.new(k: 'v') expect(instance.config).to be nil end end describe 'basic functionality' do subject :klass do Class.new do include ::Moneta::Config config :a config :b def initialize(**options) configure(**options) end end end it 'sets all config values to nil by default' do instance = klass.new expect(instance.config.a).to eq nil expect(instance.config.b).to be nil end it 'sets config values with values provided to #configure' do instance = klass.new(a: 1) expect(instance.config.a).to eq 1 expect(instance.config.b).to be nil end it 'freezes the config' do instance = klass.new expect(instance.config.frozen?).to be true end end describe 'with required arguments' do subject :klass do Class.new do include ::Moneta::Config config :a, required: true config :b, default: 'x', required: true def initialize(**options) configure(**options) end end end it 'raises an ArgumentError if #configure is called without one of the required arguments' do expect { klass.new(a: 1) }.to raise_error ArgumentError, 'b is required' expect { klass.new(b: 1) }.to raise_error ArgumentError, 'a is required' end end describe 'with defaults' do subject :klass do Class.new do include ::Moneta::Config config :a, default: 't' config :b, 
default: 's' def initialize(**options) configure(**options) end end end it 'uses the defaults if no argument is provided' do instance = klass.new(a: 1) expect(instance.config.a).to eq 1 expect(instance.config.b).to eq 's' end it 'allows falsy values to override truthy defaults' do instance = klass.new(a: nil, b: false) expect(instance.config.a).to be nil expect(instance.config.b).to be false end end describe 'with coercion' do describe 'using a symbol' do subject :klass do Class.new do include ::Moneta::Config config :a, coerce: :to_s def initialize(**options) configure(**options) end end end it "uses the symbol's to_proc property" do instance = klass.new(a: :x) expect(instance.config.a).to eq 'x' end end describe 'using a lambda' do subject :klass do Class.new do include ::Moneta::Config config :a, coerce: lambda { |a| a.to_sym } def initialize(**options) configure(**options) end end end it "calls the lambda" do instance = klass.new(a: 'x') expect(instance.config.a).to eq :x end end end describe 'with a block' do subject :klass do Class.new do include ::Moneta::Config config :a do |a:, b:| { a: a, b: b, test: @test } end config :b, default: 'b default' def initialize(test: nil, **options) @test = test configure(**options) end end end it 'calls the block after all arguments and defaults have been processed' do instance1 = klass.new(a: 'a value') expect(instance1.config.a).to include(a: 'a value', b: 'b default') instance2 = klass.new(b: 'b value') expect(instance2.config.a).to include(a: nil, b: 'b value') end it 'calls the block using instance_exec' do instance = klass.new(test: 'test value') expect(instance.config.a).to include(test: 'test value') end end describe 'with inheritance' do subject :klass do Class.new do include ::Moneta::Config config :a def initialize(**options) configure(**options) end end end it 'does not allow subclasses to override superclass config' do expect do Class.new(klass) do config :a end end.to raise_error ArgumentError, 'a is already a 
config option' end it 'does not affect the superclass when additional config is added to the subclass' do klass2 = Class.new(klass) do config :b end instance1 = klass.new(a: 1, b: 2) expect(instance1.config.to_h).to eq(a: 1) instance2 = klass2.new(a: 1, b: 2) expect(instance2.config.to_h).to eq(a: 1, b: 2) end it 'is possible for two subclasses to have the same additional config' do klass2 = Class.new(klass) do config :b end klass3 = Class.new(klass) do config :b end instance2 = klass2.new(a: 2, b: 1) expect(instance2.config.to_h).to eq(a: 2, b: 1) instance3 = klass3.new(a: 1, b: 2) expect(instance3.config.to_h).to eq(a: 1, b: 2) end end end moneta-1.5.2/spec/moneta/mutex_spec.rb000066400000000000000000000025131433316074200177240ustar00rootroot00000000000000describe 'mutex' do moneta_store :Memory it 'should have #lock' do mutex = Moneta::Mutex.new(store, 'mutex') mutex.lock.should be true mutex.locked?.should be true expect do mutex.lock end.to raise_error(RuntimeError) expect do mutex.try_lock end.to raise_error(RuntimeError) mutex.unlock.should be_nil mutex.locked?.should be false end it 'should have #enter' do mutex = Moneta::Mutex.new(store, 'mutex') mutex.enter.should be true mutex.locked?.should be true expect do mutex.enter end.to raise_error(RuntimeError) expect do mutex.try_enter end.to raise_error(RuntimeError) mutex.leave.should be_nil mutex.locked?.should be false end it 'should lock with #lock' do a = Moneta::Mutex.new(store, 'mutex') b = Moneta::Mutex.new(store, 'mutex') a.lock.should be true b.try_lock.should be false a.unlock.should be_nil end it 'should have lock timeout' do a = Moneta::Mutex.new(store, 'mutex') b = Moneta::Mutex.new(store, 'mutex') a.lock.should be true b.lock(1).should be false a.unlock.should be_nil end it 'should have #synchronize' do mutex = Moneta::Mutex.new(store, 'mutex') mutex.synchronize do mutex.locked?.should be true end mutex.locked?.should be false end end 
moneta-1.5.2/spec/moneta/proxies/000077500000000000000000000000001433316074200167135ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/cache/000077500000000000000000000000001433316074200177565ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/cache/cache_file_memory_spec.rb000066400000000000000000000011771433316074200247550ustar00rootroot00000000000000describe 'cache_file_memory', proxy: :Cache do moneta_build do tempdir = self.tempdir Moneta.build do use(:Cache) do adapter { adapter :File, dir: File.join(tempdir, "cache_file_memory") } cache { adapter :Memory } end end end moneta_specs ADAPTER_SPECS.returnsame.with_each_key it 'stores loaded values in cache' do store.adapter['foo'] = 'bar' store.cache['foo'].should be_nil store['foo'].should == 'bar' store.cache['foo'].should == 'bar' store.adapter.delete('foo') store['foo'].should == 'bar' store.delete('foo') store['foo'].should be_nil end end moneta-1.5.2/spec/moneta/proxies/cache/cache_memory_null_spec.rb000066400000000000000000000004451433316074200250050ustar00rootroot00000000000000describe 'cache_memory_null', proxy: :Cache do moneta_build do Moneta.build do use(:Cache) do adapter(Moneta::Adapters::Memory.new) cache(Moneta::Adapters::Null.new) end end end moneta_specs ADAPTER_SPECS.without_persist.returnsame.with_each_key end moneta-1.5.2/spec/moneta/proxies/enumerable/000077500000000000000000000000001433316074200210325ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/enumerable/enumerable_spec.rb000066400000000000000000000014071433316074200245120ustar00rootroot00000000000000describe 'enumerable', proxy: :Enumerable do moneta_build do Moneta.build do use :Enumerable adapter :Memory end end moneta_specs STANDARD_SPECS.without_transform.returnsame.without_persist.with_each_key it 'includes the enumerable interface' do expect(store).to be_a Enumerable expect(Enumerable.instance_methods).to all satisfy { |m| store.respond_to? 
m } end it 'allows enumeration over key-value pairs' do moneta_property_of(keys: 100, values: 100) do |m| pairs = m.keys.zip(m.values) store.merge!(pairs) expect(store.to_a).to contain_exactly(*pairs) expect(store.each.to_a).to contain_exactly(*pairs) expect(store.each_pair.to_a).to contain_exactly(*pairs) store.clear end end end moneta-1.5.2/spec/moneta/proxies/expires/000077500000000000000000000000001433316074200203725ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/expires/expires_file_spec.rb000066400000000000000000000013301433316074200244040ustar00rootroot00000000000000describe 'expires_file', proxy: :Expires do let(:t_res) { 0.125 } let(:min_ttl) { 0.5 } use_timecop moneta_build do tempdir = self.tempdir Moneta.build do use :Expires use :Transformer, key: [:marshal, :escape], value: :marshal adapter :File, dir: File.join(tempdir, "expires-file") end end moneta_specs STANDARD_SPECS.with_expires.stringvalues_only.with_each_key it 'deletes expired value in underlying file storage' do store.store('foo', 'bar', expires: 2) store['foo'].should == 'bar' sleep 1 store['foo'].should == 'bar' sleep 2 store['foo'].should be_nil store.adapter['foo'].should be_nil store.adapter.adapter['foo'].should be_nil end end moneta-1.5.2/spec/moneta/proxies/expires/expires_memory_spec.rb000066400000000000000000000004701433316074200250010ustar00rootroot00000000000000describe 'expires_memory', proxy: :Expires do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_build do Moneta.build do use :Expires adapter :Memory end end moneta_specs STANDARD_SPECS.without_transform.with_expires.without_persist.returnsame.with_each_key end moneta-1.5.2/spec/moneta/proxies/expires/expires_memory_with_default_expires_spec.rb000066400000000000000000000006321433316074200312770ustar00rootroot00000000000000describe 'expires_memory_with_default_expires', isolate: true, proxy: :Expires do let(:t_res) { 1 } let(:min_ttl) { t_res } use_timecop moneta_build do min_ttl = self.min_ttl 
Moneta.build do use :Expires, expires: min_ttl adapter :Memory end end moneta_specs STANDARD_SPECS.without_transform.with_expires.with_default_expires.without_persist.returnsame.with_each_key end moneta-1.5.2/spec/moneta/proxies/fallback/000077500000000000000000000000001433316074200204525ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/fallback/fallback_spec.rb000066400000000000000000000021201433316074200235430ustar00rootroot00000000000000describe 'fallback', proxy: :Fallback do context 'when the adapter is working' do moneta_build do Moneta.build do use :Fallback adapter :Memory end end moneta_specs STANDARD_SPECS.without_transform.returnsame.without_persist.with_each_key end context 'when the adapter is broken' do moneta_build do Moneta.build do use :Fallback #, rescue: [IOError, NoMethodError] adapter(Class.new do include Moneta::Defaults def load(key, options = {}) raise IOError, "deliberate error for load" end def store(key, value, options = {}) raise IOError, "deliberate error for store" end def delete(key, options = {}) raise IOError, "deliberate error for delete" end def clear(options = {}) raise IOError, "deliberate error for clear" end end.new) end end # Null adapter behaviour moneta_specs MonetaSpecs.new(specs: [:null, :not_increment, :not_create, :not_persist]) end end moneta-1.5.2/spec/moneta/proxies/lock/000077500000000000000000000000001433316074200176435ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/lock/lock_spec.rb000066400000000000000000000003311433316074200221270ustar00rootroot00000000000000describe 'lock', proxy: :Lock do moneta_build do Moneta.build do use :Lock adapter :Memory end end moneta_specs STANDARD_SPECS.without_transform.returnsame.without_persist.with_each_key end 
moneta-1.5.2/spec/moneta/proxies/optionmerger/000077500000000000000000000000001433316074200214255ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/optionmerger/optionmerger_spec.rb000066400000000000000000000106471433316074200255060ustar00rootroot00000000000000describe "optionmerger", proxy: :OptionMerger do moneta_store :Memory it '#with should return OptionMerger' do options = {optionname: :optionvalue} merger = store.with(options) merger.should be_instance_of(Moneta::OptionMerger) end it 'saves default options' do options = {optionname: :optionvalue} merger = store.with(options) Moneta::OptionMerger::METHODS.each do |method| merger.default_options[method].should equal(options) end end PREFIX = [['alpha', nil], ['beta', nil], ['alpha', 'beta']] it 'merges options' do merger = store.with(opt1: :val1, opt2: :val2).with(opt2: :overwrite, opt3: :val3) Moneta::OptionMerger::METHODS.each do |method| merger.default_options[method].should == {opt1: :val1, opt2: :overwrite, opt3: :val3} end end it 'merges options only for some methods' do PREFIX.each do |(alpha,beta)| options = {opt1: :val1, opt2: :val2, prefix: alpha} merger = store.with(options).with(opt2: :overwrite, opt3: :val3, prefix: beta, only: :clear) (Moneta::OptionMerger::METHODS - [:clear]).each do |method| merger.default_options[method].should equal(options) end merger.default_options[:clear].should == {opt1: :val1, opt2: :overwrite, opt3: :val3, prefix: "#{alpha}#{beta}"} merger = store.with(options).with(opt2: :overwrite, opt3: :val3, prefix: beta, only: [:load, :store]) (Moneta::OptionMerger::METHODS - [:load, :store]).each do |method| merger.default_options[method].should equal(options) end merger.default_options[:load].should == {opt1: :val1, opt2: :overwrite, opt3: :val3, prefix: "#{alpha}#{beta}"} merger.default_options[:store].should == {opt1: :val1, opt2: :overwrite, opt3: :val3, prefix: "#{alpha}#{beta}"} end end it 'merges options except for some methods' do PREFIX.each do |(alpha,beta)| 
options = {opt1: :val1, opt2: :val2, prefix: alpha} merger = store.with(options).with(opt2: :overwrite, opt3: :val3, except: :clear, prefix: beta) (Moneta::OptionMerger::METHODS - [:clear]).each do |method| merger.default_options[method].should == {opt1: :val1, opt2: :overwrite, opt3: :val3, prefix: "#{alpha}#{beta}"} end merger.default_options[:clear].should equal(options) merger = store.with(options).with(opt2: :overwrite, opt3: :val3, prefix: beta, except: [:load, :store]) (Moneta::OptionMerger::METHODS - [:load, :store]).each do |method| merger.default_options[method].should == {opt1: :val1, opt2: :overwrite, opt3: :val3, prefix: "#{alpha}#{beta}"} end merger.default_options[:load].should equal(options) merger.default_options[:store].should equal(options) end end it 'has method #raw' do store.raw.default_options.should == {store:{raw:true},create:{raw:true},load:{raw:true},delete:{raw:true}} store.raw.should equal(store.raw.raw) end it 'has method #expires' do store.expires(10).default_options.should == {store:{expires:10},create:{expires:10},increment:{expires:10}} end it 'has method #prefix' do store.prefix('a').default_options.should == {store:{prefix:'a'},load:{prefix:'a'},create:{prefix:'a'}, delete:{prefix:'a'},key?: {prefix:'a'},increment:{prefix:'a'}} store.prefix('a').prefix('b').default_options.should == {store:{prefix:'ab'},load:{prefix:'ab'},create:{prefix:'ab'}, delete:{prefix:'ab'},key?: {prefix:'ab'},increment:{prefix:'ab'}} store.raw.prefix('b').default_options.should == {store:{raw:true,prefix:'b'},load:{raw:true,prefix:'b'},create:{raw:true,prefix:'b'},delete:{raw:true,prefix:'b'},key?: {prefix:'b'},increment:{prefix:'b'}} store.prefix('a').raw.default_options.should == {store:{raw:true,prefix:'a'},load:{raw:true,prefix:'a'},create:{raw:true,prefix:'a'},delete:{raw:true,prefix:'a'},key?: {prefix:'a'},increment:{prefix:'a'}} end it 'supports adding proxis using #with' do compressed_store = store.with(prefix: 'compressed') do use :Transformer, 
value: :zlib end store['key'] = 'uncompressed value' compressed_store['key'] = 'compressed value' store['key'].should == 'uncompressed value' compressed_store['key'].should == 'compressed value' store.key?('compressedkey').should be true # Check if value is compressed compressed_store['key'].should_not == store['compressedkey'] end end moneta-1.5.2/spec/moneta/proxies/pool/000077500000000000000000000000001433316074200176645ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/pool/pool_spec.rb000066400000000000000000000301711433316074200221760ustar00rootroot00000000000000require 'timeout' describe "pool", proxy: :Pool, broken: ::Gem::Version.new(RUBY_ENGINE_VERSION) < ::Gem::Version.new('2.4.0') do describe "Moneta::Pool" do # To test the pool, we create the store once and pass the same object around # whenever a new store is requested. before :context do tempdir = @pool_tempdir = Dir.mktmpdir @pool_store = Moneta.build do use :Pool, max: 2, timeout: 5 do adapter :File, dir: tempdir end end end # Tell the manager to close all open stores after :context do @pool_store.stop FileUtils.remove_dir(@pool_tempdir) end moneta_build { @pool_store } moneta_specs ADAPTER_SPECS.with_each_key it "raises an error on check-out the builder fails" do store = Moneta.build do use :Pool do adapter(Class.new do def initialize(options = {}) raise "boom" end end) end end expect { store['x'] }.to raise_error "boom" end end describe "Moneta::Pool::PoolManager" do let :builder do double('builder').tap do |builder| i = -1 allow(builder).to receive(:build) do [stores[i += 1]] end end end let(:stores) { (0...num).map { |i| double("store#{i}") } } after { subject.kill! 
} shared_examples :no_ttl do context "with one store" do let(:num) { 1 } it "never expires the store" do store = stores.first expect(builder).to receive(:build).once expect(subject.check_out).to be store expect(subject.stats).to include(stores: 1, available: 0) expect(subject.check_in(store)).to eq nil expect(subject.stats).to include(stores: 1, available: 1) sleep 1 expect(subject.stats).to include(stores: 1, available: 1) expect(subject.check_out).to be store expect(subject.check_in(store)).to eq nil end end end shared_examples :no_max do context "with 1,000 stores" do let(:num) { 1_000 } it "never blocks" do # Check out 1000 stores in 1000 threads threads = (0...num).map do Thread.new { subject.check_out } end expect(threads.map(&:value)).to contain_exactly(*stores) expect(subject.stats).to include(stores: num, available: 0, waiting: 0) # Check in the first 50 expect(stores.take(50).map { |store| subject.check_in(store) }).to all(be nil) expect(subject.stats).to include(stores: num, available: 50, waiting: 0) # Now check those 50 out again threads = (0...50).map do Thread.new { subject.check_out } end expect(threads.map(&:value)).to contain_exactly(*stores.take(50)) # Finally check in all stores expect(stores.map { |store| subject.check_in(store) }).to all(be nil) end end end shared_examples :min do |min| describe "initial number of stores" do let(:num) { min } it "starts with #{min} available stores" do expect(subject.stats).to include(stores: min, available: min) expect((0...min).map { subject.check_out }).to contain_exactly(*stores) end end end shared_examples :max do |max, timeout: 5| describe "maximum number of stores" do let(:num) { max } after do expect(stores.map { |store| subject.check_in(store) }).to all be_nil end it "blocks after #{max} stores have been created" do # All stores are checked out expect(max.times.map { subject.check_out }).to contain_exactly(*stores) # Launch threads that make checkout requests (in order). These will # all block. 
threads = max.times.map { Thread.new { subject.check_out } } Timeout.timeout(timeout) { Thread.pass until subject.stats[:waiting] == max } expect(threads).to all be_alive # Check all stores except the first one back in. This will cause all # but one of the threads to return expect(stores.drop(1).map { |store| subject.check_in(store) }).to all be_nil Timeout.timeout(timeout) { Thread.pass until threads.select(&:alive?).length == 1 } expect(subject.stats).to include(waiting: 1) # Ensure that the stores that were checked back in are the ones that # were given to the waiting threads. alive, dead = threads.partition(&:alive?) expect(dead.map(&:value)).to contain_exactly(*stores.drop(1)) # Check the last store back in and make sure it goes to the last # waiting thread. last_thread = alive.first expect(subject.check_in(stores.first)).to eq nil Timeout.timeout(timeout) { Thread.pass while last_thread.alive? } expect(subject.stats).to include(waiting: 0) expect(last_thread.value).to be stores.first end end end shared_examples :ttl do |ttl, min: 0, max: nil| describe "closing stores after TTL expiry" do let(:num) { max || min + 10 } it "closes unneeded stores after ttl" do stores.each do |store| allow(store).to receive(:close) end Timeout.timeout(5) { Thread.pass until subject.stats[:stores] == min } expect(stores.length.times.map { subject.check_out }).to contain_exactly(*stores) expect(subject.stats).to include(stores: num, available: 0) expect(stores.map { |store| subject.check_in(store) }).to all be_nil expect(subject.stats).to include(stores: num, available: num) sleep ttl expect(subject.stats).to include(stores: min, available: min) end end end shared_examples :ttl_with_nonzero_min do |ttl:, min:, max: nil| describe "TTL check" do let(:num) { max || min + 10 } # This is testing that a very specific bug is fixed - see # https://github.com/moneta-rb/moneta/issues/197. 
A better long-term # solution would be to have more granular tests of the functions in the # PoolManager it "doesn't cause a busy-loop when there are available stores" do # Check a store in and out - now the manager needs to decide whether to close # stores after ttl seconds. store = subject.check_out subject.check_in(store) sleep ttl expect(subject.stats[:available]).to be > 0 # needs to be less than the TTL, but otherwise not important. sleep_time = ttl / 2.0 sleep sleep_time # during the sleep, the pool manager should have been idle. expect(subject.stats[:idle_time]).to be >= sleep_time end end end shared_examples :timeout do |timeout, max:| describe "raising an error after timeout" do let(:num) { max } it "raises a timeout error after waiting too long" do expect((0...num).map { subject.check_out }).to contain_exactly(*stores) # One extra checkout request in a separate thread t = Thread.new do Thread.current.report_on_exception = false if Thread.current.respond_to? :report_on_exception subject.check_out end Timeout.timeout(timeout) { Thread.pass until subject.stats[:waiting] == 1 } expect(subject.stats[:longest_wait]).to be_a Time expect(t).to be_alive sleep timeout Timeout.timeout(timeout) { Thread.pass while t.alive? } expect { t.value }.to raise_error Moneta::Pool::TimeoutError expect(subject.stats).to include(waiting: 0, longest_wait: nil) expect(stores.map { |store| subject.check_in store }).to all be_nil end end end context "with default arguments" do subject { Moneta::Pool::PoolManager.new(builder) } after { subject.kill! } include_examples :no_ttl include_examples :no_max include_examples :min, 0 end context "with max: 10, timeout: 4" do subject { Moneta::Pool::PoolManager.new(builder, max: 10, timeout: 4) } after { subject.kill! 
} include_examples :no_ttl include_examples :max, 10, timeout: 4 include_examples :min, 0 include_examples :timeout, 4, max: 10 end context "with min: 10" do subject { Moneta::Pool::PoolManager.new(builder, min: 10) } after { subject.kill! } include_examples :no_max include_examples :min, 10 end context "with ttl: 1" do subject { Moneta::Pool::PoolManager.new(builder, ttl: 1) } after { subject.kill! } include_examples :ttl, 1, min: 0 end context "with min: 10, max: 20, ttl: 1, timeout: 4" do subject { Moneta::Pool::PoolManager.new(builder, min: 10, max: 20, ttl: 1, timeout: 4) } after { subject.kill! } include_examples :min, 10 include_examples :max, 20, timeout: 4 include_examples :ttl, 1, min: 10, max: 20 include_examples :ttl_with_nonzero_min, ttl: 1, min: 10, max: 20 include_examples :timeout, 4, max: 20 end context "with min: 10, max: 10, ttl: 2, timeout: 4" do subject { Moneta::Pool::PoolManager.new(builder, min: 10, max: 10, ttl: 2, timeout: 4) } after { subject.kill! } include_examples :min, 10 include_examples :max, 10, timeout: 4 include_examples :ttl, 2, min: 10, max: 10 include_examples :ttl_with_nonzero_min, ttl: 2, min: 10, max: 10 include_examples :timeout, 4, max: 10 end describe '#check_out' do subject { Moneta::Pool::PoolManager.new(builder, max: 1, timeout: 5) } after { subject.kill! } let(:num) { 1 } it 'yields the store to requesters first come, first served' do store = stores.first procs = (0...10) .map { |i| double("proc#{i}") } .each { |p| expect(p).to receive(:call).with(store).ordered } # Each thread stops immediately after starting. We can then ensure # that each checkout is called in order. threads = procs.map do |p| Thread.new do Thread.stop p.call(subject.check_out) end end Timeout.timeout(5) { Thread.pass until threads.all?(&:stop?) } # The first thread should return immediately after waking threads.first.wakeup Timeout.timeout(5) { threads.first.join } # Wait for the remaining threads to block, one at a time. 
threads.drop(1).each_with_index do |thread, i| thread.wakeup Timeout.timeout(5) { Thread.pass until subject.stats[:waiting] == i + 1 } end # The remaining threads should be waiting for the store to be checked back in expect(threads.drop(1)).to all be_alive expect(subject.stats).to include(waiting: 9) Timeout.timeout(5) do threads.each do |t| t.join subject.check_in(store) end end end it "raises a ShutdownError if the pool is stopped while waiting for a store" do # Exhaust the pool store = stores.first allow(store).to receive(:close).once expect(subject.check_out).to eq store # Simulate a new thread requesting a check-out t1 = Thread.new do Thread.current.report_on_exception = false if Thread.current.respond_to? :report_on_exception subject.check_out end Timeout.timeout(5) { Thread.pass until subject.stats[:waiting] > 0 } expect(t1).to be_alive # Meanwhile in another thread, the pool is stopped. t2 = Thread.new { subject.stop } # The requesting thread should error out immediately expect { t1.value }.to raise_error Moneta::Pool::ShutdownError # In this thread we return the store to the pool, allowing graceful shutdown to complete. 
subject.check_in(store) expect(t2.value).to be_nil end it "raises a ShutdownError if a the pool is stopped before requesting a store" do subject.stop expect { subject.check_out }.to raise_error Moneta::Pool::ShutdownError end end end end moneta-1.5.2/spec/moneta/proxies/proxy/000077500000000000000000000000001433316074200200745ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/proxy/proxy_expires_memory_spec.rb000066400000000000000000000005561433316074200257510ustar00rootroot00000000000000describe "proxy_expires_memory", isolate: true, proxy: :Proxy do let(:t_res) { 0.125 } let(:min_ttl) { t_res } use_timecop moneta_build do Moneta.build do use :Proxy use :Expires use :Proxy adapter :Memory end end moneta_specs STANDARD_SPECS.without_transform.with_expires.returnsame.without_persist.with_each_key end moneta-1.5.2/spec/moneta/proxies/shared/000077500000000000000000000000001433316074200201615ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/shared/shared_tcp_spec.rb000066400000000000000000000025061433316074200236370ustar00rootroot00000000000000describe "shared_tcp", proxy: :Shared do moneta_build do tempdir = self.tempdir Moneta.build do use(:Shared, port: 9001) do adapter :GDBM, file: File.join(tempdir, 'shared_tcp') end end end shared_examples :shared_tcp do moneta_specs ADAPTER_SPECS.with_each_key it 'shares values' do store['shared_key'] = 'shared_value' second = new_store second.key?('shared_key').should be true second['shared_key'].should == 'shared_value' second.close end end # The first store initialised will be running the server context "running as the server" do before do store.load('dummy') expect(store.server?).to be true end include_examples :shared_tcp it 'has the underlying adapter' do store.load('dummy') expect(store.adapter.adapter).to be_a Moneta::Adapters::GDBM end end context "running as a client" do let!(:server_store) do new_store.tap { |store| store.load('dummy') } # Makes a connection end before do store.load('dummy') 
expect(store.server?).to be false end after do server_store.close end include_examples :shared_tcp it 'has a client adapter' do store.load('dummy') expect(store.adapter).to be_a Moneta::Adapters::Client end end end moneta-1.5.2/spec/moneta/proxies/shared/shared_unix_spec.rb000066400000000000000000000024671433316074200240420ustar00rootroot00000000000000describe "shared_unix", proxy: :Shared do moneta_build do tempdir = self.tempdir Moneta.build do use(:Shared, socket: File.join(tempdir, 'shared_unix.socket')) do adapter :GDBM, file: File.join(tempdir, 'shared_unix') end end end shared_examples :shared_unix do moneta_specs ADAPTER_SPECS.with_each_key it 'shares values' do store['shared_key'] = 'shared_value' second = new_store second.key?('shared_key').should be true second['shared_key'].should == 'shared_value' second.close end end context "runnning as the server" do before do store.load('dummy') expect(store.server?).to be true end include_examples :shared_unix it "has the underlying adapter" do store.load('dummy') expect(store.adapter.adapter).to be_a Moneta::Adapters::GDBM end end context "running as a client" do let!(:server_store) do new_store.tap { |store| store.load('dummy') } # Makes a connection end before do store.load('dummy') expect(store.server?).to be false end after do server_store.close end include_examples :shared_unix it 'has a client adapter' do store.load('dummy') expect(store.adapter).to be_a Moneta::Adapters::Client end end end moneta-1.5.2/spec/moneta/proxies/transformer/000077500000000000000000000000001433316074200212555ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/transformer/transformer_bencode_spec.rb000066400000000000000000000007231433316074200266370ustar00rootroot00000000000000describe 'transformer_bencode', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :bencode, value: :bencode adapter :Memory end end moneta_loader do |value| ::BEncode.load(value) end moneta_specs 
TRANSFORMER_SPECS.simplekeys_only.simplevalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::BencodeKeyBencodeValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_bert_spec.rb000066400000000000000000000007511433316074200261750ustar00rootroot00000000000000describe 'transformer_bert', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :bert, value: :bert adapter :Memory end end moneta_loader do |value| ::BERT.decode(value) end moneta_specs TRANSFORMER_SPECS.simplekeys_only.simplevalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::BertKeyBertValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_bson_spec.rb000066400000000000000000000014301433316074200261750ustar00rootroot00000000000000# Currently broken in JRuby 9.3 - see https://github.com/jruby/jruby/issues/6941 describe 'transformer_bson', proxy: :Transformer, broken: defined?(JRUBY_VERSION) && ::Gem::Version.new(JRUBY_VERSION) >= ::Gem::Version.new('9.3.0.0') do moneta_build do Moneta.build do use :Transformer, key: :bson, value: :bson adapter :Memory end end moneta_loader do |value| if ::BSON::VERSION >= '4.0.0' ::BSON::Document.from_bson(::BSON::ByteBuffer.new(value))['v'] else ::BSON::Document.from_bson(::StringIO.new(value))['v'] end end moneta_specs TRANSFORMER_SPECS.simplekeys_only.simplevalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::BsonKeyBsonValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_bzip2_spec.rb000066400000000000000000000007251433316074200262700ustar00rootroot00000000000000describe 'transformer_bzip2', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :bzip2 adapter :Memory end end moneta_loader do |value| 
::RBzip2.default_adapter::Decompressor.new(::StringIO.new(value)).read end moneta_specs TRANSFORMER_SPECS.stringvalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::Bzip2Value.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_json_spec.rb000066400000000000000000000007061433316074200262120ustar00rootroot00000000000000describe 'transformer_json', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :json, value: :json adapter :Memory end end moneta_loader do |value| ::MultiJson.load(value) end moneta_specs TRANSFORMER_SPECS.simplekeys_only.simplevalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::JsonKeyJsonValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_key_inspect_spec.rb000066400000000000000000000006131433316074200275530ustar00rootroot00000000000000describe 'transformer_key_inspect', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :inspect adapter :Memory end end moneta_loader{ |value| value } moneta_specs TRANSFORMER_SPECS.returnsame.simplekeys_only it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::InspectKey.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_key_marshal_spec.rb000066400000000000000000000006111433316074200275330ustar00rootroot00000000000000describe 'transformer_key_marshal', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :marshal adapter :Memory end end moneta_loader{ |value| value } moneta_specs TRANSFORMER_SPECS.returnsame.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalKey.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_key_to_s_spec.rb000066400000000000000000000006011433316074200270470ustar00rootroot00000000000000describe 
'transformer_key_to_s', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :to_s adapter :Memory end end moneta_loader{ |value| value } moneta_specs TRANSFORMER_SPECS.returnsame.simplekeys_only it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::ToSKey.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_key_yaml_spec.rb000066400000000000000000000006001433316074200270440ustar00rootroot00000000000000describe 'transformer_key_yaml', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :yaml adapter :Memory end end moneta_loader{ |value| value } moneta_specs TRANSFORMER_SPECS.returnsame.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::YamlKey.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_lz4_spec.rb000066400000000000000000000007061433316074200257520ustar00rootroot00000000000000describe 'transformer_lz4', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :lz4 adapter :Memory end end moneta_loader do |value| ::LZ4.uncompress(value) end moneta_specs TRANSFORMER_SPECS.stringvalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::Lz4Value.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_lzma_spec.rb000066400000000000000000000007121433316074200262010ustar00rootroot00000000000000describe 'transformer_lzma', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :lzma adapter :Memory end end moneta_loader do |value| ::LZMA.decompress(value) end moneta_specs TRANSFORMER_SPECS.stringvalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::LzmaValue.should_not be_nil end end 
moneta-1.5.2/spec/moneta/proxies/transformer/transformer_lzo_spec.rb000066400000000000000000000007061433316074200260450ustar00rootroot00000000000000describe 'transformer_lzo', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :lzo adapter :Memory end end moneta_loader do |value| ::LZO.decompress(value) end moneta_specs TRANSFORMER_SPECS.stringvalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::LzoValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_base64_spec.rb000066400000000000000000000007721433316074200300370ustar00rootroot00000000000000describe 'transformer_marshal_base64', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :base64], value: [:marshal, :base64] adapter :Memory end end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalBase64KeyMarshalBase64Value.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_city128_spec.rb000066400000000000000000000006651433316074200301570ustar00rootroot00000000000000describe 'transformer_marshal_city128', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :city128], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalCity128KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_city32_spec.rb000066400000000000000000000006621433316074200300660ustar00rootroot00000000000000describe 'transformer_marshal_city32', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do 
Moneta.build do use :Transformer, key: [:marshal, :city32], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalCity32KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_city64_spec.rb000066400000000000000000000006621433316074200300730ustar00rootroot00000000000000describe 'transformer_marshal_city64', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :city64], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalCity64KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_escape_spec.rb000066400000000000000000000010341433316074200302030ustar00rootroot00000000000000require 'uri' describe 'transformer_marshal_escape', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :escape], value: [:marshal, :escape] adapter :Memory end end moneta_loader do |value| ::Marshal.load(::URI.decode_www_form_component(value)) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compiles the transformer class' do store.should_not be_nil Moneta::Transformer::MarshalEscapeKeyMarshalEscapeValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_hex_spec.rb000066400000000000000000000007461433316074200275400ustar00rootroot00000000000000describe 'transformer_marshal_hex', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :hex], value: [:marshal, :hex] adapter :Memory end end moneta_loader do |value| ::Marshal.load([value].pack('H*')) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil 
Moneta::Transformer::MarshalHexKeyMarshalHexValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_hmac_spec.rb000066400000000000000000000010261433316074200276540ustar00rootroot00000000000000describe 'transformer_marshal_hmac', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :marshal, value: [:marshal, :hmac], secret: 'secret' adapter :Memory end end moneta_loader do |value| ::Marshal.load(::Moneta::Transformer::Helper.hmacverify(value, 'secret')) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalKeyMarshalHmacValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_md5_spec.rb000066400000000000000000000006031433316074200274310ustar00rootroot00000000000000describe 'transformer_marshal_md5', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :md5], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalMd5KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_md5_spread_spec.rb000066400000000000000000000006311433316074200307700ustar00rootroot00000000000000describe 'transformer_marshal_md5_spread', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :md5, :spread], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalMd5SpreadKeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_prefix_base64_spec.rb000066400000000000000000000014411433316074200314060ustar00rootroot00000000000000describe 'transformer_marshal_prefix_base64', proxy: :Transformer do 
moneta_build do Moneta.build do use :Transformer, key: [:marshal, :prefix, :base64], value: [:marshal, :base64], prefix: 'moneta' adapter :Memory end end moneta_loader do |value| ::Marshal.load(value.unpack('m').first) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalPrefixBase64KeyMarshalBase64Value.should_not be_nil end context 'with keys with no prefix' do before(:each) do store.adapter.backend['no_prefix'] = 'hidden' end after(:each) do expect(store.adapter.backend.keys).to include('no_prefix') end include_examples :each_key end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_prefix_spec.rb000066400000000000000000000006541433316074200302470ustar00rootroot00000000000000describe 'transformer_marshal_prefix', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :prefix], value: :marshal, prefix: 'moneta' adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalPrefixKeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_qp_spec.rb000066400000000000000000000007461433316074200273740ustar00rootroot00000000000000describe 'transformer_marshal_qp', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :qp], value: [:marshal, :qp] adapter :Memory end end moneta_loader do |value| ::Marshal.load(value.unpack('M').first) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalQpKeyMarshalQpValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_rmd160_spec.rb000066400000000000000000000006571433316074200277660ustar00rootroot00000000000000describe 'transformer_marshal_rmd160', unstable: 
defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :rmd160], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalRmd160KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_sha1_spec.rb000066400000000000000000000006061433316074200276030ustar00rootroot00000000000000describe 'transformer_marshal_sha1', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :sha1], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalSha1KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_sha256_spec.rb000066400000000000000000000006141433316074200277560ustar00rootroot00000000000000describe 'transformer_marshal_sha256', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :sha256], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalSha256KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_sha384_spec.rb000066400000000000000000000006141433316074200277600ustar00rootroot00000000000000describe 'transformer_marshal_sha384', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :sha384], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalSha384KeyMarshalValue.should_not be_nil end end 
moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_sha512_spec.rb000066400000000000000000000006141433316074200277510ustar00rootroot00000000000000describe 'transformer_marshal_sha512', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :sha512], value: :marshal adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalSha512KeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_spec.rb000066400000000000000000000006611433316074200266700ustar00rootroot00000000000000describe 'transformer_marshal', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :marshal, value: :marshal adapter :Memory end end moneta_loader do |value| ::Marshal.load(value) end moneta_specs TRANSFORMER_SPECS.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalKeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_truncate_spec.rb000066400000000000000000000006361433316074200305770ustar00rootroot00000000000000describe 'transformer_marshal_truncate', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :truncate], value: :marshal, maxlen: 64 adapter :Memory end end moneta_specs STANDARD_SPECS.without_persist it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalTruncateKeyMarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_urlsafe_base64_spec.rb000066400000000000000000000010511433316074200315470ustar00rootroot00000000000000describe 'transformer_marshal_urlsafe_base64', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :urlsafe_base64], value: [:marshal, :urlsafe_base64] adapter :Memory end end moneta_loader do |value| 
::Marshal.load(::Base64.urlsafe_decode64(value)) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalUrlsafeBase64KeyMarshalUrlsafeBase64Value.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_marshal_uuencode_spec.rb000066400000000000000000000010031433316074200305460ustar00rootroot00000000000000describe 'transformer_marshal_uuencode', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: [:marshal, :uuencode], value: [:marshal, :uuencode] adapter :Memory end end moneta_loader do |value| ::Marshal.load(value.unpack('u').first) end moneta_specs STANDARD_SPECS.without_persist.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalUuencodeKeyMarshalUuencodeValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_msgpack_spec.rb000066400000000000000000000007311433316074200266640ustar00rootroot00000000000000describe 'transformer_msgpack', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :msgpack, value: :msgpack adapter :Memory end end moneta_loader do |value| ::MessagePack.unpack(value) end moneta_specs TRANSFORMER_SPECS.simplekeys_only.simplevalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MsgpackKeyMsgpackValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_ox_spec.rb000066400000000000000000000007461433316074200256730ustar00rootroot00000000000000describe 'transformer_ox', unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :ox, value: :ox adapter :Memory end end moneta_loader do |value| ::Ox.parse_obj(value) end moneta_specs TRANSFORMER_SPECS.without_keys_or_values(:binary, :float).with_each_key it 'compile transformer class' do store.should_not be_nil 
Moneta::Transformer::OxKeyOxValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_php_spec.rb000066400000000000000000000007021433316074200260240ustar00rootroot00000000000000describe 'transformer_php', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :php, value: :php adapter :Memory end end moneta_loader do |value| ::PHP.unserialize(value) end moneta_specs TRANSFORMER_SPECS.simplekeys_only.simplevalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::PhpKeyPhpValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_quicklz_spec.rb000066400000000000000000000007441433316074200267250ustar00rootroot00000000000000describe 'transformer_quicklz', broken: true, unsupported: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :quicklz adapter :Memory end end moneta_loader do |value| ::QuickLZ.decompress(value) end moneta_specs TRANSFORMER_SPECS.stringvalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::QuicklzValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_snappy_spec.rb000066400000000000000000000007141433316074200265520ustar00rootroot00000000000000describe 'transformer_snappy', unstable: defined?(JRUBY_VERSION), proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :snappy adapter :Memory end end moneta_loader do |value| ::Snappy.inflate(value) end moneta_specs TRANSFORMER_SPECS.stringvalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::SnappyValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_tnet_spec.rb000066400000000000000000000007161433316074200262140ustar00rootroot00000000000000describe 'transformer_tnet', proxy: :Transformer do moneta_build do Moneta.build do use 
:Transformer, key: :tnet, value: :tnet adapter :Memory end end moneta_loader do |value| ::TNetstring.parse(value).first end moneta_specs TRANSFORMER_SPECS.simplekeys_only.simplevalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::TnetKeyTnetValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_value_marshal_spec.rb000066400000000000000000000006361433316074200300660ustar00rootroot00000000000000describe 'transformer_value_marshal', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :marshal adapter :Memory end end moneta_loader do |value| ::Marshal.load(value) end moneta_specs TRANSFORMER_SPECS.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::MarshalValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_value_yaml_spec.rb000066400000000000000000000006221433316074200273740ustar00rootroot00000000000000describe 'transformer_value_yaml', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :yaml adapter :Memory end end moneta_loader do |value| ::YAML.load(value) end moneta_specs TRANSFORMER_SPECS.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::YamlValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/transformer/transformer_yaml_spec.rb000066400000000000000000000006371433316074200262060ustar00rootroot00000000000000describe 'transformer_yaml', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, key: :yaml, value: :yaml adapter :Memory end end moneta_loader do |value| ::YAML.load(value) end moneta_specs TRANSFORMER_SPECS.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::YamlKeyYamlValue.should_not be_nil end end 
moneta-1.5.2/spec/moneta/proxies/transformer/transformer_zlib_spec.rb000066400000000000000000000006521433316074200262010ustar00rootroot00000000000000describe 'transformer_zlib', proxy: :Transformer do moneta_build do Moneta.build do use :Transformer, value: :zlib adapter :Memory end end moneta_loader do |value| ::Zlib::Inflate.inflate(value) end moneta_specs TRANSFORMER_SPECS.stringvalues_only.with_each_key it 'compile transformer class' do store.should_not be_nil Moneta::Transformer::ZlibValue.should_not be_nil end end moneta-1.5.2/spec/moneta/proxies/weak_create/000077500000000000000000000000001433316074200211655ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/weak_create/weak_create_spec.rb000066400000000000000000000007611433316074200250020ustar00rootroot00000000000000describe 'weak_create', proxy: :WeakCreate do before :all do require 'fog/aws' # Put Fog into testing mode Fog.mock! end moneta_build do Moneta.build do use :WeakCreate adapter :Fog, aws_access_key_id: 'fake_access_key_id', aws_secret_access_key: 'fake_secret_access_key', provider: 'AWS', dir: 'weak_create' end end moneta_specs ADAPTER_SPECS.without_increment.without_concurrent.returnsame end moneta-1.5.2/spec/moneta/proxies/weak_each_key/000077500000000000000000000000001433316074200214725ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/weak_each_key/weak_each_key_spec.rb000066400000000000000000000010251433316074200256060ustar00rootroot00000000000000describe 'weak_each_key', proxy: :WeakEachKey do before :all do require 'fog/aws' # Put Fog into testing mode Fog.mock! 
end moneta_build do Moneta.build do use :WeakEachKey adapter :Fog, aws_access_key_id: 'fake_access_key_id', aws_secret_access_key: 'fake_secret_access_key', provider: 'AWS', dir: 'weak_each_key' end end moneta_specs ADAPTER_SPECS.with_each_key.without_create.without_increment.without_concurrent.returnsame end moneta-1.5.2/spec/moneta/proxies/weak_increment/000077500000000000000000000000001433316074200217065ustar00rootroot00000000000000moneta-1.5.2/spec/moneta/proxies/weak_increment/weak_increment_spec.rb000066400000000000000000000010341433316074200262360ustar00rootroot00000000000000describe 'weak_increment', proxy: :WeakIncrement do before :all do require 'fog/aws' # Put Fog into testing mode Fog.mock! end moneta_build do Moneta.build do use :WeakIncrement adapter :Fog, aws_access_key_id: 'fake_access_key_id', aws_secret_access_key: 'fake_secret_access_key', provider: 'AWS', dir: 'weak_increment' end end moneta_specs ADAPTER_SPECS.without_create.without_concurrent.returnsame end moneta-1.5.2/spec/moneta/semaphore_spec.rb000066400000000000000000000034111433316074200205430ustar00rootroot00000000000000describe "semaphore" do moneta_store :Memory it 'should have #lock' do mutex = Moneta::Semaphore.new(store, 'semaphore') mutex.lock.should be true mutex.locked?.should be true expect do mutex.lock end.to raise_error(RuntimeError) expect do mutex.try_lock end.to raise_error(RuntimeError) mutex.unlock.should be_nil mutex.locked?.should be false end it 'should have #enter' do mutex = Moneta::Semaphore.new(store, 'semaphore') mutex.enter.should be true mutex.locked?.should be true expect do mutex.enter end.to raise_error(RuntimeError) expect do mutex.try_enter end.to raise_error(RuntimeError) mutex.leave.should be_nil mutex.locked?.should be false end it 'should lock with #lock' do a = Moneta::Semaphore.new(store, 'semaphore') b = Moneta::Semaphore.new(store, 'semaphore') a.lock.should be true b.try_lock.should be false a.unlock.should be_nil end it 'should have lock 
timeout' do a = Moneta::Semaphore.new(store, 'semaphore') b = Moneta::Semaphore.new(store, 'semaphore') a.lock.should be true b.lock(1).should be false a.unlock.should be_nil end it 'should count concurrent accesses' do a = Moneta::Semaphore.new(store, 'semaphore', 2) b = Moneta::Semaphore.new(store, 'semaphore', 2) c = Moneta::Semaphore.new(store, 'semaphore', 2) a.synchronize do a.locked?.should be true b.synchronize do b.locked?.should be true c.try_lock.should be false end end end it 'should have #synchronize' do semaphore = Moneta::Semaphore.new(store, 'semaphore') semaphore.synchronize do semaphore.locked?.should be true end semaphore.locked?.should be false end end moneta-1.5.2/spec/moneta/stack_file_memory_spec.rb000066400000000000000000000006041433316074200222550ustar00rootroot00000000000000describe "stack_file_memory" do moneta_build do tempdir = self.tempdir Moneta.build do use(:Stack) do add(Moneta.new(:Null)) add(Moneta::Adapters::Null.new) add { adapter :File, dir: File.join(tempdir, "stack_file_memory") } add { adapter :Memory } end end end moneta_specs ADAPTER_SPECS.without_increment.without_create end moneta-1.5.2/spec/moneta/stack_memory_file_spec.rb000066400000000000000000000004471433316074200222620ustar00rootroot00000000000000describe "stack_memory_file" do moneta_build do tempdir = self.tempdir Moneta.build do use(:Stack) do add { adapter :Memory } add { adapter :File, dir: File.join(tempdir, "stack_memory_file") } end end end moneta_specs ADAPTER_SPECS.returnsame end moneta-1.5.2/spec/rack/000077500000000000000000000000001433316074200146575ustar00rootroot00000000000000moneta-1.5.2/spec/rack/cache_moneta_spec.rb000066400000000000000000000264171433316074200206360ustar00rootroot00000000000000require 'rack/cache/moneta' require 'rack/mock' require 'rack/cache' class Object def sha_like? 
length == 40 && self =~ /^[0-9a-z]+$/ end end describe Rack::Cache::MetaStore::Moneta do before do Rack::Cache::Moneta['meta'] = Moneta.new(:Memory, :expires => true) Rack::Cache::Moneta['entity'] = Moneta.new(:Memory, :expires => true) @store = Rack::Cache::MetaStore::Moneta.resolve uri('moneta://entity') @entity_store = Rack::Cache::EntityStore::Moneta.resolve uri('moneta://meta') @request = mock_request('/', {}) @response = mock_response(200, {}, ['hello world']) end after do Rack::Cache::Moneta['meta'].clear Rack::Cache::Moneta['entity'].clear end it "has the class referenced by homonym constant" do Rack::Cache::MetaStore::MONETA.should == Rack::Cache::MetaStore::Moneta end it "instantiates the store" do @store.should be_kind_of(Rack::Cache::MetaStore::Moneta) end it "resolves the connection uri" do Rack::Cache::MetaStore::Moneta.resolve(uri('moneta://Memory?expires=true')).should be_kind_of(Rack::Cache::MetaStore::Moneta) end # Low-level implementation methods =========================================== it 'writes a list of negotation tuples with #write' do # lambda { @store.write('/test', [[{}, {}]]) # }.should_not raise Exception end it 'reads a list of negotation tuples with #read' do @store.write('/test', [[{},{}],[{},{}]]) tuples = @store.read('/test') tuples.should == [ [{},{}], [{},{}] ] end it 'reads an empty list with #read when nothing cached at key' do @store.read('/nothing').should be_empty end it 'removes entries for key with #purge' do @store.write('/test', [[{},{}]]) @store.read('/test').should_not be_empty @store.purge('/test') @store.read('/test').should be_empty end it 'succeeds when purging non-existing entries' do @store.read('/test').should be_empty @store.purge('/test') end it 'returns nil from #purge' do @store.write('/test', [[{},{}]]) @store.purge('/test').should be_nil @store.read('/test').should == [] end %w[/test http://example.com:8080/ /test?x=y /test?x=y&p=q].each do |key| it "can read and write key: '#{key}'" do # lambda { 
@store.write(key, [[{},{}]]) # }.should_not raise Exception @store.read(key).should == [[{},{}]] end end it "can read and write fairly large keys" do key = "b" * 4096 # lambda { @store.write(key, [[{},{}]]) # }.should_not raise Exception @store.read(key).should == [[{},{}]] end it "allows custom cache keys from block" do request = mock_request('/test', {}) request.env['rack-cache.cache_key'] = lambda { |request| request.path_info.reverse } @store.cache_key(request).should == 'tset/' end it "allows custom cache keys from class" do request = mock_request('/test', {}) request.env['rack-cache.cache_key'] = Class.new do def self.call(request); request.path_info.reverse end end @store.cache_key(request).should == 'tset/' end it 'does not blow up when given a non-marhsalable object with an ALL_CAPS key' do store_simple_entry('/bad', { 'SOME_THING' => Proc.new {} }) end # Abstract methods =========================================================== it 'stores a cache entry' do cache_key = store_simple_entry @store.read(cache_key).should_not be_empty end it 'sets the X-Content-Digest response header before storing' do cache_key = store_simple_entry req, res = @store.read(cache_key).first res['X-Content-Digest'].should == 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3' end it 'finds a stored entry with #lookup' do store_simple_entry response = @store.lookup(@request, @entity_store) response.should_not be_nil response.should be_kind_of(Rack::Cache::Response) end it 'does not find an entry with #lookup when none exists' do req = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'}) @store.lookup(req, @entity_store).should be_nil end it "canonizes urls for cache keys" do store_simple_entry(path='/test?x=y&p=q') hits_req = mock_request(path, {}) miss_req = mock_request('/test?p=x', {}) @store.lookup(hits_req, @entity_store).should_not be_nil @store.lookup(miss_req, @entity_store).should be_nil end it 'does not find an entry with #lookup when the body does not exist' do 
store_simple_entry @response.headers['X-Content-Digest'].should_not be_nil @entity_store.purge(@response.headers['X-Content-Digest']) @store.lookup(@request, @entity_store).should be_nil end it 'restores response headers properly with #lookup' do store_simple_entry response = @store.lookup(@request, @entity_store) response.headers.should == @response.headers.merge('Content-Length' => '4') end it 'restores response body from entity store with #lookup' do store_simple_entry response = @store.lookup(@request, @entity_store) body = '' ; response.body.each {|p| body << p} body.should == 'test' end it 'invalidates meta and entity store entries with #invalidate' do store_simple_entry @store.invalidate(@request, @entity_store) response = @store.lookup(@request, @entity_store) response.should be_kind_of(Rack::Cache::Response) response.should_not be :fresh? end it 'succeeds quietly when #invalidate called with no matching entries' do req = mock_request('/test', {}) @store.invalidate(req, @entity_store) @store.lookup(@request, @entity_store).should be_nil end # Vary ======================================================================= it 'does not return entries that Vary with #lookup' do req1 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'}) req2 = mock_request('/test', {'HTTP_FOO' => 'Bling', 'HTTP_BAR' => 'Bam'}) res = mock_response(200, {'Vary' => 'Foo Bar'}, ['test']) @store.store(req1, res, @entity_store) @store.lookup(req2, @entity_store).should be_nil end it 'stores multiple responses for each Vary combination' do req1 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'}) res1 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 1']) key = @store.store(req1, res1, @entity_store) req2 = mock_request('/test', {'HTTP_FOO' => 'Bling', 'HTTP_BAR' => 'Bam'}) res2 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 2']) @store.store(req2, res2, @entity_store) req3 = mock_request('/test', {'HTTP_FOO' => 'Baz', 'HTTP_BAR' => 'Boom'}) res3 = 
mock_response(200, {'Vary' => 'Foo Bar'}, ['test 3']) @store.store(req3, res3, @entity_store) slurp(@store.lookup(req3, @entity_store).body).should == 'test 3' slurp(@store.lookup(req1, @entity_store).body).should == 'test 1' slurp(@store.lookup(req2, @entity_store).body).should == 'test 2' @store.read(key).length.should == 3 end it 'overwrites non-varying responses with #store' do req1 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'}) res1 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 1']) key = @store.store(req1, res1, @entity_store) slurp(@store.lookup(req1, @entity_store).body).should == 'test 1' req2 = mock_request('/test', {'HTTP_FOO' => 'Bling', 'HTTP_BAR' => 'Bam'}) res2 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 2']) @store.store(req2, res2, @entity_store) slurp(@store.lookup(req2, @entity_store).body).should == 'test 2' req3 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'}) res3 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 3']) @store.store(req3, res3, @entity_store) slurp(@store.lookup(req1, @entity_store).body).should == 'test 3' @store.read(key).length.should == 2 end private def mock_request(uri, opts) env = Rack::MockRequest.env_for(uri, opts || {}) Rack::Cache::Request.new(env) end def mock_response(status, headers, body) headers ||= {} body = Array(body).compact Rack::Cache::Response.new(status, headers, body) end def slurp(body) buf = '' body.each { |part| buf << part } buf end # Stores an entry for the given request args, returns a url encoded cache key # for the request. 
def store_simple_entry(*request_args) path, headers = request_args @request = mock_request(path || '/test', headers || {}) @response = mock_response(200, {'Cache-Control' => 'max-age=420'}, ['test']) body = @response.body cache_key = @store.store(@request, @response, @entity_store) @response.body.should == body cache_key end def uri(uri) URI.parse uri end end describe Rack::Cache::EntityStore::Moneta do before do @store = Rack::Cache::EntityStore::Moneta.resolve(uri('moneta://Memory?expires=true')) end it 'has the class referenced by homonym constant' do Rack::Cache::EntityStore::MONETA.should == Rack::Cache::EntityStore::Moneta end it 'resolves the connection uri' do Rack::Cache::EntityStore::Moneta.resolve(uri('moneta://Memory?expires=true')).should be_kind_of(Rack::Cache::EntityStore::Moneta) end it 'responds to all required messages' do %w[read open write exist?].each do |message| @store.should respond_to message end end it 'stores bodies with #write' do key, size = @store.write(['My wild love went riding,']) key.should_not be_nil key.should be_sha_like data = @store.read(key) data.should == 'My wild love went riding,' end it 'takes a ttl parameter for #write' do key, size = @store.write(['My wild love went riding,'], 0) key.should_not be_nil key.should be_sha_like data = @store.read(key) data.should == 'My wild love went riding,' end it 'correctly determines whether cached body exists for key with #exist?' do key, size = @store.write(['She rode to the devil,']) @store.exist?(key).should be true @store.exist?('938jasddj83jasdh4438021ksdfjsdfjsdsf').should be false end it 'can read data written with #write' do key, size = @store.write(['And asked him to pay.']) data = @store.read(key) data.should == 'And asked him to pay.' 
end it 'gives a 40 character SHA1 hex digest from #write' do key, size = @store.write(['she rode to the sea;']) key.should_not be_nil key.length.should == 40 key.should match(/^[0-9a-z]+$/) key.should == '90a4c84d51a277f3dafc34693ca264531b9f51b6' end it 'returns the entire body as a String from #read' do key, size = @store.write(['She gathered together']) @store.read(key).should == 'She gathered together' end it 'returns nil from #read when key does not exist' do @store.read('87fe0a1ae82a518592f6b12b0183e950b4541c62').should be_nil end it 'returns a Rack compatible body from #open' do key, size = @store.write(['Some shells for her hair.']) body = @store.open(key) body.should respond_to :each buf = '' body.each { |part| buf << part } buf.should == 'Some shells for her hair.' end it 'returns nil from #open when key does not exist' do @store.open('87fe0a1ae82a518592f6b12b0183e950b4541c62').should be_nil end it 'deletes stored entries with #purge' do key, size = @store.write(['My wild love went riding,']) @store.purge(key).should be_nil @store.read(key).should be_nil end private def uri(uri) URI.parse uri end end moneta-1.5.2/spec/rack/moneta_cookies_spec.rb000066400000000000000000000037221433316074200212210ustar00rootroot00000000000000require 'helper' require 'rack/mock' require 'rack/moneta_cookies' describe Rack::MonetaCookies do def config(options={},&block) @options = options @block = block end def app(&block) @app_block ||= block end def backend Rack::MockRequest.new(Rack::MonetaCookies.new(lambda do |env| @store = env['rack.request.cookie_hash'] expect(@store).to equal(env['rack.moneta_cookies']) app.call(env) if app [200,{},[]] end, @options || {}, &@block)) end def get(cookies = {}, &block) app(&block) @response = backend.get('/','HTTP_COOKIE' => Rack::Utils.build_query(cookies)) end it 'should be able to read a key' do get 'key' => 'value' do expect( @store['key'] ).to eql('value') end end it 'should be able to set a key' do get do @store['key'] = 'value' end 
expect( @response['Set-Cookie'] ).to eql('key=value') end it 'should be able to remove a key' do get 'key' => 'value' do @store.delete('key') end expect( @response['Set-Cookie'] ).to match(/key=;/) expect( @response['Set-Cookie'] ).to match(/\s+expires=.*?1970/) end it 'should accept a config block' do config do use :Transformer, :key => :prefix, :prefix => 'moneta.' adapter :Cookie end get 'moneta.key' => 'right', 'key' => 'wrong' do expect( @store['key'] ).to eql('right') end end it 'should accept a :domain option' do config :domain => 'example.com' get do @store['key'] = 'value' end expect(@response['Set-Cookie']).to eql('key=value; domain=example.com') end it 'should accept a :path option' do config :path => '/path' get do @store['key'] = 'value' end expect(@response['Set-Cookie']).to eql('key=value; path=/path') end it 'should be accessible via Rack::Request' do get 'key' => 'value' do |env| req = Rack::Request.new(env) expect(req.cookies['key']).to eql('value') end end end moneta-1.5.2/spec/rack/moneta_store_spec.rb000066400000000000000000000035221433316074200207170ustar00rootroot00000000000000require 'helper' require 'rack/mock' require 'rack/moneta_store' describe Rack::MonetaStore do def config(store_arg = nil, options = nil, &block) @store_arg = store_arg @options = options @block = block end def app(&block) @app_block ||= block end def middleware @middleware ||= Rack::MonetaStore.new(lambda do |env| @store = env['rack.moneta_store'] app.call(env) if app [200,{},[]] end, @store_arg, @options || {}, &@block) end def backend @backend ||= Rack::MockRequest.new(middleware) end def get(&block) app(&block) @response = backend.get('/') end def uncached_store middleware.instance_variable_get(:@store) end it 'should be able to get a key without caching' do config :Memory uncached_store['key'] = 'value' get do expect(@store['key']).to eql('value') end end it 'should be able to get a key with caching' do config :Memory, :cache => true uncached_store['key'] = 'value' 
get do expect(@store['key']).to eql('value') expect(@store.adapter).to equal(uncached_store) expect(@store.cache['key']).to eql('value') end end it 'should be able to set a key' do config :Memory get do @store['key'] = 'value' end expect( @store['key'] ).to eql('value') expect(uncached_store['key']).to eql('value') end it 'should be able to get a remove a key' do config :Memory uncached_store['key'] = 'value' get do expect(@store.delete('key')).to eql('value') end expect(uncached_store.key?('key')).to be false end it 'should accept a config block' do config do use :Transformer, :key => :prefix, :prefix => 'moneta.' adapter :Memory end uncached_store['key'] = 'value' get do expect(@store['key']).to eql('value') end end end moneta-1.5.2/spec/rack/session_moneta_spec.rb000066400000000000000000000262241433316074200212520ustar00rootroot00000000000000require 'rack/session/moneta' require 'rack/lint' require 'rack/mock' require 'thread' describe Rack::Session::Moneta do use_timecop let(:session_key) { Rack::Session::Moneta::DEFAULT_OPTIONS[:key] } let(:session_match) { /#{session_key}=([0-9a-fA-F]+);/ } let :incrementor_proc do lambda do |env| env["rack.session"]["counter"] ||= 0 env["rack.session"]["counter"] += 1 Rack::Response.new(env["rack.session"].inspect).to_a end end let :drop_session do Rack::Lint.new(proc do |env| env['rack.session.options'][:drop] = true incrementor_proc.call(env) end) end let :renew_session do Rack::Lint.new(proc do |env| env['rack.session.options'][:renew] = true incrementor_proc.call(env) end) end let :defer_session do Rack::Lint.new(proc do |env| env['rack.session.options'][:defer] = true incrementor_proc.call(env) end) end let :skip_session do Rack::Lint.new(proc do |env| env['rack.session.options'][:skip] = true incrementor_proc.call(env) end) end let :incrementor do Rack::Lint.new(incrementor_proc) end it 'supports different constructors' do Rack::Session::Moneta.new(incrementor, :store => :Memory) Rack::Session::Moneta.new(incrementor, 
:store => Moneta.new(:Memory, :expires => true)) Rack::Session::Moneta.new(incrementor) do use :Expires adapter :Memory end end it "creates a new cookie" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) res = Rack::MockRequest.new(pool).get("/") res["Set-Cookie"].should include("#{session_key}=") res.body.should == '{"counter"=>1}' end it "determines session from a cookie" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) req = Rack::MockRequest.new(pool) res = req.get("/") cookie = res["Set-Cookie"] req.get("/", "HTTP_COOKIE" => cookie). body.should == '{"counter"=>2}' req.get("/", "HTTP_COOKIE" => cookie). body.should == '{"counter"=>3}' end it "determines session only from a cookie by default" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) req = Rack::MockRequest.new(pool) res = req.get("/") sid = res["Set-Cookie"][session_match, 1] req.get("/?rack.session=#{sid}"). body.should == '{"counter"=>1}' req.get("/?rack.session=#{sid}"). body.should == '{"counter"=>1}' end it "determines session from params" do pool = Rack::Session::Moneta.new(incrementor, :cookie_only => false, :store => :Memory) req = Rack::MockRequest.new(pool) res = req.get("/") sid = res["Set-Cookie"][session_match, 1] req.get("/?rack.session=#{sid}"). body.should == '{"counter"=>2}' req.get("/?rack.session=#{sid}"). body.should == '{"counter"=>3}' end it "survives nonexistant cookies" do bad_cookie = "rack.session=blarghfasel" pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) res = Rack::MockRequest.new(pool). 
get("/", "HTTP_COOKIE" => bad_cookie) res.body.should == '{"counter"=>1}' cookie = res["Set-Cookie"][session_match] cookie.should_not match(/#{bad_cookie}/) end it "maintains freshness" do pool = Rack::Session::Moneta.new(incrementor, :expire_after => 3, :store => :Memory) res = Rack::MockRequest.new(pool).get('/') res.body.should include '"counter"=>1' cookie = res["Set-Cookie"] res = Rack::MockRequest.new(pool).get('/', "HTTP_COOKIE" => cookie) res["Set-Cookie"].should == cookie res.body.should include '"counter"=>2' advance 4 res = Rack::MockRequest.new(pool).get('/', "HTTP_COOKIE" => cookie) res["Set-Cookie"].should_not == cookie res.body.should include '"counter"=>1' end it "does not send the same session id if it did not change" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) req = Rack::MockRequest.new(pool) res0 = req.get("/") cookie = res0["Set-Cookie"][session_match] res0.body.should == '{"counter"=>1}' res1 = req.get("/", "HTTP_COOKIE" => cookie) res1["Set-Cookie"].should be_nil res1.body.should == '{"counter"=>2}' res2 = req.get("/", "HTTP_COOKIE" => cookie) res2["Set-Cookie"].should be_nil res2.body.should == '{"counter"=>3}' end it "deletes cookies with :drop option" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) req = Rack::MockRequest.new(pool) drop = Rack::Utils::Context.new(pool, drop_session) dreq = Rack::MockRequest.new(drop) res1 = req.get("/") session = (cookie = res1["Set-Cookie"])[session_match] res1.body.should == '{"counter"=>1}' res2 = dreq.get("/", "HTTP_COOKIE" => cookie) res2["Set-Cookie"].should == nil res2.body.should == '{"counter"=>2}' res3 = req.get("/", "HTTP_COOKIE" => cookie) res3["Set-Cookie"][session_match].should_not == session res3.body.should == '{"counter"=>1}' end it "provides new session id with :renew option" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) req = Rack::MockRequest.new(pool) renew = Rack::Utils::Context.new(pool, renew_session) rreq = 
Rack::MockRequest.new(renew) res1 = req.get("/") session = (cookie = res1["Set-Cookie"])[session_match] res1.body.should == '{"counter"=>1}' res2 = rreq.get("/", "HTTP_COOKIE" => cookie) new_cookie = res2["Set-Cookie"] new_session = new_cookie[session_match] new_session.should_not == session res2.body.should == '{"counter"=>2}' res3 = req.get("/", "HTTP_COOKIE" => new_cookie) res3.body.should == '{"counter"=>3}' # Old cookie was deleted res4 = req.get("/", "HTTP_COOKIE" => cookie) res4.body.should == '{"counter"=>1}' end it "omits cookie with :defer option but still updates the state" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) count = Rack::Utils::Context.new(pool, incrementor) defer = Rack::Utils::Context.new(pool, defer_session) dreq = Rack::MockRequest.new(defer) creq = Rack::MockRequest.new(count) res0 = dreq.get("/") res0["Set-Cookie"].should == nil res0.body.should == '{"counter"=>1}' res0 = creq.get("/") res1 = dreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"]) res1.body.should == '{"counter"=>2}' res2 = dreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"]) res2.body.should == '{"counter"=>3}' end it "omits cookie and state update with :skip option" do pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) count = Rack::Utils::Context.new(pool, incrementor) skip = Rack::Utils::Context.new(pool, skip_session) sreq = Rack::MockRequest.new(skip) creq = Rack::MockRequest.new(count) res0 = sreq.get("/") res0["Set-Cookie"].should == nil res0.body.should == '{"counter"=>1}' res0 = creq.get("/") res1 = sreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"]) res1.body.should == '{"counter"=>2}' res2 = sreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"]) res2.body.should == '{"counter"=>2}' end it "updates deep hashes correctly" do hash_check = proc do |env| session = env['rack.session'] unless session.include? 
'test' session.update :a => :b, :c => { :d => :e }, :f => { :g => { :h => :i} }, 'test' => true else session[:f][:g][:h] = :j end [200, {}, [session.inspect]] end pool = Rack::Session::Moneta.new(hash_check, :store => :Memory) req = Rack::MockRequest.new(pool) res0 = req.get("/") session_id = (cookie = res0["Set-Cookie"])[session_match, 1] ses0 = pool.pool[session_id] req.get("/", "HTTP_COOKIE" => cookie) ses1 = pool.pool[session_id] ses1.should_not == ses0 end # anyone know how to do this better? it "cleanly merges sessions when multithreaded" do unless $DEBUG 1.should == 1 # fake assertion to appease the mighty bacon next end warn 'Running multithread test for Session::Memcache' pool = Rack::Session::Moneta.new(incrementor, :store => :Memory) req = Rack::MockRequest.new(pool) res = req.get('/') res.body.should == '{"counter"=>1}' cookie = res["Set-Cookie"] session_id = cookie[session_match, 1] delta_incrementor = lambda do |env| # emulate disconjoinment of threading env['rack.session'] = env['rack.session'].dup Thread.stop env['rack.session'][(Time.now.usec*rand).to_i] = true incrementor.call(env) end tses = Rack::Utils::Context.new pool, delta_incrementor treq = Rack::MockRequest.new(tses) tnum = rand(7).to_i+5 r = Array.new(tnum) do Thread.new(treq) do |run| run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true) end end.reverse.map{|t| t.run.join.value } r.each do |request| request['Set-Cookie'].should == cookie request.body.should include '"counter"=>2' end session = pool.pool[session_id] session.size.should == tnum+1 # counter session['counter'].should == 2 # meeeh tnum = rand(7).to_i+5 r = Array.new(tnum) do |i| app = Rack::Utils::Context.new pool, time_delta req = Rack::MockRequest.new app Thread.new(req) do |run| run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true) end end.reverse.map{|t| t.run.join.value } r.each do |request| request['Set-Cookie'].should == cookie request.body.should include '"counter"=>3' end session = 
pool.pool[session_id] session.size.should.be tnum+1 session['counter'].should.be 3 drop_counter = proc do |env| env['rack.session'].delete 'counter' env['rack.session']['foo'] = 'bar' [200, {'Content-Type'=>'text/plain'}, env['rack.session'].inspect] end tses = Rack::Utils::Context.new pool, drop_counter treq = Rack::MockRequest.new(tses) tnum = rand(7).to_i+5 r = Array.new(tnum) do Thread.new(treq) do |run| run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true) end end.reverse.map{|t| t.run.join.value } r.each do |request| request['Set-Cookie'].should == cookie request.body.should include '"foo"=>"bar"' end session = pool.pool[session_id] session.size.should.be r.size+1 session['counter'].should.be.nil? session['foo'].should == 'bar' end it "does not suffer a race-condition in get_session" do # By lying about existence of a key this proxy tricks the session # to overwrite values when it wouldn't normally. broken_key = Class.new(::Moneta::Proxy) do def key?(key, *args) false end end pool = Rack::Session::Moneta.new(incrementor) do use broken_key adapter :Memory end # Override the SID generator with one that returns predefined values. 
def pool.generate_sid(*) @fake_sid ||= %w(deadbeef deadbeef caffee) @fake_sid.shift || raise('Empty!') end req = Rack::MockRequest.new(pool) req.get('/') res = req.get('/') res['Set-Cookie'].should =~ /\Arack.session=caffee; / end end moneta-1.5.2/spec/restserver.rb000066400000000000000000000014341433316074200164720ustar00rootroot00000000000000require 'faraday' require 'rack' require 'rack/moneta_rest' require 'webrick' class MonetaRestServerShutdown < StandardError; end def start_restserver(port) server = Rack::Server.new( :app => Rack::Builder.app do use Rack::Lint map '/moneta' do run Rack::MonetaRest.new(:Memory) end end, :environment => 'none', :server => :webrick, :Port => port, :AccessLog => [], :Logger => WEBrick::Log.new($stderr, WEBrick::BasicLog::ERROR) ) Thread.start { server.start } begin Faraday.get("http://127.0.0.1:#{port}") rescue Faraday::ConnectionFailed tries ||= 5 tries -= 1 if tries > 0 sleep 0.1 retry else raise end end server end def stop_restserver(server) server.server.shutdown end moneta-1.5.2/test/000077500000000000000000000000001433316074200137645ustar00rootroot00000000000000moneta-1.5.2/test/action_dispatch/000077500000000000000000000000001433316074200171205ustar00rootroot00000000000000moneta-1.5.2/test/action_dispatch/fixtures/000077500000000000000000000000001433316074200207715ustar00rootroot00000000000000moneta-1.5.2/test/action_dispatch/fixtures/session_autoload_test/000077500000000000000000000000001433316074200254035ustar00rootroot00000000000000moneta-1.5.2/test/action_dispatch/fixtures/session_autoload_test/foo.rb000066400000000000000000000002551433316074200265150ustar00rootroot00000000000000module SessionAutoloadTest class Foo def initialize(bar='baz') @bar = bar end def inspect "#<#{self.class} bar:#{@bar.inspect}>" end end end moneta-1.5.2/test/action_dispatch/session_moneta_store_test.rb000066400000000000000000000125431433316074200247530ustar00rootroot00000000000000require 'action_dispatch' require 'action_controller' require 
'action_dispatch/middleware/session/moneta_store' require 'minitest/autorun' class MonetaStoreTest < ActionDispatch::IntegrationTest class TestController < ActionController::Base def no_session_access head :ok end def set_session_value session[:foo] = "bar" head :ok end def set_serialized_session_value session[:foo] = SessionAutoloadTest::Foo.new head :ok end def get_session_value render plain: "foo: #{session[:foo].inspect}" end def get_session_id render plain: request.cookies['_session_id'].to_s end def call_reset_session session[:bar] reset_session session[:bar] = "baz" head :ok end end def test_setting_and_getting_session_value with_test_route_set do get '/set_session_value' assert_response :success assert cookies['_session_id'] get '/get_session_value' assert_response :success assert_equal 'foo: "bar"', response.body end end def test_getting_nil_session_value with_test_route_set do get '/get_session_value' assert_response :success assert_equal 'foo: nil', response.body end end def test_getting_session_value_after_session_reset with_test_route_set do get '/set_session_value' assert_response :success assert cookies['_session_id'] session_cookie = cookies.to_hash['_session_id'] get '/call_reset_session' assert_response :success assert_not_equal [], headers['Set-Cookie'] cookies.merge(session_cookie) # replace our new session_id with our old, pre-reset session_id get '/get_session_value' assert_response :success assert_equal 'foo: nil', response.body, "data for this session should have been obliterated from cache" end end def test_getting_from_nonexistent_session with_test_route_set do get '/get_session_value' assert_response :success assert_equal 'foo: nil', response.body assert_nil cookies['_session_id'], "should only create session on write, not read" end end def test_setting_session_value_after_session_reset with_test_route_set do get '/set_session_value' assert_response :success assert cookies['_session_id'] session_id = cookies['_session_id'] get 
'/call_reset_session' assert_response :success assert_not_equal [], headers['Set-Cookie'] get '/get_session_value' assert_response :success assert_equal 'foo: nil', response.body get '/get_session_id' assert_response :success assert_not_equal session_id, response.body end end def test_getting_session_id with_test_route_set do get '/set_session_value' assert_response :success assert cookies['_session_id'] session_id = cookies['_session_id'] get '/get_session_id' assert_response :success assert_equal session_id, response.body, "should be able to read session id without accessing the session hash" end end def test_deserializes_unloaded_class with_test_route_set do with_autoload_path do get '/set_serialized_session_value' assert_response :success assert cookies['_session_id'] end with_autoload_path do get '/get_session_id' assert_response :success end with_autoload_path do get '/get_session_value' assert_response :success assert_equal 'foo: #', response.body, "should auto-load unloaded class" end end end def test_doesnt_write_session_cookie_if_session_id_is_already_exists with_test_route_set do get '/set_session_value' assert_response :success assert cookies['_session_id'] get '/get_session_value' assert_response :success assert_nil headers['Set-Cookie'], "should not resend the cookie again if session_id cookie is already exists" end end def test_prevents_session_fixation with_test_route_set do get '/get_session_value' assert_response :success assert_equal 'foo: nil', response.body session_id = cookies['_session_id'] reset! get "/set_session_value?_session_id=#{session_id}" assert_response :success assert_not_equal session_id, cookies['_session_id'] end end private def with_autoload_path path = File.join(File.dirname(__FILE__), 'fixtures') if ActiveSupport::Dependencies.autoload_paths.include?(path) yield else begin ActiveSupport::Dependencies.autoload_paths << path yield ensure ActiveSupport::Dependencies.autoload_paths.reject! 
{ |p| p == path } ActiveSupport::Dependencies.clear end end end def with_test_route_set with_routing do |set| set.draw do scope module: "moneta_store_test" do controller "test" do get 'set_session_value' get 'get_session_value' get 'call_reset_session' get 'get_session_id' get 'set_serialized_session_value' end end end @app = ActionDispatch::MiddlewareStack.new do |middleware| middleware.use ActionDispatch::Session::MonetaStore, key: '_session_id', store: :Memory end.build(set) yield end end end