pax_global_header 0000666 0000000 0000000 00000000064 13362715446 0014525 g ustar 00root root 0000000 0000000 52 comment=b50bb8245758a722f9525d44f05136cd3a0dac43
dalli-2.7.9/ 0000775 0000000 0000000 00000000000 13362715446 0012631 5 ustar 00root root 0000000 0000000 dalli-2.7.9/.gitignore 0000664 0000000 0000000 00000001116 13362715446 0014620 0 ustar 00root root 0000000 0000000 *.gem
*.rbc
/.config
/coverage/
/InstalledFiles
/pkg/
/spec/reports/
/test/tmp/
/test/version_tmp/
/tmp/
## Specific to RubyMotion:
.dat*
.repl_history
build/
## Documentation cache and generated files:
/.yardoc/
/_yardoc/
/doc/
/html/
/rdoc/
## Environment normalisation:
/.bundle/
/lib/bundler/man/
# for a library or gem, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
Gemfile.lock
gemfiles/*.lock
.ruby-version
.ruby-gemset
# unless supporting rvm < 1.11.0 or doing something fancy, ignore this:
.rvmrc
dalli-2.7.9/.travis.yml 0000664 0000000 0000000 00000001525 13362715446 0014745 0 ustar 00root root 0000000 0000000 language: ruby
cache: bundler
rvm:
- 2.5.3
- 2.4.4
- 2.3.8
- 2.2.10
- 2.1.10
- 2.0.0
- 1.9.3
- jruby-9.1.16.0
gemfile:
- gemfiles/rails3.gemfile
- gemfiles/rails4.gemfile
- gemfiles/rails5.gemfile
matrix:
exclude:
- rvm: 2.1.10
gemfile: gemfiles/rails5.gemfile
- rvm: 2.0.0
gemfile: gemfiles/rails5.gemfile
- rvm: 1.9.3
gemfile: gemfiles/rails5.gemfile
- rvm: 1.9.3
gemfile: gemfiles/rails4.gemfile
fast_finish: true
env:
global:
- JRUBY_OPTS='--debug'
script:
- bundle exec rake
before_install:
- gem install bundler
- sudo apt-get -y remove memcached
- sudo apt-get install libevent-dev
- wget https://memcached.org/files/memcached-1.4.15.tar.gz
- tar -zxvf memcached-1.4.15.tar.gz
- cd memcached-1.4.15
- ./configure --enable-sasl
- make
- sudo make install
dalli-2.7.9/Appraisals 0000664 0000000 0000000 00000000254 13362715446 0014654 0 ustar 00root root 0000000 0000000 appraise 'rails3' do
gem 'rails', '>= 3.2.0', '< 4'
end
appraise 'rails4' do
gem 'rails', '>= 4.0.0', '< 5'
end
appraise 'rails5' do
gem 'rails', '5.0.0.beta2'
end
dalli-2.7.9/Gemfile 0000664 0000000 0000000 00000000106 13362715446 0014121 0 ustar 00root root 0000000 0000000 source 'https://rubygems.org'
gemspec
gem 'kgio', :platform => :mri
dalli-2.7.9/History.md 0000664 0000000 0000000 00000036260 13362715446 0014623 0 ustar 00root root 0000000 0000000 Dalli Changelog
=====================
2.7.9
==========
- Fix behavior for Rails 5.2+ cache_versioning (GriwMF)
- Ensure fetch provides the key to the fallback block as an argument (0exp)
- Assorted performance improvements (schneems)
2.7.8
==========
- Rails 5.2 compatibility (pbougie)
- Fix Session Cache compatibility (pixeltrix)
2.7.7
==========
- Support large cache keys on fetch multi (sobrinho)
- Not found checks no longer trigger the result's equality method (dannyfallon)
- Use SVG build badges (olleolleolle)
- Travis updates (junaruga, tiarly, petergoldstein)
- Update default down_retry_delay (jaredhales)
- Close kgio socket after IO.select timeouts
- Documentation updates (tipair)
- Instrument DalliStore errors with instrument_errors configuration option. (btatnall)
2.7.6
==========
- Rails 5.0.0.beta2 compatibility (yui-knk, petergoldstein)
- Add cas!, a variant of the #cas method that yields to the block whether or not the key already exist (mwpastore)
- Performance improvements (nateberkopec)
- Add Ruby 2.3.0 to support matrix (tricknotes)
2.7.5
==========
- Support rcvbuff and sndbuff byte configuration. (btatnall)
- Add `:cache_nils` option to support nil values in `DalliStore#fetch` and `Dalli::Client#fetch` (wjordan, #559)
- Log retryable server errors with 'warn' instead of 'info' (phrinx)
- Fix timeout issue with Dalli::Client#get_multi_yielder (dspeterson)
- Escape namespaces with special regexp characters (Steven Peckins)
- Ensure LocalCache supports the `:raw` option and Entry unwrapping (sj26)
- Ensure bad ttl values don't cause Dalli::RingError (eagletmt, petergoldstein)
- Always pass namespaced key to instrumentation API (kaorimatz)
- Replace use of deprecated TimeoutError with Timeout::Error (eagletmt)
- Clean up gemspec, and use Bundler for loading (grosser)
- Dry up local cache testing (grosser)
2.7.4
==========
- Restore Windows compatibility (dfens, #524)
2.7.3
==========
- Assorted spec improvements
- README changes to specify defaults for failover and compress options (keen99, #470)
- SASL authentication changes to deal with Unicode characters (flypiggy, #477)
- Call to_i on ttl to accomodate ActiveSupport::Duration (#494)
- Change to implicit blocks for performance (glaucocustodio, #495)
- Change to each_key for performance (jastix, #496)
- Support stats settings - (dterei, #500)
- Raise DallError if hostname canno be parsed (dannyfallon, #501)
- Fix instrumentation for falsey values (AlexRiedler, #514)
- Support UNIX socket configurations (r-stu31, #515)
2.7.2
==========
- The fix for #423 didn't make it into the released 2.7.1 gem somehow.
2.7.1
==========
- Rack session will check if servers are up on initialization (arthurnn, #423)
- Add support for IPv6 addresses in hex form, ie: "[::1]:11211" (dplummer, #428)
- Add symbol support for namespace (jingkai #431)
- Support expiration intervals longer than 30 days (leonid-shevtsov #436)
2.7.0
==========
- BREAKING CHANGE:
Dalli::Client#add and #replace now return a truthy value, not boolean true or false.
- Multithreading support with dalli\_store:
Use :pool\_size to create a pool of shared, threadsafe Dalli clients in Rails:
```ruby
config.cache_store = :dalli_store, "cache-1.example.com", "cache-2.example.com", :compress => true, :pool_size => 5, :expires_in => 300
```
This will ensure the Rails.cache singleton does not become a source of contention.
**PLEASE NOTE** Rails's :mem\_cache\_store does not support pooling as of
Rails 4.0. You must use :dalli\_store.
- Implement `version` for retrieving version of connected servers [dterei, #384]
- Implement `fetch_multi` for batched read/write [sorentwo, #380]
- Add more support for safe updates with multiple writers: [philipmw, #395]
`require 'dalli/cas/client'` augments Dalli::Client with the following methods:
* Get value with CAS: `[value, cas] = get_cas(key)`
`get_cas(key) {|value, cas| ...}`
* Get multiple values with CAS: `get_multi_cas(k1, k2, ...) {|value, metadata| cas = metadata[:cas]}`
* Set value with CAS: `new_cas = set_cas(key, value, cas, ttl, options)`
* Replace value with CAS: `replace_cas(key, new_value, cas, ttl, options)`
* Delete value with CAS: `delete_cas(key, cas)`
- Fix bug with get key with "Not found" value [uzzz, #375]
2.6.4
=======
- Fix ADD command, aka `write(unless_exist: true)` (pitr, #365)
- Upgrade test suite from mini\_shoulda to minitest.
- Even more performance improvements for get\_multi (xaop, #331)
2.6.3
=======
- Support specific stats by passing `:items` or `:slabs` to `stats` method [bukhamseen]
- Fix 'can't modify frozen String' errors in `ActiveSupport::Cache::DalliStore` [dblock]
- Protect against objects with custom equality checking [theron17]
- Warn if value for key is too large to store [locriani]
2.6.2
=======
- Properly handle missing RubyInline
2.6.1
=======
- Add optional native C binary search for ring, add:
gem 'RubyInline'
to your Gemfile to get a 10% speedup when using many servers.
You will see no improvement if you are only using one server.
- More get_multi performance optimization [xaop, #315]
- Add lambda support for cache namespaces [joshwlewis, #311]
2.6.0
=======
- read_multi optimization, now checks local_cache [chendo, #306]
- Re-implement get_multi to be non-blocking [tmm1, #295]
- Add `dalli` accessor to dalli_store to access the underlying
Dalli::Client, for things like `get_multi`.
- Add `Dalli::GzipCompressor`, primarily for compatibility with nginx's HttpMemcachedModule using `memcached_gzip_flag`
2.5.0
=======
- Don't escape non-ASCII keys, memcached binary protocol doesn't care. [#257]
- :dalli_store now implements LocalCache [#236]
- Removed lots of old session_store test code, tests now all run without a default memcached server [#275]
- Changed Dalli ActiveSupport adapter to always attempt instrumentation [brianmario, #284]
- Change write operations (add/set/replace) to return false when value is too large to store [brianmario, #283]
- Allowing different compressors per client [naseem]
2.4.0
=======
- Added the ability to swap out the compressed used to [de]compress cache data [brianmario, #276]
- Fix get\_multi performance issues with lots of memcached servers [tmm1]
- Throw more specific exceptions [tmm1]
- Allowing different types of serialization per client [naseem]
2.3.0
=======
- Added the ability to swap out the serializer used to [de]serialize cache data [brianmario, #274]
2.2.1
=======
- Fix issues with ENV-based connections. [#266]
- Fix problem with SessionStore in Rails 4.0 [#265]
2.2.0
=======
- Add Rack session with\_lock helper, for Rails 4.0 support [#264]
- Accept connection string in the form of a URL (e.g., memcached://user:pass@hostname:port) [glenngillen]
- Add touch operation [#228, uzzz]
2.1.0
=======
- Add Railtie to auto-configure Dalli when included in Gemfile [#217, steveklabnik]
2.0.5
=======
- Create proper keys for arrays of objects passed as keys [twinturbo, #211]
- Handle long key with namespace [#212]
- Add NODELAY to TCP socket options [#206]
2.0.4
=======
- Dalli no longer needs to be reset after Unicorn/Passenger fork [#208]
- Add option to re-raise errors rescued in the session and cache stores. [pitr, #200]
- DalliStore#fetch called the block if the cached value == false [#205]
- DalliStore should have accessible options [#195]
- Add silence and mute support for DalliStore [#207]
- Tracked down and fixed socket corruption due to Timeout [#146]
2.0.3
=======
- Allow proper retrieval of stored `false` values [laserlemon, #197]
- Allow non-ascii and whitespace keys, only the text protocol has those restrictions [#145]
- Fix DalliStore#delete error-handling [#196]
2.0.2
=======
- Fix all dalli\_store operations to handle nil options [#190]
- Increment and decrement with :initial => nil now return nil (lawrencepit, #112)
2.0.1
=======
- Fix nil option handling in dalli\_store#write [#188]
2.0.0
=======
- Reimplemented the Rails' dalli\_store to remove use of
ActiveSupport::Cache::Entry which added 109 bytes overhead to every
value stored, was a performance bottleneck and duplicated a lot of
functionality already in Dalli. One benchmark went from 4.0 sec to 3.0
sec with the new dalli\_store. [#173]
- Added reset\_stats operation [#155]
- Added support for configuring keepalive on TCP connections to memcached servers (@bianster, #180)
Notes:
* data stored with dalli\_store 2.x is NOT backwards compatible with 1.x.
Upgraders are advised to namespace their keys and roll out the 2.x
upgrade slowly so keys do not clash and caches are warmed.
`config.cache_store = :dalli_store, :expires_in => 24.hours.to_i, :namespace => 'myapp2'`
* data stored with plain Dalli::Client API is unchanged.
* removed support for dalli\_store's race\_condition\_ttl option.
* removed support for em-synchrony and unix socket connection options.
* removed support for Ruby 1.8.6
* removed memcache-client compability layer and upgrade documentation.
1.1.5
=======
- Coerce input to incr/decr to integer via #to\_i [#165]
- Convert test suite to minitest/spec (crigor, #166)
- Fix encoding issue with keys [#162]
- Fix double namespacing with Rails and dalli\_store. [#160]
1.1.4
=======
- Use 127.0.0.1 instead of localhost as default to avoid IPv6 issues
- Extend DalliStore's :expires\_in when :race\_condition\_ttl is also used.
- Fix :expires\_in option not propogating from DalliStore to Client, GH-136
- Added support for native Rack session store. Until now, Dalli's
session store has required Rails. Now you can use Dalli to store
sessions for any Rack application.
require 'rack/session/dalli'
use Rack::Session::Dalli, :memcache_server => 'localhost:11211', :compression => true
1.1.3
=======
- Support Rails's autoloading hack for loading sessions with objects
whose classes have not be required yet, GH-129
- Support Unix sockets for connectivity. Shows a 2x performance
increase but keep in mind they only work on localhost. (dfens)
1.1.2
=======
- Fix incompatibility with latest Rack session API when destroying
sessions, thanks @twinge!
1.1.1
=======
v1.1.0 was a bad release. Yanked.
1.1.0
=======
- Remove support for Rails 2.3, add support for Rails 3.1
- Fix socket failure retry logic, now you can restart memcached and Dalli won't complain!
- Add support for fibered operation via em-synchrony (eliaslevy)
- Gracefully handle write timeouts, GH-99
- Only issue bug warning for unexpected StandardErrors, GH-102
- Add travis-ci build support (ryanlecompte)
- Gracefully handle errors in get_multi (michaelfairley)
- Misc fixes from crash2burn, fphilipe, igreg, raggi
1.0.5
=======
- Fix socket failure retry logic, now you can restart memcached and Dalli won't complain!
1.0.4
=======
- Handle non-ASCII key content in dalli_store
- Accept key array for read_multi in dalli_store
- Fix multithreaded race condition in creation of mutex
1.0.3
=======
- Better handling of application marshalling errors
- Work around jruby IO#sysread compatibility issue
1.0.2
=======
- Allow browser session cookies (blindsey)
- Compatibility fixes (mwynholds)
- Add backwards compatibility module for memcache-client, require 'dalli/memcache-client'. It makes
Dalli more compatible with memcache-client and prints out a warning any time you do something that
is no longer supported so you can fix your code.
1.0.1
=======
- Explicitly handle application marshalling bugs, GH-56
- Add support for username/password as options, to allow multiple bucket access
from the same Ruby process, GH-52
- Add support for >1MB values with :value_max_bytes option, GH-54 (r-stu31)
- Add support for default TTL, :expires_in, in Rails 2.3. (Steven Novotny)
config.cache_store = :dalli_store, 'localhost:11211', {:expires_in => 4.hours}
1.0.0
=======
Welcome gucki as a Dalli committer!
- Fix network and namespace issues in get_multi (gucki)
- Better handling of unmarshalling errors (mperham)
0.11.2
=======
- Major reworking of socket error and failover handling (gucki)
- Add basic JRuby support (mperham)
0.11.1
======
- Minor fixes, doc updates.
- Add optional support for kgio sockets, gives a 10-15% performance boost.
0.11.0
======
Warning: this release changes how Dalli marshals data. I do not guarantee compatibility until 1.0 but I will increment the minor version every time a release breaks compatibility until 1.0.
IT IS HIGHLY RECOMMENDED YOU FLUSH YOUR CACHE BEFORE UPGRADING.
- multi() now works reentrantly.
- Added new Dalli::Client option for default TTLs, :expires_in, defaults to 0 (aka forever).
- Added new Dalli::Client option, :compression, to enable auto-compression of values.
- Refactor how Dalli stores data on the server. Values are now tagged
as "marshalled" or "compressed" so they can be automatically deserialized
without the client having to know how they were stored.
0.10.1
======
- Prefer server config from environment, fixes Heroku session store issues (thanks JoshMcKin)
- Better handling of non-ASCII values (size -> bytesize)
- Assert that keys are ASCII only
0.10.0
======
Warning: this release changed how Rails marshals data with Dalli. Unfortunately previous versions double marshalled values. It is possible that data stored with previous versions of Dalli will not work with this version.
IT IS HIGHLY RECOMMENDED YOU FLUSH YOUR CACHE BEFORE UPGRADING.
- Rework how the Rails cache store does value marshalling.
- Rework old server version detection to avoid a socket read hang.
- Refactor the Rails 2.3 :dalli\_store to be closer to :mem\_cache\_store.
- Better documentation for session store config (plukevdh)
0.9.10
----
- Better server retry logic (next2you)
- Rails 3.1 compatibility (gucki)
0.9.9
----
- Add support for *_multi operations for add, set, replace and delete. This implements
pipelined network operations; Dalli disables network replies so we're not limited by
latency, allowing for much higher throughput.
dc = Dalli::Client.new
dc.multi do
dc.set 'a', 1
dc.set 'b', 2
dc.set 'c', 3
dc.delete 'd'
end
- Minor fix to set the continuum sorted by value (kangster)
- Implement session store with Rails 2.3. Update docs.
0.9.8
-----
- Implement namespace support
- Misc fixes
0.9.7
-----
- Small fix for NewRelic integration.
- Detect and fail on older memcached servers (pre-1.4).
0.9.6
-----
- Patches for Rails 3.0.1 integration.
0.9.5
-----
- Major design change - raw support is back to maximize compatibility with Rails
and the increment/decrement operations. You can now pass :raw => true to most methods
to bypass (un)marshalling.
- Support symbols as keys (ddollar)
- Rails 2.3 bug fixes
0.9.4
-----
- Dalli support now in rack-bug (http://github.com/brynary/rack-bug), give it a try!
- Namespace support for Rails 2.3 (bpardee)
- Bug fixes
0.9.3
-----
- Rails 2.3 support (beanieboi)
- Rails SessionStore support
- Passenger integration
- memcache-client upgrade docs, see Upgrade.md
0.9.2
----
- Verify proper operation in Heroku.
0.9.1
----
- Add fetch and cas operations (mperham)
- Add incr and decr operations (mperham)
- Initial support for SASL authentication via the MEMCACHE_{USERNAME,PASSWORD} environment variables, needed for Heroku (mperham)
0.9.0
-----
- Initial gem release.
dalli-2.7.9/LICENSE 0000664 0000000 0000000 00000002056 13362715446 0013641 0 ustar 00root root 0000000 0000000 Copyright (c) Peter M. Goldstein, Mike Perham
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
dalli-2.7.9/Performance.md 0000664 0000000 0000000 00000004261 13362715446 0015417 0 ustar 00root root 0000000 0000000 Performance
====================
Caching is all about performance, so I carefully track Dalli performance to ensure no regressions.
You can optionally use kgio to give Dalli a 10-20% performance boost: `gem install kgio`.
Note I've added some benchmarks over time to Dalli that the other libraries don't necessarily have.
memcache-client
---------------
Testing 1.8.5 with ruby 1.9.3p0 (2011-10-30 revision 33570) [x86_64-darwin11.2.0]
user system total real
set:plain:memcache-client 1.860000 0.310000 2.170000 ( 2.188030)
set:ruby:memcache-client 1.830000 0.290000 2.120000 ( 2.130212)
get:plain:memcache-client 1.830000 0.340000 2.170000 ( 2.176156)
get:ruby:memcache-client 1.900000 0.330000 2.230000 ( 2.235045)
multiget:ruby:memcache-client 0.860000 0.120000 0.980000 ( 0.987348)
missing:ruby:memcache-client 1.630000 0.320000 1.950000 ( 1.954867)
mixed:ruby:memcache-client 3.690000 0.670000 4.360000 ( 4.364469)
dalli
-----
Testing with Rails 3.2.1
Using kgio socket IO
Testing 2.0.0 with ruby 1.9.3p125 (2012-02-16 revision 34643) [x86_64-darwin11.3.0]
user system total real
mixed:rails:dalli 1.580000 0.570000 2.150000 ( 3.008839)
set:plain:dalli 0.730000 0.300000 1.030000 ( 1.567098)
setq:plain:dalli 0.520000 0.120000 0.640000 ( 0.634402)
set:ruby:dalli 0.800000 0.300000 1.100000 ( 1.640348)
get:plain:dalli 0.840000 0.330000 1.170000 ( 1.668425)
get:ruby:dalli 0.850000 0.330000 1.180000 ( 1.665716)
multiget:ruby:dalli 0.700000 0.260000 0.960000 ( 0.965423)
missing:ruby:dalli 0.720000 0.320000 1.040000 ( 1.511720)
mixed:ruby:dalli 1.660000 0.640000 2.300000 ( 3.320743)
mixedq:ruby:dalli 1.630000 0.510000 2.140000 ( 2.629734)
incr:ruby:dalli 0.270000 0.100000 0.370000 ( 0.547618)
dalli-2.7.9/README.md 0000664 0000000 0000000 00000025142 13362715446 0014114 0 ustar 00root root 0000000 0000000 Dalli [](http://travis-ci.org/petergoldstein/dalli) [](https://gemnasium.com/petergoldstein/dalli) [](https://codeclimate.com/github/petergoldstein/dalli)
=====
Dalli is a high performance pure Ruby client for accessing memcached servers. It works with memcached 1.4+ only as it uses the newer binary protocol. It should be considered a replacement for the memcache-client gem.
The name is a variant of Salvador Dali for his famous painting [The Persistence of Memory](http://en.wikipedia.org/wiki/The_Persistence_of_Memory).

Dalli's initial development was sponsored by [CouchBase](http://www.couchbase.com/). Many thanks to them!
Design
------------
Mike Perham decided to write Dalli after maintaining memcache-client for two years for a few specific reasons:
0. The code is mostly old and gross. The bulk of the code is a single 1000 line .rb file.
1. It has a lot of options that are infrequently used which complicate the codebase.
2. The implementation has no single point to attach monitoring hooks.
3. Uses the old text protocol, which hurts raw performance.
So a few notes. Dalli:
0. uses the exact same algorithm to choose a server so existing memcached clusters with TBs of data will work identically to memcache-client.
1. is approximately 20% faster than memcache-client (which itself was heavily optimized) in Ruby 1.9.2.
2. contains explicit "chokepoint" methods which handle all requests; these can be hooked into by monitoring tools (NewRelic, Rack::Bug, etc) to track memcached usage.
3. supports SASL for use in managed environments, e.g. Heroku.
4. provides proper failover with recovery and adjustable timeouts
Supported Ruby versions and implementations
------------------------------------------------
Dalli should work identically on:
* JRuby 1.6+
* Ruby 1.9.3+
* Rubinius 2.0
If you have problems, please enter an issue.
Installation and Usage
------------------------
Remember, Dalli **requires** memcached 1.4+. You can check the version with `memcached -h`. Please note that the memcached version that *Mac OS X Snow Leopard* ships with is 1.2.8 and it won't work. Install memcached 1.4.x using Homebrew with
brew install memcached
On Ubuntu you can install it by running:
apt-get install memcached
You can verify your installation using this piece of code:
```bash
gem install dalli
```
```ruby
require 'dalli'
options = { :namespace => "app_v1", :compress => true }
dc = Dalli::Client.new('localhost:11211', options)
dc.set('abc', 123)
value = dc.get('abc')
```
The test suite requires memcached 1.4.3+ with SASL enabled (`brew install memcached --enable-sasl ; mv /usr/bin/memcached /usr/bin/memcached.old`). Currently only supports the PLAIN mechanism.
Dalli has no runtime dependencies and never will. If you are using Ruby <2.3,
you can optionally install the '[kgio](https://bogomips.org/kgio/)' gem to
give Dalli a 20-30% performance boost.
Usage with Rails 3.x and 4.x
---------------------------
In your Gemfile:
```ruby
gem 'dalli'
```
In `config/environments/production.rb`:
```ruby
config.cache_store = :dalli_store
```
Here's a more comprehensive example that sets a reasonable default for maximum cache entry lifetime (one day), enables compression for large values and namespaces all entries for this rails app. Remove the namespace if you have multiple apps which share cached values.
```ruby
config.cache_store = :dalli_store, 'cache-1.example.com', 'cache-2.example.com:11211:2',
{ :namespace => NAME_OF_RAILS_APP, :expires_in => 1.day, :compress => true }
```
You can specify a port and a weight by appending to the server name. You may wish to increase the weight of a server with more memory configured. (e.g. to specify port 11211 and a weight of 2, append `:11211:2` )
If your servers are specified in `ENV["MEMCACHE_SERVERS"]` (e.g. on Heroku when using a third-party hosted addon), simply provide `nil` for the servers:
```ruby
config.cache_store = :dalli_store, nil, { :namespace => NAME_OF_RAILS_APP, :expires_in => 1.day, :compress => true }
```
To use Dalli for Rails session storage that times out after 20 minutes, in `config/initializers/session_store.rb`:
For Rails >= 3.2.4:
```ruby
Rails.application.config.session_store ActionDispatch::Session::CacheStore, :expire_after => 20.minutes
```
For Rails 3.x:
```ruby
require 'action_dispatch/middleware/session/dalli_store'
Rails.application.config.session_store :dalli_store, :memcache_server => ['host1', 'host2'], :namespace => 'sessions', :key => '_foundation_session', :expire_after => 20.minutes
```
Dalli does not support Rails 2.x.
Multithreading and Rails
--------------------------
If you use Puma or another threaded app server, as of Dalli 2.7, you can use a pool
of Dalli clients with Rails to ensure the `Rails.cache` singleton does not become a
source of thread contention. You must add `gem 'connection_pool'` to your Gemfile and
add :pool\_size to your `dalli_store` config:
```ruby
config.cache_store = :dalli_store, 'cache-1.example.com', { :pool_size => 5 }
```
You can then use the Rails cache as normal and Rails.cache will use the pool transparently under the covers, or you can check out a Dalli client directly from the pool:
```ruby
Rails.cache.fetch('foo', :expires_in => 300) do
'bar'
end
Rails.cache.dalli.with do |client|
# client is a Dalli::Client instance which you can
# use ONLY within this block
end
```
Configuration
------------------------
**servers**: An Array of "host:port:weight" where weight allows you to distribute cache unevenly.
Dalli::Client accepts the following options. All times are in seconds.
**expires_in**: Global default for key TTL. Default is 0, which means no expiry.
**namespace**: If specified, prepends each key with this value to provide simple namespacing. Default is nil.
**failover**: Boolean, if true Dalli will failover to another server if the main server for a key is down. Default is true.
**threadsafe**: Boolean. If true Dalli ensures that only one thread is using a socket at a given time. Default is true. Set to false at your own peril.
**serializer**: The serializer to use for objects being stored (ex. JSON).
Default is Marshal.
**compress**: Boolean, if true Dalli will gzip-compress values larger than 1K. Default is false.
**compression_min_size**: Minimum value byte size for which to attempt compression. Default is 1K.
**compression_max_size**: Maximum value byte size for which to attempt compression. Default is unlimited.
**compressor**: The compressor to use for objects being stored.
Default is zlib, implemented under `Dalli::Compressor`.
If serving compressed data using nginx's HttpMemcachedModule, set `memcached_gzip_flag 2` and use `Dalli::GzipCompressor`
**keepalive**: Boolean. If true, Dalli will enable keep-alive for socket connections. Default is true.
**socket_timeout**: Timeout for all socket operations (connect, read, write). Default is 0.5.
**socket_max_failures**: When a socket operation fails after socket_timeout, the same operation is retried. This is to not immediately mark a server down when there's a very slight network problem. Default is 2.
**socket_failure_delay**: Before retrying a socket operation, the process sleeps for this amount of time. Default is 0.01. Set to nil for no delay.
**down_retry_delay**: When a server has been marked down due to many failures, the server will be checked again for being alive only after this amount of time. Don't set this value too low, otherwise each request which tries the failed server might hang for the maximum **socket_timeout**. Default is 60 seconds.
**value_max_bytes**: The maximum size of a value in memcached. Defaults to 1MB, this can be increased with memcached's -I parameter. You must also configure Dalli to allow the larger size here.
**error_when_over_max_size**: Boolean. If true, Dalli will throw a Dalli::ValueOverMaxSize exception when trying to store data larger than **value_max_bytes**. Defaults to false, meaning only a warning is logged.
**username**: The username to use for authenticating this client instance against a SASL-enabled memcached server. Heroku users should not need to use this normally.
**password**: The password to use for authenticating this client instance against a SASL-enabled memcached server. Heroku users should not need to use this normally.
**sndbuf**: In bytes, set the socket SO_SNDBUF. Defaults to operating system default.
**rcvbuf**: In bytes, set the socket SO_RCVBUF. Defaults to operating system default.
**cache_nils**: Boolean. If true Dalli will not treat cached `nil` values as 'not found' for `#fetch` operations. Default is false.
**raise_errors**: Boolean. When true DalliStore will reraise Dalli:DalliError instead swallowing the error. Default is false.
**instrument_errors**: Boolean. When true DalliStore will send notification of Dalli::DalliError via a 'cache_error.active_support' event. Default is false.
Features and Changes
------------------------
By default, Dalli is thread-safe. Disable thread-safety at your own peril.
Dalli does not need anything special in Unicorn/Passenger since 2.0.4.
It will detect sockets shared with child processes and gracefully reopen the
socket.
Note that Dalli does not require ActiveSupport or Rails. You can safely use it in your own Ruby projects.
[View the Client API](http://www.rubydoc.info/github/mperham/dalli/Dalli/Client)
Helping Out
-------------
If you have a fix you wish to provide, please fork the code, fix in your local project and then send a pull request on github. Please ensure that you include a test which verifies your fix and update `History.md` with a one sentence description of your fix so you get credit as a contributor.
We're not accepting new compressors. They are trivial to add in an initializer. See #385 (LZ4), #406 (Snappy)
Thanks
------------
Mike Perham - for originally authoring the Dalli project and serving as maintainer and primary contributor
Eric Wong - for help using his [kgio](http://bogomips.org/kgio/) library.
Brian Mitchell - for his remix-stash project which was helpful when implementing and testing the binary protocol support.
[CouchBase](http://couchbase.com) - for their project sponsorship
Authors
----------
* [Peter M. Goldstein](https://github.com/petergoldstein) - current maintainer
* [Mike Perham](https://github.com/mperham) and contributors
Copyright
-----------
Copyright (c) Mike Perham, Peter M. Goldstein. See LICENSE for details.
dalli-2.7.9/Rakefile 0000664 0000000 0000000 00000001237 13362715446 0014301 0 ustar 00root root 0000000 0000000 require 'bundler/setup'
require 'bundler/gem_tasks'
require 'appraisal'
require 'rake/testtask'
Rake::TestTask.new(:test) do |test|
test.pattern = 'test/**/test_*.rb'
test.warning = true
test.verbose = true
end
task :default => :test
Rake::TestTask.new(:bench) do |test|
test.pattern = 'test/benchmark_test.rb'
end
task :test_all do
system('rake test RAILS_VERSION="~> 3.0.0"')
system('rake test RAILS_VERSION=">= 3.0.0"')
end
# 'gem install rdoc' to upgrade RDoc if this is giving you errors
require 'rdoc/task'
RDoc::Task.new do |rd|
rd.rdoc_files.include("lib/**/*.rb")
end
require 'rake/clean'
CLEAN.include "**/*.rbc"
CLEAN.include "**/.DS_Store"
dalli-2.7.9/code_of_conduct.md 0000664 0000000 0000000 00000004542 13362715446 0016275 0 ustar 00root root 0000000 0000000 # Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
contribute through reporting issues, posting feature requests, updating
documentation, submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free
experience for everyone, regardless of level of experience, gender, gender
identity and expression, sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
By adopting this Code of Conduct, project maintainers commit themselves to
fairly and consistently applying these principles to every aspect of managing
this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project maintainer at peter.m.goldstein AT gmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
incident.
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.3.0, available at
[http://contributor-covenant.org/version/1/3/0/][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/3/0/
dalli-2.7.9/dalli.gemspec 0000664 0000000 0000000 00000001557 13362715446 0015273 0 ustar 00root root 0000000 0000000 require './lib/dalli/version'
# Gem packaging metadata. Dalli::VERSION comes from lib/dalli/version.rb,
# required at the top of this file.
Gem::Specification.new do |s|
  s.name = "dalli"
  s.version = Dalli::VERSION
  s.license = "MIT"
  s.authors = ['Peter M. Goldstein', 'Mike Perham']
  s.description = s.summary = 'High performance memcached client for Ruby'
  s.email = ['peter.m.goldstein@gmail.com', 'mperham@gmail.com']
  # Ship only the runtime library plus top-level docs; tests and gemfiles
  # are excluded from the packaged gem.
  s.files = Dir.glob('lib/**/*') + [
    'LICENSE',
    'README.md',
    'History.md',
    'Gemfile'
  ]
  s.homepage = 'https://github.com/petergoldstein/dalli'
  s.rdoc_options = ["--charset=UTF-8"]
  # Development-only dependencies; Dalli itself declares no runtime deps.
  s.add_development_dependency 'minitest', '>= 4.2.0'
  s.add_development_dependency 'mocha'
  s.add_development_dependency 'rails', '~> 4'
  s.add_development_dependency 'rake'
  s.add_development_dependency 'appraisal'
  s.add_development_dependency 'connection_pool'
  s.add_development_dependency 'rdoc'
  s.add_development_dependency 'simplecov'
end
dalli-2.7.9/gemfiles/ 0000775 0000000 0000000 00000000000 13362715446 0014424 5 ustar 00root root 0000000 0000000 dalli-2.7.9/gemfiles/rails3.gemfile 0000664 0000000 0000000 00000000226 13362715446 0017153 0 ustar 00root root 0000000 0000000 # This file was generated by Appraisal
source 'https://rubygems.org'
gem 'kgio', platform: :mri
gem 'rails', '>= 3.2.0', '< 4'
gemspec path: '../'
dalli-2.7.9/gemfiles/rails4.gemfile 0000664 0000000 0000000 00000000226 13362715446 0017154 0 ustar 00root root 0000000 0000000 # This file was generated by Appraisal
source 'https://rubygems.org'
gem 'kgio', platform: :mri
gem 'rails', '>= 4.0.0', '< 5'
gemspec path: '../'
dalli-2.7.9/gemfiles/rails5.gemfile 0000664 0000000 0000000 00000000250 13362715446 0017152 0 ustar 00root root 0000000 0000000 # This file was generated by Appraisal
source 'https://rubygems.org'
gem 'kgio', platform: :mri
gem 'rails', '~> 5.0.0'
gem 'minitest', '< 5.10'
gemspec path: '../'
dalli-2.7.9/lib/ 0000775 0000000 0000000 00000000000 13362715446 0013377 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/action_dispatch/ 0000775 0000000 0000000 00000000000 13362715446 0016533 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/action_dispatch/middleware/ 0000775 0000000 0000000 00000000000 13362715446 0020650 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/action_dispatch/middleware/session/ 0000775 0000000 0000000 00000000000 13362715446 0022333 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/action_dispatch/middleware/session/dalli_store.rb 0000664 0000000 0000000 00000004377 13362715446 0025174 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'active_support/cache'
require 'action_dispatch/middleware/session/abstract_store'
require 'dalli'
# Dalli-based session store for Rails 3.0.
module ActionDispatch
  module Session
    class DalliStore < AbstractStore
      # app     - the Rack application being wrapped.
      # options - session options; notable keys:
      #   :cache                 - an existing Dalli::Client (or pool) to reuse.
      #   :expires/:expire_after - session TTL in seconds (:expires is the
      #                            legacy spelling).
      #   :namespace             - key prefix, defaults to 'rack:session'.
      #   :raise_errors          - when truthy, re-raise Dalli errors instead
      #                            of logging and continuing.
      def initialize(app, options = {})
        # Support old :expires option
        options[:expire_after] ||= options[:expires]
        super
        @default_options = { :namespace => 'rack:session' }.merge(@default_options)
        @pool = options[:cache] || begin
          Dalli::Client.new(
            @default_options[:memcache_server], @default_options)
        end
        @namespace = @default_options[:namespace]
        @raise_errors = !!@default_options[:raise_errors]
        # NOTE(review): `super` is invoked a second time here (it was already
        # called above). This matches the original source; confirm both calls
        # are intentional before changing.
        super
      end

      # Drop all memcached connections; they are re-established lazily on
      # the next operation.
      def reset
        @pool.reset
      end

      private

      # Look up the session for +sid+, generating a fresh id when none is
      # supplied. Returns [sid, session_hash]; the hash is empty on a miss
      # or on a (non-unmarshal) Dalli error.
      def get_session(env, sid)
        sid = generate_sid unless sid and !sid.empty?
        begin
          session = @pool.get(sid) || {}
        rescue Dalli::DalliError => ex
          # re-raise ArgumentError so Rails' session abstract_store.rb can autoload any missing models
          raise ArgumentError, ex.message if ex.message =~ /unmarshal/
          Rails.logger.warn("Session::DalliStore#get: #{ex.message}")
          session = {}
        end
        [sid, session]
      end

      # Persist +session_data+ under +sid+ with the configured TTL.
      # Returns the sid on success, false on failure (unless :raise_errors).
      def set_session(env, sid, session_data, options = nil)
        options ||= env[ENV_SESSION_OPTIONS_KEY]
        expiry = options[:expire_after]
        @pool.set(sid, session_data, expiry)
        sid
      rescue Dalli::DalliError
        Rails.logger.warn("Session::DalliStore#set: #{$!.message}")
        raise if @raise_errors
        false
      end

      # Delete the stored session. Returns nil when options[:drop] asks the
      # session to simply disappear, otherwise a fresh session id.
      def destroy_session(env, session_id, options)
        begin
          @pool.delete(session_id)
        rescue Dalli::DalliError
          Rails.logger.warn("Session::DalliStore#destroy_session: #{$!.message}")
          raise if @raise_errors
        end
        return nil if options[:drop]
        generate_sid
      end

      # Legacy destroy hook: delete the current session, if any.
      def destroy(env)
        if sid = current_session_id(env)
          @pool.delete(sid)
        end
      rescue Dalli::DalliError
        Rails.logger.warn("Session::DalliStore#destroy: #{$!.message}")
        raise if @raise_errors
        false
      end
    end
  end
end
dalli-2.7.9/lib/active_support/ 0000775 0000000 0000000 00000000000 13362715446 0016446 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/active_support/cache/ 0000775 0000000 0000000 00000000000 13362715446 0017511 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/active_support/cache/dalli_store.rb 0000664 0000000 0000000 00000034073 13362715446 0022346 0 ustar 00root root 0000000 0000000 # encoding: ascii
# frozen_string_literal: true
require 'dalli'
module ActiveSupport
module Cache
class DalliStore
attr_reader :silence, :options
alias_method :silence?, :silence
# Silence the logger.
def silence!
@silence = true
self
end
# Silence the logger within a block.
def mute
previous_silence, @silence = defined?(@silence) && @silence, true
yield
ensure
@silence = previous_silence
end
ESCAPE_KEY_CHARS = /[\x00-\x20%\x7F-\xFF]/
# Creates a new DalliStore object, with the given memcached server
# addresses. Each address is either a host name, or a host-with-port string
# in the form of "host_name:port". For example:
#
# ActiveSupport::Cache::DalliStore.new("localhost", "server-downstairs.localnetwork:8229")
#
# If no addresses are specified, then DalliStore will connect to
# localhost port 11211 (the default memcached port).
#
# Connection Pool support
#
# If you are using multithreaded Rails, the Rails.cache singleton can become a source
# of contention. You can use a connection pool of Dalli clients with Rails.cache by
# passing :pool_size and/or :pool_timeout:
#
# config.cache_store = :dalli_store, 'localhost:11211', :pool_size => 10
#
# Both pool options default to 5. You must include the `connection_pool` gem if you
# wish to use pool support.
#
# addresses - memcached server strings, optionally followed by an options
# hash (see the class comment). Builds either a bare Dalli::Client or,
# when :pool_size/:pool_timeout are given, a ConnectionPool of clients.
def initialize(*addresses)
  addresses = addresses.flatten
  options = addresses.extract_options!
  @options = options.dup
  pool_options = {}
  pool_options[:size] = options[:pool_size] if options[:pool_size]
  pool_options[:timeout] = options[:pool_timeout] if options[:pool_timeout]
  # Accept the legacy :compression flag as an alias for :compress.
  @options[:compress] ||= @options[:compression]
  addresses.compact!
  servers = if addresses.empty?
    nil # use the default from Dalli::Client
  else
    addresses
  end
  if pool_options.empty?
    @data = Dalli::Client.new(servers, @options)
  else
    # Pooled clients disable per-client locking; the pool serializes access.
    @data = ::ConnectionPool.new(pool_options) { Dalli::Client.new(servers, @options.merge(:threadsafe => false)) }
  end
  # Rails per-request local cache support, plus unwrapping of legacy
  # ActiveSupport::Cache::Entry values read back from the local cache.
  extend Strategy::LocalCache
  extend LocalCacheEntryUnwrapAndRaw
end
##
# Access the underlying Dalli::Client or ConnectionPool instance for
# access to get_multi, etc.
def dalli
@data
end
def with(&block)
@data.with(&block)
end
# Fetch the value associated with the key.
# If a value is found, then it is returned.
#
# If a value is not found and no block is given, then nil is returned.
#
# If a value is not found (or if the found value is nil and :cache_nils is false)
# and a block is given, the block will be invoked and its return value
# written to the cache and returned.
def fetch(name, options=nil)
  options ||= {}
  options[:cache_nils] = true if @options[:cache_nils]
  namespaced_name = namespaced_key(name, options)
  # With :cache_nils a stored nil must be distinguishable from a miss, so
  # the sentinel Dalli::Server::NOT_FOUND stands in for "no entry".
  not_found = options[:cache_nils] ? Dalli::Server::NOT_FOUND : nil
  if block_given?
    entry = not_found
    unless options[:force]
      entry = instrument_with_log(:read, namespaced_name, options) do |payload|
        read_entry(namespaced_name, options).tap do |result|
          if payload
            payload[:super_operation] = :fetch
            payload[:hit] = not_found != result
          end
        end
      end
    end
    if not_found == entry
      # Miss (or :force): generate the value, write it back, return it.
      result = instrument_with_log(:generate, namespaced_name, options) do |payload|
        yield(name)
      end
      write(name, result, options)
      result
    else
      instrument_with_log(:fetch_hit, namespaced_name, options) { |payload| }
      entry
    end
  else
    # Without a block this is just a plain read.
    read(name, options)
  end
end
def read(name, options=nil)
options ||= {}
name = namespaced_key(name, options)
instrument_with_log(:read, name, options) do |payload|
entry = read_entry(name, options)
payload[:hit] = !entry.nil? if payload
entry
end
end
def write(name, value, options=nil)
options ||= {}
name = namespaced_key(name, options)
instrument_with_log(:write, name, options) do |payload|
with do |connection|
options = options.merge(:connection => connection)
write_entry(name, value, options)
end
end
end
def exist?(name, options=nil)
options ||= {}
name = namespaced_key(name, options)
log(:exist, name, options)
!read_entry(name, options).nil?
end
def delete(name, options=nil)
options ||= {}
name = namespaced_key(name, options)
instrument_with_log(:delete, name, options) do |payload|
delete_entry(name, options)
end
end
# Reads multiple keys from the cache using a single call to the
# servers for all keys. Keys must be Strings.
def read_multi(*names)
  options = names.extract_options!
  # Map namespaced key => caller-supplied name so results can be returned
  # under the names the caller used.
  mapping = names.inject({}) { |memo, name| memo[namespaced_key(name, options)] = name; memo }
  instrument_with_log(:read_multi, mapping.keys) do
    results = {}
    # Serve whatever we can from the per-request local cache first.
    if local_cache
      mapping.each_key do |key|
        if value = local_cache.read_entry(key, options)
          results[key] = value
        end
      end
    end
    # Fetch the remainder from memcached in a single multi-get.
    data = with { |c| c.get_multi(mapping.keys - results.keys) }
    results.merge!(data)
    results.inject({}) do |memo, (inner, _)|
      entry = results[inner]
      # NB Backwards data compatibility, to be removed at some point
      value = (entry.is_a?(ActiveSupport::Cache::Entry) ? entry.value : entry)
      memo[mapping[inner]] = value
      # Populate the local cache so repeat reads this request are free.
      local_cache.write_entry(inner, value, options) if local_cache
      memo
    end
  end
end
# Fetches data from the cache, using the given keys. If there is data in
# the cache with the given keys, then that data is returned. Otherwise,
# the supplied block is called for each key for which there was no data,
# and the result will be written to the cache and returned.
def fetch_multi(*names)
  options = names.extract_options!
  mapping = names.inject({}) { |memo, name| memo[namespaced_key(name, options)] = name; memo }
  instrument_with_log(:fetch_multi, mapping.keys) do
    with do |connection|
      results = connection.get_multi(mapping.keys)
      # Writes for missing keys are pipelined via Dalli's quiet/multi mode.
      connection.multi do
        mapping.inject({}) do |memo, (expanded, name)|
          memo[name] = results[expanded]
          # NOTE: a cached literal nil is indistinguishable from a miss
          # here and will be regenerated by the block.
          if memo[name].nil?
            value = yield(name)
            memo[name] = value
            options = options.merge(:connection => connection)
            write_entry(expanded, value, options)
          end
          memo
        end
      end
    end
  end
end
# Increment a cached value. This method uses the memcached incr atomic
# operator and can only be used on values written with the :raw option.
# Calling it on a value not stored with :raw will fail.
# :initial defaults to the amount passed in, as if the counter was initially zero.
# memcached counters cannot hold negative values.
def increment(name, amount = 1, options=nil)
options ||= {}
name = namespaced_key(name, options)
initial = options.has_key?(:initial) ? options[:initial] : amount
expires_in = options[:expires_in]
instrument_with_log(:increment, name, :amount => amount) do
with { |c| c.incr(name, amount, expires_in, initial) }
end
rescue Dalli::DalliError => e
log_dalli_error(e)
instrument_error(e) if instrument_errors?
raise if raise_errors?
nil
end
# Decrement a cached value. This method uses the memcached decr atomic
# operator and can only be used on values written with the :raw option.
# Calling it on a value not stored with :raw will fail.
# :initial defaults to zero, as if the counter was initially zero.
# memcached counters cannot hold negative values.
def decrement(name, amount = 1, options=nil)
options ||= {}
name = namespaced_key(name, options)
initial = options.has_key?(:initial) ? options[:initial] : 0
expires_in = options[:expires_in]
instrument_with_log(:decrement, name, :amount => amount) do
with { |c| c.decr(name, amount, expires_in, initial) }
end
rescue Dalli::DalliError => e
log_dalli_error(e)
instrument_error(e) if instrument_errors?
raise if raise_errors?
nil
end
# Clear the entire cache on all memcached servers. This method should
# be used with care when using a shared cache.
def clear(options=nil)
instrument_with_log(:clear, 'flushing all keys') do
with { |c| c.flush_all }
end
rescue Dalli::DalliError => e
log_dalli_error(e)
instrument_error(e) if instrument_errors?
raise if raise_errors?
nil
end
# Clear any local cache
def cleanup(options=nil)
end
# Get the statistics from the memcached servers.
def stats
with { |c| c.stats }
end
def reset
with { |c| c.reset }
end
def logger
Dalli.logger
end
def logger=(new_logger)
Dalli.logger = new_logger
end
protected
# Read an entry from the cache.
def read_entry(key, options) # :nodoc:
entry = with { |c| c.get(key, options) }
# NB Backwards data compatibility, to be removed at some point
entry.is_a?(ActiveSupport::Cache::Entry) ? entry.value : entry
rescue Dalli::DalliError => e
log_dalli_error(e)
instrument_error(e) if instrument_errors?
raise if raise_errors?
nil
end
# Write an entry to the cache.
# Uses memcached `add` (store only when absent) when :unless_exist is
# set, plain `set` otherwise. The checked-out Dalli connection must be
# supplied via options[:connection] (see #write / #fetch_multi).
def write_entry(key, value, options) # :nodoc:
  # cleanup LocalCache
  cleanup if options[:unless_exist]
  method = options[:unless_exist] ? :add : :set
  expires_in = options[:expires_in]
  # Remove the connection from the options hash so it is not forwarded
  # to the server as a storage option.
  connection = options.delete(:connection)
  connection.send(method, key, value, expires_in, options)
rescue Dalli::DalliError => e
  log_dalli_error(e)
  instrument_error(e) if instrument_errors?
  raise if raise_errors?
  false
end
# Delete an entry from the cache.
def delete_entry(key, options) # :nodoc:
with { |c| c.delete(key) }
rescue Dalli::DalliError => e
log_dalli_error(e)
instrument_error(e) if instrument_errors?
raise if raise_errors?
false
end
private
# Expand +key+, apply an optional :namespace prefix (a Proc namespace is
# called), and keep the result within memcached's 250-byte key limit by
# truncating and appending an MD5 digest when necessary.
def namespaced_key(key, options)
  expanded = expanded_key(key)
  ns = options && options[:namespace]
  prefix = ns.is_a?(Proc) ? ns.call : ns
  expanded = "#{prefix}:#{expanded}" if prefix
  if expanded && expanded.size > 250
    expanded = "#{expanded[0, 213]}:md5:#{::Digest::MD5.hexdigest(expanded)}"
  end
  expanded
end
alias :normalize_key :namespaced_key
# Expand key to be a consistent string value. Invokes +cache_key_with_version+
# first to support Rails 5.2 cache versioning.
# Invoke +cache_key+ if object responds to +cache_key+. Otherwise, to_param method
# will be called. If the key is a Hash, then keys will be sorted alphabetically.
def expanded_key(key) # :nodoc:
  return key.cache_key_with_version.to_s if key.respond_to?(:cache_key_with_version)
  return key.cache_key.to_s if key.respond_to?(:cache_key)
  original_object_id = key.object_id
  case key
  when Array
    # Single-element arrays collapse to that element; larger arrays are
    # expanded element-by-element.
    if key.size > 1
      key = key.collect{|element| expanded_key(element)}
    else
      key = key.first
    end
  when Hash
    # Sort by stringified key so hash ordering does not change the cache key.
    key = key.sort_by { |k,_| k.to_s }.collect{|k,v| "#{k}=#{v}"}
  end
  key = key.to_param
  if key.respond_to? :force_encoding
    # Dup before re-encoding unless to_param already returned a new
    # object, so the caller's string is not mutated.
    key = key.dup if key.object_id == original_object_id
    key.force_encoding('binary')
  end
  key
end
def log_dalli_error(error)
logger.error("DalliError: #{error.message}") if logger
end
def instrument_with_log(operation, key, options=nil)
log(operation, key, options)
payload = { :key => key }
payload.merge!(options) if options.is_a?(Hash)
instrument(operation, payload) { |p| yield(p) }
end
def instrument_error(error)
instrument(:error, { :key => 'DalliError', :message => error.message })
end
def instrument(operation, payload)
ActiveSupport::Notifications.instrument("cache_#{operation}.active_support", payload) do
yield(payload) if block_given?
end
end
def log(operation, key, options=nil)
return unless logger && logger.debug? && !silence?
logger.debug("Cache #{operation}: #{key}#{options.blank? ? "" : " (#{options.inspect})"}")
end
def raise_errors?
!!@options[:raise_errors]
end
def instrument_errors?
!!@options[:instrument_errors]
end
# Make sure LocalCache is giving raw values, not `Entry`s, and
# respect `raw` option.
module LocalCacheEntryUnwrapAndRaw # :nodoc:
protected
def read_entry(key, options)
retval = super
if retval.is_a? ActiveSupport::Cache::Entry
# Must have come from LocalStore, unwrap it
if options[:raw]
retval.value.to_s
else
retval.value
end
else
retval
end
end
end
end
end
end
dalli-2.7.9/lib/dalli.rb 0000664 0000000 0000000 00000002505 13362715446 0015013 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'dalli/compressor'
require 'dalli/client'
require 'dalli/ring'
require 'dalli/server'
require 'dalli/socket'
require 'dalli/version'
require 'dalli/options'
require 'dalli/railtie' if defined?(::Rails::Railtie)
module Dalli
  # generic error
  class DalliError < RuntimeError; end
  # socket/server communication error
  class NetworkError < DalliError; end
  # no server available/alive error
  class RingError < DalliError; end
  # application error in marshalling serialization
  class MarshalError < DalliError; end
  # application error in marshalling deserialization or decompression
  class UnmarshalError < DalliError; end
  # payload too big for memcached
  class ValueOverMaxSize < RuntimeError; end

  class << self
    # Lazily resolved logger: the Rails logger when one is available,
    # otherwise a STDOUT logger at INFO level.
    def logger
      @logger ||= rails_logger || default_logger
    end

    # Allow callers to inject their own logger.
    attr_writer :logger

    # The Rails (or legacy RAILS_DEFAULT_LOGGER) logger, when present.
    def rails_logger
      (defined?(Rails) && Rails.respond_to?(:logger) && Rails.logger) ||
        (defined?(RAILS_DEFAULT_LOGGER) && RAILS_DEFAULT_LOGGER.respond_to?(:debug) && RAILS_DEFAULT_LOGGER)
    end

    # Fallback logger writing to STDOUT at INFO level.
    def default_logger
      require 'logger'
      logger = Logger.new(STDOUT)
      logger.level = Logger::INFO
      logger
    end
  end
end
# Fail fast at require time: Rails 2.x and earlier are not supported.
if defined?(RAILS_VERSION) && RAILS_VERSION < '3'
  raise Dalli::DalliError, "Dalli #{Dalli::VERSION} does not support Rails version < 3.0"
end
dalli-2.7.9/lib/dalli/ 0000775 0000000 0000000 00000000000 13362715446 0014464 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/dalli/cas/ 0000775 0000000 0000000 00000000000 13362715446 0015232 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/dalli/cas/client.rb 0000664 0000000 0000000 00000003414 13362715446 0017037 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'dalli/client'
module Dalli
  # CAS (check-and-swap) extensions for Dalli::Client, loaded by
  # requiring 'dalli/cas/client'.
  class Client
    ##
    # Get the value and CAS ID associated with the key. If a block is provided,
    # value and CAS will be passed to the block.
    def get_cas(key)
      (value, cas) = perform(:cas, key)
      # 'Not found' is the server-side sentinel for a missing key.
      value = (!value || value == 'Not found') ? nil : value
      if block_given?
        yield value, cas
      else
        [value, cas]
      end
    end

    ##
    # Fetch multiple keys efficiently, including available metadata such as CAS.
    # If a block is given, yields key/data pairs one at a time. Data is an array:
    # [value, cas_id]
    # If no block is given, returns a hash of
    # { 'key' => [value, cas_id] }
    def get_multi_cas(*keys)
      if block_given?
        get_multi_yielder(keys) {|*args| yield(*args)}
      else
        Hash.new.tap do |hash|
          get_multi_yielder(keys) {|k, data| hash[k] = data}
        end
      end
    end

    ##
    # Set the key-value pair, verifying existing CAS.
    # Returns the resulting CAS value if succeeded, and falsy otherwise.
    def set_cas(key, value, cas, ttl=nil, options=nil)
      ttl ||= @options[:expires_in].to_i
      perform(:set, key, value, ttl, cas, options)
    end

    ##
    # Conditionally add a key/value pair, verifying existing CAS, only if the
    # key already exists on the server. Returns the new CAS value if the
    # operation succeeded, or falsy otherwise.
    def replace_cas(key, value, cas, ttl=nil, options=nil)
      ttl ||= @options[:expires_in].to_i
      perform(:replace, key, value, ttl, cas, options)
    end

    # Delete a key/value pair, verifying existing CAS.
    # Returns true if succeeded, and falsy otherwise.
    def delete_cas(key, cas=0)
      perform(:delete, key, cas)
    end
  end
end
dalli-2.7.9/lib/dalli/client.rb 0000664 0000000 0000000 00000037141 13362715446 0016275 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'digest/md5'
require 'set'
# encoding: ascii
module Dalli
class Client
##
# Dalli::Client is the main class which developers will use to interact with
# the memcached server. Usage:
#
# Dalli::Client.new(['localhost:11211:10', 'cache-2.example.com:11211:5', '192.168.0.1:22122:5', '/var/run/memcached/socket'],
# :threadsafe => true, :failover => true, :expires_in => 300)
#
# servers is an Array of "host:port:weight" where weight allows you to distribute cache unevenly.
# Both weight and port are optional. If you pass in nil, Dalli will use the MEMCACHE_SERVERS
# environment variable or default to 'localhost:11211' if it is not present. Dalli also supports
# the ability to connect to Memcached on localhost through a UNIX socket. To use this functionality,
# use a full pathname (beginning with a slash character '/') in place of the "host:port" pair in
# the server configuration.
#
# Options:
# - :namespace - prepend each key with this value to provide simple namespacing.
# - :failover - if a server is down, look for and store values on another server in the ring. Default: true.
# - :threadsafe - ensure that only one thread is actively using a socket at a time. Default: true.
# - :expires_in - default TTL in seconds if you do not pass TTL as a parameter to an individual operation, defaults to 0 or forever
# - :compress - defaults to false, if true Dalli will compress values larger than 1024 bytes before sending them to memcached.
# - :serializer - defaults to Marshal
# - :compressor - defaults to zlib
# - :cache_nils - defaults to false, if true Dalli will not treat cached nil values as 'not found' for #fetch operations.
#
def initialize(servers=nil, options={})
  # Fall back to the MEMCACHE_SERVERS env var, then to a local default.
  @servers = normalize_servers(servers || ENV["MEMCACHE_SERVERS"] || '127.0.0.1:11211')
  @options = normalize_options(options)
  # The consistent-hash ring is built lazily on first use (see #ring).
  @ring = nil
end
#
# The standard memcached instruction set
#
##
# Turn on quiet aka noreply support.
# All relevant operations within this block will be effectively
# pipelined as Dalli will use 'quiet' operations where possible.
# Currently supports the set, add, replace and delete operations.
# Enable quiet/noreply mode for the duration of the block by setting a
# thread-local flag. Relevant operations (set, add, replace, delete) are
# effectively pipelined while the flag is on. Returns the block's value;
# the previous flag state is always restored, even on error.
def multi
  previous = Thread.current[:dalli_multi]
  Thread.current[:dalli_multi] = true
  yield
ensure
  Thread.current[:dalli_multi] = previous
end
##
# Get the value associated with the key.
# If a value is not found, then +nil+ is returned.
def get(key, options=nil)
perform(:get, key, options)
end
##
# Fetch multiple keys efficiently.
# If a block is given, yields key/value pairs one at a time.
# Otherwise returns a hash of { 'key' => 'value', 'key2' => 'value1' }
def get_multi(*keys)
  check_keys = keys.flatten
  check_keys.compact!
  # Nothing to fetch once nils are removed.
  return {} if check_keys.empty?
  if block_given?
    # data is [value, cas]; only the value is exposed to the caller.
    get_multi_yielder(keys) {|k, data| yield k, data.first}
  else
    Hash.new.tap do |hash|
      get_multi_yielder(keys) {|k, data| hash[k] = data.first}
    end
  end
end
CACHE_NILS = {cache_nils: true}.freeze
# Fetch the value associated with the key.
# If a value is found, then it is returned.
#
# If a value is not found and no block is given, then nil is returned.
#
# If a value is not found (or if the found value is nil and :cache_nils is false)
# and a block is given, the block will be invoked and its return value
# written to the cache and returned.
def fetch(key, ttl=nil, options=nil)
  # With cache_nils enabled, request the NOT_FOUND sentinel on misses so
  # a stored nil can be told apart from a missing key.
  options = options.nil? ? CACHE_NILS : options.merge(CACHE_NILS) if @options[:cache_nils]
  val = get(key, options)
  not_found = @options[:cache_nils] ?
    val == Dalli::Server::NOT_FOUND :
    val.nil?
  if not_found && block_given?
    val = yield
    # `add` (not `set`) so a value written concurrently by another
    # client is not clobbered.
    add(key, val, ttl_or_default(ttl), options)
  end
  val
end
##
# compare and swap values using optimistic locking.
# Fetch the existing value for key.
# If it exists, yield the value to the block.
# Add the block's return value as the new value for the key.
# Add will fail if someone else changed the value.
#
# Returns:
# - nil if the key did not exist.
# - false if the value was changed by someone else.
# - true if the value was successfully updated.
def cas(key, ttl=nil, options=nil, &block)
cas_core(key, false, ttl, options, &block)
end
##
# like #cas, but will yield to the block whether or not the value
# already exists.
#
# Returns:
# - false if the value was changed by someone else.
# - true if the value was successfully updated.
def cas!(key, ttl=nil, options=nil, &block)
cas_core(key, true, ttl, options, &block)
end
def set(key, value, ttl=nil, options=nil)
perform(:set, key, value, ttl_or_default(ttl), 0, options)
end
##
# Conditionally add a key/value pair, if the key does not already exist
# on the server. Returns truthy if the operation succeeded.
def add(key, value, ttl=nil, options=nil)
perform(:add, key, value, ttl_or_default(ttl), options)
end
##
# Conditionally add a key/value pair, only if the key already exists
# on the server. Returns truthy if the operation succeeded.
def replace(key, value, ttl=nil, options=nil)
perform(:replace, key, value, ttl_or_default(ttl), 0, options)
end
def delete(key)
perform(:delete, key, 0)
end
##
# Append value to the value already stored on the server for 'key'.
# Appending only works for values stored with :raw => true.
def append(key, value)
perform(:append, key, value.to_s)
end
##
# Prepend value to the value already stored on the server for 'key'.
# Prepending only works for values stored with :raw => true.
def prepend(key, value)
perform(:prepend, key, value.to_s)
end
def flush(delay=0)
time = -delay
ring.servers.map { |s| s.request(:flush, time += delay) }
end
alias_method :flush_all, :flush
##
# Incr adds the given amount to the counter on the memcached server.
# Amt must be a positive integer value.
#
# If default is nil, the counter must already exist or the operation
# will fail and will return nil. Otherwise this method will return
# the new value for the counter.
#
# Note that the ttl will only apply if the counter does not already
# exist. To increase an existing counter and update its TTL, use
# #cas.
def incr(key, amt=1, ttl=nil, default=nil)
raise ArgumentError, "Positive values only: #{amt}" if amt < 0
perform(:incr, key, amt.to_i, ttl_or_default(ttl), default)
end
##
# Decr subtracts the given amount from the counter on the memcached server.
# Amt must be a positive integer value.
#
# memcached counters are unsigned and cannot hold negative values. Calling
# decr on a counter which is 0 will just return 0.
#
# If default is nil, the counter must already exist or the operation
# will fail and will return nil. Otherwise this method will return
# the new value for the counter.
#
# Note that the ttl will only apply if the counter does not already
# exist. To decrease an existing counter and update its TTL, use
# #cas.
def decr(key, amt=1, ttl=nil, default=nil)
raise ArgumentError, "Positive values only: #{amt}" if amt < 0
perform(:decr, key, amt.to_i, ttl_or_default(ttl), default)
end
##
# Touch updates expiration time for a given key.
#
# Returns true if key exists, otherwise nil.
def touch(key, ttl=nil)
resp = perform(:touch, key, ttl_or_default(ttl))
resp.nil? ? nil : true
end
##
# Collect the stats for each server.
# You can optionally pass a type including :items, :slabs or :settings to get specific stats
# Returns a hash like { 'hostname:port' => { 'stat1' => 'value1', ... }, 'hostname2:port' => { ... } }
def stats(type=nil)
type = nil if ![nil, :items,:slabs,:settings].include? type
values = {}
ring.servers.each do |server|
values["#{server.name}"] = server.alive? ? server.request(:stats,type.to_s) : nil
end
values
end
##
# Reset stats for each server.
def reset_stats
ring.servers.map do |server|
server.alive? ? server.request(:reset_stats) : nil
end
end
##
## Make sure memcache servers are alive, or raise an Dalli::RingError
def alive!
ring.server_for_key("")
end
##
## Version of the memcache servers.
def version
values = {}
ring.servers.each do |server|
values["#{server.name}"] = server.alive? ? server.request(:version) : nil
end
values
end
##
# Close our connection to each server.
# If you perform another operation after this, the connections will be re-established.
def close
if @ring
@ring.servers.each { |s| s.close }
@ring = nil
end
end
alias_method :reset, :close
# Stub method so a bare Dalli client can pretend to be a connection pool.
def with
yield self
end
private
# Shared implementation for #cas and #cas!. Performs a CAS read, yields
# the current value (nil when the key is absent), then writes the
# block's result back using the retrieved CAS token. When +always_set+
# is false, a missing key short-circuits to nil without yielding.
def cas_core(key, always_set, ttl=nil, options=nil)
  (value, cas) = perform(:cas, key)
  # 'Not found' is the server-side sentinel for a missing key.
  value = (!value || value == 'Not found') ? nil : value
  return if value.nil? && !always_set
  newvalue = yield(value)
  perform(:set, key, newvalue, ttl_or_default(ttl), cas, options)
end
# Resolve the effective TTL: the explicit +ttl+ when given, otherwise
# the client-wide :expires_in option, coerced to an Integer. Raises
# ArgumentError when the value cannot be converted.
def ttl_or_default(ttl)
  effective = ttl || @options[:expires_in]
  effective.to_i
rescue NoMethodError
  raise ArgumentError, "Cannot convert ttl (#{ttl}) to an integer"
end
def groups_for_keys(*keys)
groups = mapped_keys(keys).flatten.group_by do |key|
begin
ring.server_for_key(key)
rescue Dalli::RingError
Dalli.logger.debug { "unable to get key #{key}" }
nil
end
end
return groups
end
def mapped_keys(keys)
keys_array = keys.flatten
keys_array.map! { |a| validate_key(a.to_s) }
keys_array
end
def make_multi_get_requests(groups)
groups.each do |server, keys_for_server|
begin
# TODO: do this with the perform chokepoint?
# But given the fact that fetching the response doesn't take place
# in that slot it's misleading anyway. Need to move all of this method
# into perform to be meaningful
server.request(:send_multiget, keys_for_server)
rescue DalliError, NetworkError => e
Dalli.logger.debug { e.inspect }
Dalli.logger.debug { "unable to get keys for server #{server.name}" }
end
end
end
def perform_multi_response_start(servers)
servers.each do |server|
next unless server.alive?
begin
server.multi_response_start
rescue DalliError, NetworkError => e
Dalli.logger.debug { e.inspect }
Dalli.logger.debug { "results from this server will be missing" }
servers.delete(server)
end
end
servers
end
##
# Normalizes the argument into an array of servers.
# If the argument is a string, it's expected that the URIs are comma separated e.g.
# "memcache1.example.com:11211,memcache2.example.com:11211,memcache3.example.com:11211"
# A String argument is split on commas into individual server entries;
# any other value (Array or nil) is returned untouched.
def normalize_servers(servers)
  servers.is_a?(String) ? servers.split(",") : servers
end
# Lazily build the consistent-hash ring of servers. Entries given in
# memcached:// URI form may carry SASL credentials, which are parsed
# out and passed to the server as :username/:password.
def ring
  @ring ||= Dalli::Ring.new(
    @servers.map do |s|
      server_options = {}
      if s =~ %r{\Amemcached://}
        uri = URI.parse(s)
        server_options[:username] = uri.user
        server_options[:password] = uri.password
        s = "#{uri.host}:#{uri.port}"
      end
      Dalli::Server.new(s, @options.merge(server_options))
    end, @options
  )
end
# Chokepoint method for instrumentation
# With a block: simply yields (multi-style operations manage their own
# routing). Otherwise validates the key, resolves its server on the
# ring, and dispatches the request. On NetworkError the request is
# retried unconditionally against whatever server the ring resolves
# next (failover) — note there is no retry cap here; backoff for dead
# servers is provided by Server#alive?/down!.
def perform(*all_args)
  return yield if block_given?
  op, key, *args = *all_args
  key = key.to_s
  key = validate_key(key)
  begin
    server = ring.server_for_key(key)
    ret = server.request(op, key, *args)
    ret
  rescue NetworkError => e
    Dalli.logger.debug { e.inspect }
    Dalli.logger.debug { "retrying request with new server" }
    retry
  end
end
# Validates and normalizes a key: must be non-blank, gets the namespace
# prepended, and keys longer than memcached's 250-character limit are
# truncated and suffixed with an MD5 digest of the full key to keep
# them unique.
def validate_key(key)
  raise ArgumentError, "key cannot be blank" if !key || key.length == 0
  key = key_with_namespace(key)
  if key.length > 250
    # 212 leaves headroom for the namespace prefix, the ":md5:" marker
    # and the 32-char hex digest while staying under the 250 limit.
    max_length_before_namespace = 212 - (namespace || '').size
    key = "#{key[0, max_length_before_namespace]}:md5:#{Digest::MD5.hexdigest(key)}"
  end
  return key
end
# Prepends the configured namespace (if any) to +key+.
def key_with_namespace(key)
  ns = namespace
  ns ? "#{ns}:#{key}" : key
end
# Strips the configured namespace prefix (if any) from +key+.
def key_without_namespace(key)
  ns = namespace
  ns ? key.sub(%r(\A#{Regexp.escape ns}:), '') : key
end
# Resolves the :namespace option to a String, invoking it first when it
# is a Proc. Returns nil when no namespace is configured.
def namespace
  ns = @options[:namespace]
  return nil unless ns
  ns.is_a?(Proc) ? ns.call.to_s : ns.to_s
end
# Massages user-supplied options: maps the deprecated :compression flag
# onto :compress (with a warning) and coerces :expires_in to an Integer,
# raising ArgumentError when it cannot be converted.
def normalize_options(opts)
  if opts[:compression]
    Dalli.logger.warn "DEPRECATED: Dalli's :compression option is now just :compress => true. Please update your configuration."
    opts[:compress] = opts.delete(:compression)
  end
  expiry = opts[:expires_in]
  if expiry
    begin
      opts[:expires_in] = expiry.to_i
    rescue NoMethodError
      raise ArgumentError, "cannot convert :expires_in => #{expiry.inspect} to an integer"
    end
  end
  opts
end
##
# Yields, one at a time, keys and their values+attributes.
#
# Drives the pipelined multi-get: groups keys by server, fires getkq
# requests, then select()s over the server sockets until every server
# has completed, died, or timed out. Each parsed pair is yielded as
# (key-without-namespace, [value, cas]).
#
# Fix: corrected the ungrammatical timeout log message
# ("did not response" -> "did not respond"); logic is unchanged.
def get_multi_yielder(keys)
  perform do
    return {} if keys.empty?
    ring.lock do
      begin
        groups = groups_for_keys(keys)
        if unfound_keys = groups.delete(nil)
          Dalli.logger.debug { "unable to get keys for #{unfound_keys.length} keys because no matching server was found" }
        end
        make_multi_get_requests(groups)
        servers = groups.keys
        return if servers.empty?
        servers = perform_multi_response_start(servers)
        start = Time.now
        while true
          # remove any dead servers
          servers.delete_if { |s| s.sock.nil? }
          break if servers.empty?
          # calculate remaining timeout
          elapsed = Time.now - start
          timeout = servers.first.options[:socket_timeout]
          time_left = (elapsed > timeout) ? 0 : timeout - elapsed
          sockets = servers.map(&:sock)
          readable, _ = IO.select(sockets, nil, nil, time_left)
          if readable.nil?
            # no response within timeout; abort pending connections
            servers.each do |server|
              Dalli.logger.debug { "memcached at #{server.name} did not respond within timeout" }
              server.multi_response_abort
            end
            break
          else
            readable.each do |sock|
              server = sock.server
              begin
                server.multi_response_nonblock.each_pair do |key, value_list|
                  yield key_without_namespace(key), value_list
                end
                # A completed server is done for this batch; stop polling it.
                if server.multi_response_completed?
                  servers.delete(server)
                end
              rescue NetworkError
                servers.delete(server)
              end
            end
          end
        end
      end
    end
  end
end
end
end
dalli-2.7.9/lib/dalli/compressor.rb 0000664 0000000 0000000 00000001060 13362715446 0017202 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'zlib'
require 'stringio'
module Dalli
# Default value compressor: raw zlib/DEFLATE with default settings.
class Compressor
  class << self
    # Returns +data+ DEFLATE-compressed.
    def compress(data)
      Zlib::Deflate.deflate(data)
    end

    # Inverse of .compress: inflates a zlib stream back to raw bytes.
    def decompress(data)
      Zlib::Inflate.inflate(data)
    end
  end
end
# Alternative compressor that frames the deflated data as a gzip
# stream, for interoperability with consumers that expect gzip.
class GzipCompressor
  class << self
    # Returns +data+ wrapped in a gzip stream.
    def compress(data)
      buffer = StringIO.new(String.new(""), "w")
      writer = Zlib::GzipWriter.new(buffer)
      begin
        writer.write(data)
      ensure
        writer.close
      end
      buffer.string
    end

    # Reads a gzip stream back into the original bytes.
    def decompress(data)
      Zlib::GzipReader.new(StringIO.new(data, "rb")).read
    end
  end
end
end
dalli-2.7.9/lib/dalli/options.rb 0000664 0000000 0000000 00000001677 13362715446 0016517 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'thread'
require 'monitor'
module Dalli
# Make Dalli threadsafe by using a lock around all
# public server methods.
#
#   Dalli::Server.extend(Dalli::Threadsafe)
#
# Each override below takes the monitor and delegates to the Server
# implementation via +super+, serializing socket access across threads.
# The Monitor is re-entrant, so lock!/unlock! can bracket a whole
# series of requests (see Dalli::Ring#lock) without deadlocking the
# individually-locked calls inside.
module Threadsafe
  def self.extended(obj)
    obj.init_threadsafe
  end
  def request(op, *args)
    @lock.synchronize do
      super
    end
  end
  def alive?
    @lock.synchronize do
      super
    end
  end
  def close
    @lock.synchronize do
      super
    end
  end
  def multi_response_start
    @lock.synchronize do
      super
    end
  end
  def multi_response_nonblock
    @lock.synchronize do
      super
    end
  end
  def multi_response_abort
    @lock.synchronize do
      super
    end
  end
  # Manually acquire the monitor; must be paired with #unlock!.
  def lock!
    @lock.mon_enter
  end
  def unlock!
    @lock.mon_exit
  end
  # Invoked by self.extended to set up the shared Monitor.
  def init_threadsafe
    @lock = Monitor.new
  end
end
end
dalli-2.7.9/lib/dalli/railtie.rb 0000664 0000000 0000000 00000000253 13362715446 0016442 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
module Dalli
  # Rails integration: registers :dalli_store as the default cache
  # store. before_configuration runs ahead of the application's own
  # config, so an explicit config.cache_store setting still wins.
  class Railtie < ::Rails::Railtie
    config.before_configuration do
      config.cache_store = :dalli_store
    end
  end
end
dalli-2.7.9/lib/dalli/ring.rb 0000664 0000000 0000000 00000007065 13362715446 0015760 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'digest/sha1'
require 'zlib'
module Dalli
class Ring
POINTS_PER_SERVER = 160 # this is the default in libmemcached
attr_accessor :servers, :continuum
# servers: array of Dalli::Server objects.
# options: :threadsafe (default true) wraps each server in
# Dalli::Threadsafe; :failover (default true) enables rehash-on-dead
# in #server_for_key.
#
# With more than one server a consistent-hashing continuum is built:
# each server receives ~POINTS_PER_SERVER points scaled by its share of
# the total weight, each point positioned by the first 32 bits of
# SHA1("name:index"), sorted for binary search. A single server needs
# no continuum.
def initialize(servers, options)
  @servers = servers
  @continuum = nil
  if servers.size > 1
    total_weight = servers.inject(0) { |memo, srv| memo + srv.weight }
    continuum = []
    servers.each do |server|
      entry_count_for(server, servers.size, total_weight).times do |idx|
        hash = Digest::SHA1.hexdigest("#{server.name}:#{idx}")
        value = Integer("0x#{hash[0..7]}")
        continuum << Dalli::Ring::Entry.new(value, server)
      end
    end
    @continuum = continuum.sort_by(&:value)
  end
  threadsafe! unless options[:threadsafe] == false
  @failover = options[:failover] != false
end
# Returns the live server responsible for +key+, or raises
# Dalli::RingError when none is available. With a continuum, up to 20
# attempts are made: if the chosen server is dead and :failover is
# enabled, the key is rehashed with the attempt number prefixed so a
# different point (and likely a different server) is selected.
def server_for_key(key)
  if @continuum
    hkey = hash_for(key)
    20.times do |try|
      entryidx = binary_search(@continuum, hkey)
      server = @continuum[entryidx].server
      return server if server.alive?
      break unless @failover
      hkey = hash_for("#{try}#{key}")
    end
  else
    # Single-server ring: no hashing needed.
    server = @servers.first
    return server if server && server.alive?
  end
  raise Dalli::RingError, "No server available"
end
# Holds every server's lock for the duration of the block, releasing
# them even when the block raises. Returns the block's value.
def lock
  @servers.each(&:lock!)
  begin
    yield
  ensure
    @servers.each(&:unlock!)
  end
end
private
def threadsafe!
@servers.each do |s|
s.extend(Dalli::Threadsafe)
end
end
# Maps a key onto the ring's 32-bit point space via CRC32.
def hash_for(key)
  Zlib.crc32(key)
end
# Number of continuum points a server receives: POINTS_PER_SERVER per
# server on average, scaled by this server's share of the total weight.
def entry_count_for(server, total_servers, total_weight)
  ((total_servers * POINTS_PER_SERVER * server.weight) / Float(total_weight)).floor
end
# Native extension to perform the binary search within the continuum
# space. Fallback to a pure Ruby version if the compilation doesn't work.
# optional for performance and only necessary if you are using multiple
# memcached servers.
begin
require 'inline'
inline do |builder|
builder.c <<-EOM
int binary_search(VALUE ary, unsigned int r) {
long upper = RARRAY_LEN(ary) - 1;
long lower = 0;
long idx = 0;
ID value = rb_intern("value");
VALUE continuumValue;
unsigned int l;
while (lower <= upper) {
idx = (lower + upper) / 2;
continuumValue = rb_funcall(RARRAY_PTR(ary)[idx], value, 0);
l = NUM2UINT(continuumValue);
if (l == r) {
return idx;
}
else if (l > r) {
upper = idx - 1;
}
else {
lower = idx + 1;
}
}
return upper;
}
EOM
end
rescue LoadError
# Find the closest index in the Ring with value <= the given value.
# Returns -1 when every entry's value exceeds +value+ (callers index
# with it, so -1 wraps to the last entry).
def binary_search(ary, value)
  low = 0
  high = ary.size - 1
  while low <= high
    mid = (low + high) / 2
    case ary[mid].value <=> value
    when 0
      return mid
    when 1
      high = mid - 1
    else
      low = mid + 1
    end
  end
  high
end
end
# A single point on the continuum: an immutable 32-bit position plus
# the server that owns it.
class Entry
  attr_reader :value, :server

  def initialize(val, srv)
    @value = val
    @server = srv
  end
end
end
end
dalli-2.7.9/lib/dalli/server.rb 0000664 0000000 0000000 00000055315 13362715446 0016330 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'socket'
require 'timeout'
module Dalli
class Server
attr_accessor :hostname
attr_accessor :port
attr_accessor :weight
attr_accessor :options
attr_reader :sock
attr_reader :socket_type # possible values: :unix, :tcp
DEFAULT_PORT = 11211
DEFAULT_WEIGHT = 1
DEFAULTS = {
# seconds between trying to contact a remote server
:down_retry_delay => 60,
# connect/read/write timeout for socket operations
:socket_timeout => 0.5,
# times a socket operation may fail before considering the server dead
:socket_max_failures => 2,
# amount of time to sleep between retries when a failure occurs
:socket_failure_delay => 0.01,
# max size of value in bytes (default is 1 MB, can be overriden with "memcached -I ")
:value_max_bytes => 1024 * 1024,
# surpassing value_max_bytes either warns (false) or throws (true)
:error_when_over_max_size => false,
:compressor => Compressor,
# min byte size to attempt compression
:compression_min_size => 1024,
# max byte size for compression
:compression_max_size => false,
:serializer => Marshal,
:username => nil,
:password => nil,
:keepalive => true,
# max byte size for SO_SNDBUF
:sndbuf => nil,
# max byte size for SO_RCVBUF
:rcvbuf => nil
}
# attribs: "host", "host:port", "host:port:weight", "[ipv6]:port:weight",
# or "/path/to/socket" (optionally ":weight") for unix sockets — see
# #parse_hostname. options are merged over DEFAULTS above.
def initialize(attribs, options = {})
  @hostname, @port, @weight, @socket_type = parse_hostname(attribs)
  @fail_count = 0      # consecutive socket failures, reset by up!
  @down_at = nil       # when the server was first marked down
  @last_down_at = nil  # last down! time; drives :down_retry_delay backoff
  @options = DEFAULTS.merge(options)
  @sock = nil
  @msg = nil           # last failure message, reported in NetworkError
  @error = nil         # last failure exception class name
  @pid = nil           # owning process id, used to detect forks
  @inprogress = nil    # true while a socket read/write is mid-flight
end
# Human-readable identifier for this server: the socket path for unix
# sockets, otherwise "host:port".
def name
  return hostname if socket_type == :unix
  "#{hostname}:#{port}"
end
# Chokepoint method for instrumentation
# Dispatches +op+ (a private protocol method such as :get or :set)
# after verifying connection state. Error policy:
# * Dalli::MarshalError — logged, returns false (value just isn't cached)
# * DalliError / NetworkError / ValueOverMaxSize / Timeout — re-raised
# * anything else — logged and the server is marked down (down! itself
#   raises Dalli::NetworkError)
def request(op, *args)
  verify_state
  raise Dalli::NetworkError, "#{name} is down: #{@error} #{@msg}. If you are sure it is running, ensure memcached version is > 1.4." unless alive?
  begin
    send(op, *args)
  rescue Dalli::MarshalError => ex
    Dalli.logger.error "Marshalling error for key '#{args.first}': #{ex.message}"
    Dalli.logger.error "You are trying to cache a Ruby object which cannot be serialized to memcached."
    Dalli.logger.error ex.backtrace.join("\n\t")
    false
  rescue Dalli::DalliError, Dalli::NetworkError, Dalli::ValueOverMaxSize, Timeout::Error
    raise
  rescue => ex
    Dalli.logger.error "Unexpected exception during Dalli request: #{ex.class.name}: #{ex.message}"
    Dalli.logger.error ex.backtrace.join("\n\t")
    down!
  end
end
# True when a socket is already open or can be opened now. While still
# within :down_retry_delay of the last failure, no reconnect is
# attempted and false is returned immediately — this is the backoff
# that keeps dead servers from being hammered.
def alive?
  return true if @sock
  if @last_down_at && @last_down_at + options[:down_retry_delay] >= Time.now
    time = @last_down_at + options[:down_retry_delay] - Time.now
    Dalli.logger.debug { "down_retry_delay not reached for #{name} (%.3f seconds left)" % time }
    return false
  end
  connect
  !!@sock
rescue Dalli::NetworkError
  false
end
def close
return unless @sock
@sock.close rescue nil
@sock = nil
@pid = nil
@inprogress = false
end
def lock!
end
def unlock!
end
def serializer
@options[:serializer]
end
def compressor
@options[:compressor]
end
# Start reading key/value pairs from this connection. This is usually called
# after a series of GETKQ commands. A NOOP is sent, and the server begins
# flushing responses for kv pairs that were found.
#
# Returns nothing.
def multi_response_start
verify_state
write_noop
@multi_buffer = String.new('')
@position = 0
@inprogress = true
end
# Did the last call to #multi_response_start complete successfully?
def multi_response_completed?
@multi_buffer.nil?
end
# Attempt to receive and parse as many key/value pairs as possible
# from this server. After #multi_response_start, this should be invoked
# repeatedly whenever this server's socket is readable until
# #multi_response_completed?.
#
# Returns a Hash of kv pairs received.
def multi_response_nonblock
  raise 'multi_response has completed' if @multi_buffer.nil?
  # Accumulate whatever is currently readable; parsing resumes from
  # @position so partial responses are handled across calls.
  @multi_buffer << @sock.read_available
  buf = @multi_buffer
  pos = @position
  values = {}
  # Each binary-protocol response begins with a 24-byte header.
  while buf.bytesize - pos >= 24
    header = buf.slice(pos, 24)
    (key_length, _, body_length, cas) = header.unpack(KV_HEADER)
    if key_length == 0
      # A zero-length key is the NOOP echo: the pipeline is fully drained.
      @multi_buffer = nil
      @position = nil
      @inprogress = false
      break
    elsif buf.bytesize - pos >= 24 + body_length
      # Body layout: 4 bytes of flags, then the key, then the value.
      flags = buf.slice(pos + 24, 4).unpack('N')[0]
      key = buf.slice(pos + 24 + 4, key_length)
      value = buf.slice(pos + 24 + 4 + key_length, body_length - key_length - 4) if body_length - key_length - 4 > 0
      pos = pos + 24 + body_length
      begin
        values[key] = [deserialize(value, flags), cas]
      rescue DalliError
        # Skip entries that fail to deserialize rather than abort the batch.
      end
    else
      # not enough data yet, wait for more
      break
    end
  end
  @position = pos
  values
rescue SystemCallError, Timeout::Error, EOFError => e
  failure!(e)
end
# Abort an earlier #multi_response_start. Used to signal an external
# timeout. The underlying socket is disconnected, and the exception is
# swallowed.
#
# Returns nothing.
def multi_response_abort
@multi_buffer = nil
@position = nil
@inprogress = false
failure!(RuntimeError.new('External timeout'))
rescue NetworkError
true
end
# NOTE: Additional public methods should be overridden in Dalli::Threadsafe
private
def verify_state
failure!(RuntimeError.new('Already writing to socket')) if @inprogress
if @pid && @pid != Process.pid
message = 'Fork detected, re-connecting child process...'
Dalli.logger.info { message }
reconnect! message
end
end
def reconnect!(message)
close
sleep(options[:socket_failure_delay]) if options[:socket_failure_delay]
raise Dalli::NetworkError, message
end
def failure!(exception)
message = "#{name} failed (count: #{@fail_count}) #{exception.class}: #{exception.message}"
Dalli.logger.warn { message }
@fail_count += 1
if @fail_count >= options[:socket_max_failures]
down!
else
reconnect! 'Socket operation failed, retrying...'
end
end
def down!
close
@last_down_at = Time.now
if @down_at
time = Time.now - @down_at
Dalli.logger.debug { "#{name} is still down (for %.3f seconds now)" % time }
else
@down_at = @last_down_at
Dalli.logger.warn { "#{name} is down" }
end
@error = $! && $!.class.name
@msg = @msg || ($! && $!.message && !$!.message.empty? && $!.message)
raise Dalli::NetworkError, "#{name} is down: #{@error} #{@msg}"
end
def up!
if @down_at
time = Time.now - @down_at
Dalli.logger.warn { "#{name} is back (downtime was %.3f seconds)" % time }
end
@fail_count = 0
@down_at = nil
@last_down_at = nil
@msg = nil
@error = nil
end
def multi?
Thread.current[:dalli_multi]
end
def get(key, options=nil)
req = [REQUEST, OPCODES[:get], key.bytesize, 0, 0, 0, key.bytesize, 0, 0, key].pack(FORMAT[:get])
write(req)
generic_response(true, !!(options && options.is_a?(Hash) && options[:cache_nils]))
end
def send_multiget(keys)
req = String.new("")
keys.each do |key|
req << [REQUEST, OPCODES[:getkq], key.bytesize, 0, 0, 0, key.bytesize, 0, 0, key].pack(FORMAT[:getkq])
end
# Could send noop here instead of in multi_response_start
write(req)
end
def set(key, value, ttl, cas, options)
(value, flags) = serialize(key, value, options)
ttl = sanitize_ttl(ttl)
guard_max_value(key, value) do
req = [REQUEST, OPCODES[multi? ? :setq : :set], key.bytesize, 8, 0, 0, value.bytesize + key.bytesize + 8, 0, cas, flags, ttl, key, value].pack(FORMAT[:set])
write(req)
cas_response unless multi?
end
end
def add(key, value, ttl, options)
(value, flags) = serialize(key, value, options)
ttl = sanitize_ttl(ttl)
guard_max_value(key, value) do
req = [REQUEST, OPCODES[multi? ? :addq : :add], key.bytesize, 8, 0, 0, value.bytesize + key.bytesize + 8, 0, 0, flags, ttl, key, value].pack(FORMAT[:add])
write(req)
cas_response unless multi?
end
end
def replace(key, value, ttl, cas, options)
(value, flags) = serialize(key, value, options)
ttl = sanitize_ttl(ttl)
guard_max_value(key, value) do
req = [REQUEST, OPCODES[multi? ? :replaceq : :replace], key.bytesize, 8, 0, 0, value.bytesize + key.bytesize + 8, 0, cas, flags, ttl, key, value].pack(FORMAT[:replace])
write(req)
cas_response unless multi?
end
end
def delete(key, cas)
req = [REQUEST, OPCODES[multi? ? :deleteq : :delete], key.bytesize, 0, 0, 0, key.bytesize, 0, cas, key].pack(FORMAT[:delete])
write(req)
generic_response unless multi?
end
def flush(ttl)
req = [REQUEST, OPCODES[:flush], 0, 4, 0, 0, 4, 0, 0, 0].pack(FORMAT[:flush])
write(req)
generic_response
end
# Shared implementation of incr/decr. A nil +default+ sends the magic
# expiry 0xFFFFFFFF, which tells memcached not to create a missing key.
# The 64-bit count/default are split into high/low 32-bit words (see
# #split) to match the big-endian 'N' directives in FORMAT.
# Returns the new counter value (unpacked as big-endian 64-bit), or
# the falsy result from #generic_response (nil/false) when the
# operation did not apply.
def decr_incr(opcode, key, count, ttl, default)
  expiry = default ? sanitize_ttl(ttl) : 0xFFFFFFFF
  default ||= 0
  (h, l) = split(count)
  (dh, dl) = split(default)
  req = [REQUEST, OPCODES[opcode], key.bytesize, 20, 0, 0, key.bytesize + 20, 0, 0, h, l, dh, dl, expiry, key].pack(FORMAT[opcode])
  write(req)
  body = generic_response
  body ? body.unpack('Q>').first : body
end
def decr(key, count, ttl, default)
decr_incr :decr, key, count, ttl, default
end
def incr(key, count, ttl, default)
decr_incr :incr, key, count, ttl, default
end
def write_append_prepend(opcode, key, value)
write_generic [REQUEST, OPCODES[opcode], key.bytesize, 0, 0, 0, value.bytesize + key.bytesize, 0, 0, key, value].pack(FORMAT[opcode])
end
def write_generic(bytes)
write(bytes)
generic_response
end
def write_noop
req = [REQUEST, OPCODES[:noop], 0, 0, 0, 0, 0, 0, 0].pack(FORMAT[:noop])
write(req)
end
# Noop is a keepalive operation but also used to demarcate the end of a set of pipelined commands.
# We need to read all the responses at once.
def noop
write_noop
multi_response
end
def append(key, value)
write_append_prepend :append, key, value
end
def prepend(key, value)
write_append_prepend :prepend, key, value
end
def stats(info='')
req = [REQUEST, OPCODES[:stat], info.bytesize, 0, 0, 0, info.bytesize, 0, 0, info].pack(FORMAT[:stat])
write(req)
keyvalue_response
end
def reset_stats
write_generic [REQUEST, OPCODES[:stat], 'reset'.bytesize, 0, 0, 0, 'reset'.bytesize, 0, 0, 'reset'].pack(FORMAT[:stat])
end
def cas(key)
req = [REQUEST, OPCODES[:get], key.bytesize, 0, 0, 0, key.bytesize, 0, 0, key].pack(FORMAT[:get])
write(req)
data_cas_response
end
def version
write_generic [REQUEST, OPCODES[:version], 0, 0, 0, 0, 0, 0, 0].pack(FORMAT[:noop])
end
def touch(key, ttl)
ttl = sanitize_ttl(ttl)
write_generic [REQUEST, OPCODES[:touch], key.bytesize, 4, 0, 0, key.bytesize + 4, 0, 0, ttl, key].pack(FORMAT[:touch])
end
# http://www.hjp.at/zettel/m/memcached_flags.rxml
# Looks like most clients use bit 0 to indicate native language serialization
# and bit 1 to indicate gzip compression.
FLAG_SERIALIZED = 0x1
FLAG_COMPRESSED = 0x2
# Converts +value+ into [bytes, flags] for the wire. Unless
# options[:raw], the value is dumped through the configured serializer
# (Marshal by default, see DEFAULTS) and FLAG_SERIALIZED is set. The
# bytes are then compressed — setting FLAG_COMPRESSED — when
# compression is enabled and the size falls inside the configured
# min/max window.
def serialize(key, value, options=nil)
  marshalled = false
  value = unless options && options[:raw]
    marshalled = true
    begin
      self.serializer.dump(value)
    rescue Timeout::Error => e
      raise e
    rescue => ex
      # Marshalling can throw several different types of generic Ruby exceptions.
      # Convert to a specific exception so we can special case it higher up the stack.
      exc = Dalli::MarshalError.new(ex.message)
      exc.set_backtrace ex.backtrace
      raise exc
    end
  else
    value.to_s
  end
  compressed = false
  set_compress_option = true if options && options[:compress]
  if (@options[:compress] || set_compress_option) && value.bytesize >= @options[:compression_min_size] &&
    (!@options[:compression_max_size] || value.bytesize <= @options[:compression_max_size])
    value = self.compressor.compress(value)
    compressed = true
  end
  flags = 0
  flags |= FLAG_COMPRESSED if compressed
  flags |= FLAG_SERIALIZED if marshalled
  [value, flags]
end
# Inverse of #serialize: decompresses and/or deserializes +value+
# according to +flags+. Known Marshal/Zlib failure messages are
# re-raised as Dalli::UnmarshalError; unrelated TypeError and
# ArgumentError exceptions propagate unchanged.
def deserialize(value, flags)
  value = self.compressor.decompress(value) if (flags & FLAG_COMPRESSED) != 0
  value = self.serializer.load(value) if (flags & FLAG_SERIALIZED) != 0
  value
rescue TypeError
  raise if $!.message !~ /needs to have method `_load'|exception class\/object expected|instance of IO needed|incompatible marshal file format/
  raise UnmarshalError, "Unable to unmarshal value: #{$!.message}"
rescue ArgumentError
  raise if $!.message !~ /undefined class|marshal data too short/
  raise UnmarshalError, "Unable to unmarshal value: #{$!.message}"
rescue Zlib::Error
  raise UnmarshalError, "Unable to uncompress value: #{$!.message}"
end
def data_cas_response
(extras, _, status, count, _, cas) = read_header.unpack(CAS_HEADER)
data = read(count) if count > 0
if status == 1
nil
elsif status != 0
raise Dalli::DalliError, "Response error #{status}: #{RESPONSE_CODES[status]}"
elsif data
flags = data[0...extras].unpack('N')[0]
value = data[extras..-1]
data = deserialize(value, flags)
end
[data, cas]
end
CAS_HEADER = '@4CCnNNQ'
NORMAL_HEADER = '@4CCnN'
KV_HEADER = '@2n@6nN@16Q'
def guard_max_value(key, value)
if value.bytesize <= @options[:value_max_bytes]
yield
else
message = "Value for #{key} over max size: #{@options[:value_max_bytes]} <= #{value.bytesize}"
raise Dalli::ValueOverMaxSize, message if @options[:error_when_over_max_size]
Dalli.logger.warn message
false
end
end
# https://github.com/memcached/memcached/blob/master/doc/protocol.txt#L79
# > An expiration time, in seconds. Can be up to 30 days. After 30 days, is treated as a unix timestamp of an exact date.
MAX_ACCEPTABLE_EXPIRATION_INTERVAL = 30*24*60*60 # 30 days
# Coerces +ttl+ to an Integer the server will interpret as intended:
# intervals up to 30 days and values that already look like absolute
# unix timestamps pass through; anything between is converted into an
# absolute expiration timestamp.
def sanitize_ttl(ttl)
  interval = ttl.to_i
  return interval if interval <= MAX_ACCEPTABLE_EXPIRATION_INTERVAL
  current_time = Time.now.to_i
  return interval if interval > current_time # already a timestamp
  Dalli.logger.debug "Expiration interval (#{interval}) too long for Memcached, converting to an expiration timestamp"
  current_time + interval
end
# Implements the NullObject pattern to store an application-defined value for 'Key not found' responses.
class NilObject; end
NOT_FOUND = NilObject.new
# Parses one standard response (header plus optional body).
# Status 1 (key not found) => nil, or the NOT_FOUND sentinel when
# +cache_nils+ is set so callers can distinguish "cached nil" from a
# miss. Status 2/5 (exists / not stored) => false. Any other non-zero
# status raises Dalli::DalliError. With a body present, returns the
# value (deserialized when +unpack+); with no body, returns true.
def generic_response(unpack=false, cache_nils=false)
  (extras, _, status, count) = read_header.unpack(NORMAL_HEADER)
  data = read(count) if count > 0
  if status == 1
    cache_nils ? NOT_FOUND : nil
  elsif status == 2 || status == 5
    false # Not stored, normal status for add operation
  elsif status != 0
    raise Dalli::DalliError, "Response error #{status}: #{RESPONSE_CODES[status]}"
  elsif data
    # Body layout: +extras+ bytes of flags, then the value.
    flags = data[0...extras].unpack('N')[0]
    value = data[extras..-1]
    unpack ? deserialize(value, flags) : value
  else
    true
  end
end
def cas_response
(_, _, status, count, _, cas) = read_header.unpack(CAS_HEADER)
read(count) if count > 0 # this is potential data that we don't care about
if status == 1
nil
elsif status == 2 || status == 5
false # Not stored, normal status for add operation
elsif status != 0
raise Dalli::DalliError, "Response error #{status}: #{RESPONSE_CODES[status]}"
else
cas
end
end
def keyvalue_response
hash = {}
while true
(key_length, _, body_length, _) = read_header.unpack(KV_HEADER)
return hash if key_length == 0
key = read(key_length)
value = read(body_length - key_length) if body_length - key_length > 0
hash[key] = value
end
end
def multi_response
hash = {}
while true
(key_length, _, body_length, _) = read_header.unpack(KV_HEADER)
return hash if key_length == 0
flags = read(4).unpack('N')[0]
key = read(key_length)
value = read(body_length - key_length - 4) if body_length - key_length - 4 > 0
hash[key] = deserialize(value, flags)
end
end
def write(bytes)
begin
@inprogress = true
result = @sock.write(bytes)
@inprogress = false
result
rescue SystemCallError, Timeout::Error => e
failure!(e)
end
end
def read(count)
begin
@inprogress = true
data = @sock.readfull(count)
@inprogress = false
data
rescue SystemCallError, Timeout::Error, EOFError => e
failure!(e)
end
end
def read_header
read(24) || raise(Dalli::NetworkError, 'No response')
end
def connect
Dalli.logger.debug { "Dalli::Server#connect #{name}" }
begin
@pid = Process.pid
if socket_type == :unix
@sock = KSocket::UNIX.open(hostname, self, options)
else
@sock = KSocket::TCP.open(hostname, port, self, options)
end
sasl_authentication if need_auth?
@version = version # trigger actual connect
up!
rescue Dalli::DalliError # SASL auth failure
raise
rescue SystemCallError, Timeout::Error, EOFError, SocketError => e
# SocketError = DNS resolution failure
failure!(e)
end
end
# Splits a 64-bit integer into [high 32 bits, low 32 bits] for packing
# as two big-endian 'N' words (see decr_incr/FORMAT).
def split(n)
  [n >> 32, n & 0xFFFFFFFF]
end
REQUEST = 0x80
RESPONSE = 0x81
# Response codes taken from:
# https://github.com/memcached/memcached/wiki/BinaryProtocolRevamped#response-status
RESPONSE_CODES = {
0 => 'No error',
1 => 'Key not found',
2 => 'Key exists',
3 => 'Value too large',
4 => 'Invalid arguments',
5 => 'Item not stored',
6 => 'Incr/decr on a non-numeric value',
7 => 'The vbucket belongs to another server',
8 => 'Authentication error',
9 => 'Authentication continue',
0x20 => 'Authentication required',
0x81 => 'Unknown command',
0x82 => 'Out of memory',
0x83 => 'Not supported',
0x84 => 'Internal error',
0x85 => 'Busy',
0x86 => 'Temporary failure'
}
OPCODES = {
:get => 0x00,
:set => 0x01,
:add => 0x02,
:replace => 0x03,
:delete => 0x04,
:incr => 0x05,
:decr => 0x06,
:flush => 0x08,
:noop => 0x0A,
:version => 0x0B,
:getkq => 0x0D,
:append => 0x0E,
:prepend => 0x0F,
:stat => 0x10,
:setq => 0x11,
:addq => 0x12,
:replaceq => 0x13,
:deleteq => 0x14,
:incrq => 0x15,
:decrq => 0x16,
:auth_negotiation => 0x20,
:auth_request => 0x21,
:auth_continue => 0x22,
:touch => 0x1C,
}
HEADER = "CCnCCnNNQ"
OP_FORMAT = {
:get => 'a*',
:set => 'NNa*a*',
:add => 'NNa*a*',
:replace => 'NNa*a*',
:delete => 'a*',
:incr => 'NNNNNa*',
:decr => 'NNNNNa*',
:flush => 'N',
:noop => '',
:getkq => 'a*',
:version => '',
:stat => 'a*',
:append => 'a*a*',
:prepend => 'a*a*',
:auth_request => 'a*a*',
:auth_continue => 'a*a*',
:touch => 'Na*',
}
FORMAT = OP_FORMAT.inject({}) { |memo, (k, v)| memo[k] = HEADER + v; memo }
#######
# SASL authentication support for NorthScale
#######
def need_auth?
@options[:username] || ENV['MEMCACHE_USERNAME']
end
def username
@options[:username] || ENV['MEMCACHE_USERNAME']
end
def password
@options[:password] || ENV['MEMCACHE_PASSWORD']
end
# PLAIN-mechanism SASL handshake, run right after connect when
# credentials are configured (see #need_auth?). Two steps:
# 1. list-mechanisms — bail out quietly (status 0x81) if the server
#    doesn't support auth; require PLAIN to be offered.
# 2. auth request with "\0username\0password"; status 0 means success,
#    status 0x21 (auth-continue) is rejected since only single-step
#    PLAIN is implemented.
def sasl_authentication
  Dalli.logger.info { "Dalli/SASL authenticating as #{username}" }
  # negotiate
  req = [REQUEST, OPCODES[:auth_negotiation], 0, 0, 0, 0, 0, 0, 0].pack(FORMAT[:noop])
  write(req)
  (extras, _type, status, count) = read_header.unpack(NORMAL_HEADER)
  raise Dalli::NetworkError, "Unexpected message format: #{extras} #{count}" unless extras == 0 && count > 0
  # Mechanism list is NUL-separated on the wire; normalize to spaces.
  content = read(count).gsub(/\u0000/, ' ')
  return (Dalli.logger.debug("Authentication not required/supported by server")) if status == 0x81
  mechanisms = content.split(' ')
  raise NotImplementedError, "Dalli only supports the PLAIN authentication mechanism" if !mechanisms.include?('PLAIN')
  # request
  mechanism = 'PLAIN'
  msg = "\x0#{username}\x0#{password}"
  req = [REQUEST, OPCODES[:auth_request], mechanism.bytesize, 0, 0, 0, mechanism.bytesize + msg.bytesize, 0, 0, mechanism, msg].pack(FORMAT[:auth_request])
  write(req)
  (extras, _type, status, count) = read_header.unpack(NORMAL_HEADER)
  raise Dalli::NetworkError, "Unexpected message format: #{extras} #{count}" unless extras == 0 && count > 0
  content = read(count)
  return Dalli.logger.info("Dalli/SASL: #{content}") if status == 0
  raise Dalli::DalliError, "Error authenticating: #{status}" unless status == 0x21
  raise NotImplementedError, "No two-step authentication mechanisms supported"
  # (step, msg) = sasl.receive('challenge', content)
  # raise Dalli::NetworkError, "Authentication failed" if sasl.failed? || step != 'response'
end
# Parses a server string into [hostname, port, weight, socket_type].
# Accepted forms: "host", "host:port", "host:port:weight",
# "[ipv6]:port:weight", and "/path/to/socket" (unix socket; optional
# ":weight", no port allowed). Regex captures: 2 = bracketed IPv6
# host, 1 = host or path, 3 = port (tcp) or weight (unix), 4 = weight
# (tcp). Port is nil for unix sockets.
def parse_hostname(str)
  res = str.match(/\A(\[([\h:]+)\]|[^:]+)(?::(\d+))?(?::(\d+))?\z/)
  raise Dalli::DalliError, "Could not parse hostname #{str}" if res.nil? || res[1] == '[]'
  hostnam = res[2] || res[1]
  if hostnam =~ /\A\//
    socket_type = :unix
    # in case of unix socket, allow only setting of weight, not port
    raise Dalli::DalliError, "Could not parse hostname #{str}" if res[4]
    weigh = res[3]
  else
    socket_type = :tcp
    por = res[3] || DEFAULT_PORT
    por = Integer(por)
    weigh = res[4]
  end
  weigh ||= DEFAULT_WEIGHT
  weigh = Integer(weigh)
  return hostnam, por, weigh, socket_type
end
end
end
dalli-2.7.9/lib/dalli/socket.rb 0000664 0000000 0000000 00000010711 13362715446 0016301 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'rbconfig'
module Dalli::Server::TCPSocketOptions
  # Applies TCP tuning from the client options: Nagle's algorithm is
  # always disabled (TCP_NODELAY); SO_KEEPALIVE and the SO_RCVBUF /
  # SO_SNDBUF buffer sizes are set only when the corresponding option
  # is provided.
  def setsockopts(sock, options)
    sock.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, true)
    sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, true) if options[:keepalive]
    sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_RCVBUF, options[:rcvbuf]) if options[:rcvbuf]
    sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_SNDBUF, options[:sndbuf]) if options[:sndbuf]
  end
end
begin
require 'kgio'
puts "Using kgio socket IO" if defined?($TESTING) && $TESTING
class Dalli::Server::KSocket < Kgio::Socket
attr_accessor :options, :server
def kgio_wait_readable
IO.select([self], nil, nil, options[:socket_timeout]) || raise(Timeout::Error, "IO timeout")
end
def kgio_wait_writable
IO.select(nil, [self], nil, options[:socket_timeout]) || raise(Timeout::Error, "IO timeout")
end
alias :write :kgio_write
def readfull(count)
value = String.new('')
while true
value << kgio_read!(count - value.bytesize)
break if value.bytesize == count
end
value
end
def read_available
value = String.new('')
while true
ret = kgio_tryread(8196)
case ret
when nil
raise EOFError, 'end of stream'
when :wait_readable
break
else
value << ret
end
end
value
end
end
class Dalli::Server::KSocket::TCP < Dalli::Server::KSocket
extend Dalli::Server::TCPSocketOptions
def self.open(host, port, server, options = {})
addr = Socket.pack_sockaddr_in(port, host)
sock = start(addr)
setsockopts(sock, options)
sock.options = options
sock.server = server
sock.kgio_wait_writable
sock
rescue Timeout::Error
sock.close if sock
raise
end
end
class Dalli::Server::KSocket::UNIX < Dalli::Server::KSocket
def self.open(path, server, options = {})
addr = Socket.pack_sockaddr_un(path)
sock = start(addr)
sock.options = options
sock.server = server
sock.kgio_wait_writable
sock
rescue Timeout::Error
sock.close if sock
raise
end
end
if ::Kgio.respond_to?(:wait_readable=)
::Kgio.wait_readable = :kgio_wait_readable
::Kgio.wait_writable = :kgio_wait_writable
end
rescue LoadError
puts "Using standard socket IO (#{RUBY_DESCRIPTION})" if defined?($TESTING) && $TESTING
module Dalli::Server::KSocket
module InstanceMethods
# Reads exactly +count+ bytes, waiting (via IO.select) up to
# :socket_timeout whenever the socket has no data, and raising
# Timeout::Error — with credentials scrubbed from the options dump —
# when a wait expires.
# NOTE(review): the timeout applies per select call, not to the whole
# read, so a slowly-trickling peer can hold this past :socket_timeout.
def readfull(count)
  value = String.new('')
  begin
    while true
      value << read_nonblock(count - value.bytesize)
      break if value.bytesize == count
    end
  rescue Errno::EAGAIN, Errno::EWOULDBLOCK
    if IO.select([self], nil, nil, options[:socket_timeout])
      retry
    else
      safe_options = options.reject{|k,v| [:username, :password].include? k}
      raise Timeout::Error, "IO timeout: #{safe_options.inspect}"
    end
  end
  value
end
def read_available
value = String.new('')
while true
begin
value << read_nonblock(8196)
rescue Errno::EAGAIN, Errno::EWOULDBLOCK
break
end
end
value
end
end
def self.included(receiver)
receiver.send(:attr_accessor, :options, :server)
receiver.send(:include, InstanceMethods)
end
end
class Dalli::Server::KSocket::TCP < TCPSocket
extend Dalli::Server::TCPSocketOptions
include Dalli::Server::KSocket
def self.open(host, port, server, options = {})
Timeout.timeout(options[:socket_timeout]) do
sock = new(host, port)
setsockopts(sock, options)
sock.options = {:host => host, :port => port}.merge(options)
sock.server = server
sock
end
end
end
if RbConfig::CONFIG['host_os'] =~ /mingw|mswin/
class Dalli::Server::KSocket::UNIX
def initialize(*args)
raise Dalli::DalliError, "Unix sockets are not supported on Windows platform."
end
end
else
class Dalli::Server::KSocket::UNIX < UNIXSocket
include Dalli::Server::KSocket
def self.open(path, server, options = {})
Timeout.timeout(options[:socket_timeout]) do
sock = new(path)
sock.options = {:path => path}.merge(options)
sock.server = server
sock
end
end
end
end
end
dalli-2.7.9/lib/dalli/version.rb 0000664 0000000 0000000 00000000103 13362715446 0016470 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
module Dalli
VERSION = '2.7.9'
end
dalli-2.7.9/lib/rack/ 0000775 0000000 0000000 00000000000 13362715446 0014317 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/rack/session/ 0000775 0000000 0000000 00000000000 13362715446 0016002 5 ustar 00root root 0000000 0000000 dalli-2.7.9/lib/rack/session/dalli.rb 0000664 0000000 0000000 00000015373 13362715446 0017425 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require 'rack/session/abstract/id'
require 'dalli'
module Rack
module Session
class Dalli < defined?(Abstract::Persisted) ? Abstract::Persisted : Abstract::ID
attr_reader :pool, :mutex
DEFAULT_DALLI_OPTIONS = {
:namespace => 'rack:session',
:memcache_server => 'localhost:11211'
}
# Brings in a new Rack::Session::Dalli middleware with the given
# `:memcache_server`. The server is either a hostname, or a
# host-with-port string in the form of "host_name:port", or an array of
# such strings. For example:
#
# use Rack::Session::Dalli,
# :memcache_server => "mc.example.com:1234"
#
# If no `:memcache_server` option is specified, Rack::Session::Dalli will
# connect to localhost, port 11211 (the default memcached port). If
# `:memcache_server` is set to nil, Dalli::Client will look for
# ENV['MEMCACHE_SERVERS'] and use that value if it is available, or fall
# back to the same default behavior described above.
#
# Rack::Session::Dalli is intended to be a drop-in replacement for
# Rack::Session::Memcache. It accepts additional options that control the
# behavior of Rack::Session, Dalli::Client, and an optional
# ConnectionPool. First and foremost, if you wish to instantiate your own
# Dalli::Client (or ConnectionPool) and use that instead of letting
# Rack::Session::Dalli instantiate it on your behalf, simply pass it in
# as the `:cache` option. Please note that you will be responsible for
# setting the namespace and any other options on Dalli::Client.
#
# Secondly, if you're not using the `:cache` option, Rack::Session::Dalli
# accepts the same options as Dalli::Client, so it's worth reviewing its
# documentation. Perhaps most importantly, if you don't specify a
# `:namespace` option, Rack::Session::Dalli will default to using
# "rack:session".
#
# Whether you are using the `:cache` option or not, it is not recommend
# to set `:expires_in`. Instead, use `:expire_after`, which will control
# both the expiration of the client cookie as well as the expiration of
# the corresponding entry in memcached.
#
# Rack::Session::Dalli also accepts a host of options that control how
# the sessions and session cookies are managed, including the
# aforementioned `:expire_after` option. Please see the documentation for
# Rack::Session::Abstract::Persisted for a detailed explanation of these
# options and their default values.
#
# Finally, if your web application is multithreaded, the
# Rack::Session::Dalli middleware can become a source of contention. You
# can use a connection pool of Dalli clients by passing in the
# `:pool_size` and/or `:pool_timeout` options. For example:
#
# use Rack::Session::Dalli,
# :memcache_server => "mc.example.com:1234",
# :pool_size => 10
#
# You must include the `connection_pool` gem in your project if you wish
# to use pool support. Please see the documentation for ConnectionPool
# for more information about it and its default options (which would only
# be applicable if you supplied one of the two options, but not both).
#
# Builds the middleware: configures Rack::Session defaults, derives the
# default session TTL from :expire_after, and constructs the backing
# Dalli::Client (optionally wrapped in a ConnectionPool).
def initialize(app, options={})
# Parent uses DEFAULT_OPTIONS to build @default_options for Rack::Session
super
# Determine the default TTL for newly-created sessions
@default_ttl = ttl @default_options[:expire_after]
# Normalize and validate passed options
cache, mserv, mopts, popts = extract_dalli_options options
@pool =
if cache # caller passed a Dalli::Client or ConnectionPool instance
cache
elsif popts # caller passed ConnectionPool options
ConnectionPool.new(popts) { ::Dalli::Client.new(mserv, mopts) }
else
::Dalli::Client.new(mserv, mopts)
end
if @pool.respond_to?(:alive!) # is a Dalli::Client
@mutex = Mutex.new
@pool.alive!
end
end
# Looks up the session for +sid+. When the sid is missing, empty, or not
# present in memcached, a fresh sid is generated and reserved with an
# atomic add; if another request wins the add race, the reservation is
# retried via redo. Returns [sid, session_hash], or [nil, {}] when the
# server is unreachable.
def get_session(env, sid)
with_block(env, [nil, {}]) do |dc|
unless sid and !sid.empty? and session = dc.get(sid)
old_sid, sid, session = sid, generate_sid_with(dc), {}
unless dc.add(sid, session, @default_ttl)
sid = old_sid
redo # generate a new sid and try again
end
end
[sid, session]
end
end
# Writes +new_session+ under +session_id+ with a TTL derived from
# options[:expire_after]. Returns the session id on success, false when
# no id was given or the memcached write failed.
def set_session(env, session_id, new_session, options)
return false unless session_id
with_block(env, false) do |dc|
dc.set(session_id, new_session, ttl(options[:expire_after]))
session_id
end
end
# Deletes the stored session. Unless options[:drop] is set, returns a
# freshly generated sid so the client gets a new (empty) session.
def destroy_session(env, session_id, options)
with_block(env) do |dc|
dc.delete(session_id)
generate_sid_with(dc) unless options[:drop]
end
end
if defined?(Abstract::Persisted)
# Rack 2.x (Abstract::Persisted) hooks: delegate the request-based API
# to the legacy env-based implementations above.
def find_session(req, sid)
get_session req.env, sid
end
def write_session(req, sid, session, options)
set_session req.env, sid, session, options
end
def delete_session(req, sid, options)
destroy_session req.env, sid, options
end
end
private
# Splits the combined middleware options into
# [cache, memcache_server, dalli_options, pool_options].
# When a prebuilt :cache is supplied only that is returned; all other
# connection options are ignored. pool_options is nil unless
# :pool_size/:pool_timeout was given.
def extract_dalli_options(options)
return [options[:cache]] if options[:cache]
# Filter out Rack::Session-specific options and apply our defaults
mopts = DEFAULT_DALLI_OPTIONS.merge \
options.reject {|k, _| DEFAULT_OPTIONS.key? k }
mserv = mopts.delete :memcache_server
if mopts[:pool_size] || mopts[:pool_timeout]
popts = {}
popts[:size] = mopts.delete :pool_size if mopts[:pool_size]
popts[:timeout] = mopts.delete :pool_timeout if mopts[:pool_timeout]
# For a connection pool, locking is handled at the pool level
mopts[:threadsafe] = false unless mopts.key? :threadsafe
end
[nil, mserv, mopts, popts]
end
# Draws candidate ids from #generate_sid until one is found that is not
# already stored in memcached, and returns that id.
def generate_sid_with(dc)
  candidate = nil
  loop do
    candidate = generate_sid
    break unless dc.get(candidate)
  end
  candidate
end
# Checks out a client (from the ConnectionPool, or the single client via
# Dalli::Client#with) and yields it to the block. Connectivity errors
# are logged when $VERBOSE and +default+ is returned instead.
# NOTE(review): the ensure clause unlocks whenever @mutex.locked? —
# which is also true when another thread holds the lock. Presumably safe
# because lock and unlock happen on the same request thread; confirm.
def with_block(env, default=nil, &block)
@mutex.lock if @mutex and env['rack.multithread']
@pool.with(&block)
rescue ::Dalli::DalliError, Errno::ECONNREFUSED
# a marshalling failure (unknown class in the payload) is a real bug;
# don't swallow it as a connectivity problem
raise if $!.message =~ /undefined class/
if $VERBOSE
warn "#{self} is unable to find memcached server."
warn $!.inspect
end
default
ensure
@mutex.unlock if @mutex and @mutex.locked?
end
# Converts a Rack :expire_after value (seconds, possibly nil) into a
# memcached TTL: nil means "no expiry" (0); otherwise pad by one second
# so the memcached entry outlives the session cookie.
def ttl(expire_after)
  return 0 if expire_after.nil?
  expire_after + 1
end
end
end
end
dalli-2.7.9/test/ 0000775 0000000 0000000 00000000000 13362715446 0013610 5 ustar 00root root 0000000 0000000 dalli-2.7.9/test/benchmark_test.rb 0000664 0000000 0000000 00000015770 13362715446 0017140 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
require 'benchmark'
require 'active_support/cache/dalli_store'
describe 'performance' do
before do
puts "Testing #{Dalli::VERSION} with #{RUBY_DESCRIPTION}"
# We'll use a simple @value to try to avoid spending time in Marshal,
# which is a constant penalty that both clients have to pay
@value = []
@marshalled = Marshal.dump(@value)
@port = 23417
@servers = ["127.0.0.1:#{@port}", "localhost:#{@port}"]
@key1 = "Short"
@key2 = "Sym1-2-3::45"*8
@key3 = "Long"*40
@key4 = "Medium"*8
# 5 and 6 are only used for multiget miss test
@key5 = "Medium2"*8
@key6 = "Long3"*40
@counter = 'counter'
end
it 'runs benchmarks' do
memcached(@port) do
Benchmark.bm(37) do |x|
n = 2500
@ds = ActiveSupport::Cache::DalliStore.new(@servers)
x.report("mixed:rails:dalli") do
n.times do
@ds.read @key1
@ds.write @key2, @value
@ds.fetch(@key3) { @value }
@ds.fetch(@key2) { @value }
@ds.fetch(@key1) { @value }
@ds.write @key2, @value, :unless_exists => true
@ds.delete @key2
@ds.increment @counter, 1, :initial => 100
@ds.increment @counter, 1, :expires_in => 12
@ds.decrement @counter, 1
end
end
x.report("mixed:rails-localcache:dalli") do
n.times do
@ds.with_local_cache do
@ds.read @key1
@ds.write @key2, @value
@ds.fetch(@key3) { @value }
@ds.fetch(@key2) { @value }
@ds.fetch(@key1) { @value }
@ds.write @key2, @value, :unless_exists => true
@ds.delete @key2
@ds.increment @counter, 1, :initial => 100
@ds.increment @counter, 1, :expires_in => 12
@ds.decrement @counter, 1
end
end
end
@ds.clear
sizeable_data = "" * 50
[@key1, @key2, @key3, @key4, @key5, @key6].each do |key|
@ds.write(key, sizeable_data)
end
x.report("read_multi_big:rails:dalli") do
n.times do
@ds.read_multi @key1, @key2, @key3, @key4
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read @key4
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read_multi @key1, @key2, @key3
end
end
x.report("read_multi_big:rails-localcache:dalli") do
n.times do
@ds.with_local_cache do
@ds.read_multi @key1, @key2, @key3, @key4
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read @key4
end
@ds.with_local_cache do
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read_multi @key1, @key2, @key3
end
end
end
@m = Dalli::Client.new(@servers)
x.report("set:plain:dalli") do
n.times do
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
end
end
@m = Dalli::Client.new(@servers)
x.report("setq:plain:dalli") do
@m.multi do
n.times do
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
end
end
end
@m = Dalli::Client.new(@servers)
x.report("set:ruby:dalli") do
n.times do
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
end
end
@m = Dalli::Client.new(@servers)
x.report("get:plain:dalli") do
n.times do
@m.get @key1, :raw => true
@m.get @key2, :raw => true
@m.get @key3, :raw => true
@m.get @key1, :raw => true
@m.get @key2, :raw => true
@m.get @key3, :raw => true
end
end
@m = Dalli::Client.new(@servers)
x.report("get:ruby:dalli") do
n.times do
@m.get @key1
@m.get @key2
@m.get @key3
@m.get @key1
@m.get @key2
@m.get @key3
end
end
@m = Dalli::Client.new(@servers)
x.report("multiget:ruby:dalli") do
n.times do
# We don't use the keys array because splat is slow
@m.get_multi @key1, @key2, @key3, @key4, @key5, @key6
end
end
@m = Dalli::Client.new(@servers)
x.report("missing:ruby:dalli") do
n.times do
begin @m.delete @key1; rescue; end
begin @m.get @key1; rescue; end
begin @m.delete @key2; rescue; end
begin @m.get @key2; rescue; end
begin @m.delete @key3; rescue; end
begin @m.get @key3; rescue; end
end
end
@m = Dalli::Client.new(@servers)
x.report("mixed:ruby:dalli") do
n.times do
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
@m.get @key1
@m.get @key2
@m.get @key3
@m.set @key1, @value
@m.get @key1
@m.set @key2, @value
@m.get @key2
@m.set @key3, @value
@m.get @key3
end
end
@m = Dalli::Client.new(@servers)
x.report("mixedq:ruby:dalli") do
@m.multi do
n.times do
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
@m.get @key1
@m.get @key2
@m.get @key3
@m.set @key1, @value
@m.get @key1
@m.set @key2, @value
@m.replace @key2, @value
@m.delete @key3
@m.add @key3, @value
@m.get @key2
@m.set @key3, @value
@m.get @key3
end
end
end
@m = Dalli::Client.new(@servers)
x.report("incr:ruby:dalli") do
counter = 'foocount'
n.times do
@m.incr counter, 1, 0, 1
end
n.times do
@m.decr counter, 1
end
assert_equal 0, @m.incr(counter, 0)
end
end
end
end
end
dalli-2.7.9/test/helper.rb 0000664 0000000 0000000 00000002672 13362715446 0015423 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
$TESTING = true
require 'bundler/setup'
# require 'simplecov'
# SimpleCov.start
require 'minitest/pride' unless RUBY_ENGINE == 'rbx'
require 'minitest/autorun'
require 'mocha/setup'
require_relative 'memcached_mock'
ENV['MEMCACHED_SASL_PWDB'] = "#{File.dirname(__FILE__)}/sasl/sasldb"
ENV['SASL_CONF_PATH'] = "#{File.dirname(__FILE__)}/sasl/memcached.conf"
require 'rails'
puts "Testing with Rails #{Rails.version}"
require 'dalli'
require 'logger'
require 'active_support/time'
require 'active_support/cache/dalli_store'
Dalli.logger = Logger.new(STDOUT)
Dalli.logger.level = Logger::ERROR
# Shared test infrastructure: every spec gets the memcached-spawning
# helpers plus small assertion helpers for Dalli op return values.
class MiniTest::Spec
include MemcachedMock::Helper
# Asserts that the block raises +error+ and, when +regexp+ is given,
# that the exception message matches it.
def assert_error(error, regexp=nil, &block)
ex = assert_raises(error, &block)
assert_match(regexp, ex.message, "#{ex.class.name}: #{ex.message}\n#{ex.backtrace.join("\n\t")}")
end
# CAS-style ops succeed when the server returns a positive CAS value.
def op_cas_succeeds(rsp)
rsp.is_a?(Integer) && rsp > 0
end
def op_replace_succeeds(rsp)
rsp.is_a?(Integer) && rsp > 0
end
# add and set must have the same return value because of DalliStore#write_entry
def op_addset_succeeds(rsp)
rsp.is_a?(Integer) && rsp > 0
end
# Lazily require optional dependencies only in the specs that use them.
def with_activesupport
require 'active_support/all'
require 'active_support/cache/dalli_store'
yield
end
def with_actionpack
require 'action_dispatch'
require 'action_controller'
yield
end
def with_connectionpool
require 'connection_pool'
yield
end
end
dalli-2.7.9/test/memcached_mock.rb 0000664 0000000 0000000 00000011716 13362715446 0017062 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require "socket"
require "tempfile"
$started = {}
module MemcachedMock
UNIX_SOCKET_PATH = (f = Tempfile.new('dalli_test'); f.close; f.path)
# Accepts a single TCP connection on +port+ and yields the accepted
# session socket, so tests can script fake server responses.
def self.start(port=19123)
server = TCPServer.new("localhost", port)
session = server.accept
yield(session)
end
# Unix-socket variant of .start: removes any stale socket file, accepts
# one connection on +path+ and yields the session socket.
def self.start_unix(path=UNIX_SOCKET_PATH)
begin
File.delete(path)
rescue Errno::ENOENT
end
server = UNIXServer.new(path)
session = server.accept
yield(session)
end
# Binds the listener but sleeps +wait+ seconds before yielding the
# server, to exercise client connect-timeout handling.
def self.delayed_start(port=19123, wait=1)
server = TCPServer.new("localhost", port)
sleep wait
yield(server)
end
module Helper
# Forks the current process and starts a new mock Memcached server on
# port 22122.
#
# memcached_mock(lambda {|sock| socket.write('123') }) do
# assert_equal "PONG", Dalli::Client.new('localhost:22122').get('abc')
# end
#
def memcached_mock(proc, meth=:start, meth_args=[])
# the mock relies on fork, which JRuby lacks
return unless supports_fork?
begin
pid = fork do
trap("TERM") { exit }
MemcachedMock.send(meth, *meth_args) do |*args|
proc.call(*args)
end
end
sleep 0.3 # Give time for the socket to start listening.
yield
ensure
# Always tear the child down, even when the block raises.
if pid
Process.kill("TERM", pid)
Process.wait(pid)
end
end
end
PATHS = %w(
/usr/local/bin/
/opt/local/bin/
/usr/bin/
)
# Locates a memcached 1.4+ binary: first on the PATH, then in the
# well-known directories listed in PATHS. Returns the directory prefix
# to prepend to the 'memcached' command ('' when it is on the PATH).
# Raises Errno::ENOENT when no suitable binary can be found.
def find_memcached
  output = `memcached -h | head -1`.strip
  # Gem::Version avoids the lexicographic trap where '1.10.x' < '1.4'.
  if output && output =~ /^memcached (\d\.\d\.\d+)/ && Gem::Version.new($1) > Gem::Version.new('1.4')
    return (puts "Found #{output} in PATH"; '')
  end
  PATHS.each do |path|
    # Probe the candidate directory itself; previously this re-ran the
    # bare 'memcached' command, so the fallback paths were never used.
    output = `#{path}memcached -h | head -1`.strip
    if output && output =~ /^memcached (\d\.\d\.\d+)/ && Gem::Version.new($1) > Gem::Version.new('1.4')
      return (puts "Found #{output} in #{path}"; path)
    end
  end
  raise Errno::ENOENT, "Unable to find memcached 1.4+ locally"
end
# Yields a client against a persistent (not killed after the block)
# memcached instance listening on +port+.
def memcached_persistent(port=21345, options={})
dc = start_and_flush_with_retry(port, '', options)
yield dc, port if block_given?
end
# Client credentials matching the entry in test/sasl/sasldb.
def sasl_credentials
  { username: 'testuser', password: 'testtest' }
end
# Environment variables pointing memcached at the test SASL config.
def sasl_env
  sasl_dir = "#{File.dirname(__FILE__)}/sasl"
  {
    'MEMCACHED_SASL_PWDB' => "#{sasl_dir}/sasldb",
    'SASL_CONF_PATH' => "#{sasl_dir}/memcached.conf"
  }
end
# Persistent server started with SASL auth (-S) and matching credentials.
def memcached_sasl_persistent(port=21397)
dc = start_and_flush_with_retry(port, '-S', sasl_credentials)
yield dc, port if block_given?
end
# Persistent server with the CAS client extensions loaded.
def memcached_cas_persistent(port = 25662)
require 'dalli/cas/client'
dc = start_and_flush_with_retry(port)
yield dc, port if block_given?
end
# Persistent server capped at 1MB (-m 1) that errors when full (-M).
def memcached_low_mem_persistent(port = 19128)
dc = start_and_flush_with_retry(port, '-m 1 -M')
yield dc, port if block_given?
end
# Starts (or reuses) a server for +port+ and returns a flushed client,
# retrying up to three times on startup races. The cached pid in
# $started is cleared between attempts so a fresh daemon is spawned.
def start_and_flush_with_retry(port, args = '', client_options = {})
dc = nil
retry_count = 0
while dc.nil? do
begin
dc = start_and_flush(port, args, client_options, (retry_count == 0))
rescue StandardError => e
$started[port] = nil
retry_count += 1
raise e if retry_count >= 3
end
end
dc
end
# Boots the server for +port+ (a TCP port number or a unix socket path)
# and builds a client against it, flushing existing data unless +flush+
# is false.
def start_and_flush(port, args = '', client_options = {}, flush = true)
memcached_server(port, args)
if "#{port}" =~ /\A\//
# unix socket
dc = Dalli::Client.new(port, client_options)
else
dc = Dalli::Client.new(["localhost:#{port}", "127.0.0.1:#{port}"], client_options)
end
dc.flush_all if flush
dc
end
# Yields a client against a fresh server on +port+ and kills the daemon
# once the block finishes.
def memcached(port, args='', client_options={})
dc = start_and_flush_with_retry(port, args, client_options)
yield dc, port if block_given?
memcached_kill(port)
end
# Ensures a memcached daemon is running for +port+ (a TCP port number or
# a unix socket path) with the given extra command-line +args+. Starts a
# new daemon only when one is not already tracked in $started, and
# registers an at_exit hook to terminate it. Returns the daemon's pid.
def memcached_server(port, args='')
  Memcached.path ||= find_memcached
  if "#{port}" =~ /\A\//
    # unix socket: remove any stale socket file before (re)starting
    port_socket_arg = '-s'
    begin
      File.delete(port)
    rescue Errno::ENOENT
    end
  else
    port_socket_arg = '-p'
    port = port.to_i
  end
  cmd = "#{Memcached.path}memcached #{args} #{port_socket_arg} #{port}"
  $started[port] ||= begin
    pid = IO.popen(cmd).pid
    at_exit do
      begin
        Process.kill("TERM", pid)
        Process.wait(pid)
      rescue Errno::ECHILD, Errno::ESRCH
      end
    end
    # Give the daemon a moment to bind before clients connect. (The
    # previous code computed this via a ternary whose branches were both
    # 0.1, so a plain constant preserves behavior.)
    sleep 0.1
    pid
  end
end
# True when the interpreter can fork child processes (i.e. not JRuby).
def supports_fork?
  return true unless defined?(RUBY_ENGINE)
  !jruby?
end

# True when running under JRuby.
def jruby?
  RUBY_ENGINE == 'jruby'
end
# Terminates and reaps the daemon tracked for +port+, if any; already-
# dead processes are reported but not treated as failures.
def memcached_kill(port)
pid = $started.delete(port)
if pid
begin
Process.kill("TERM", pid)
Process.wait(pid)
rescue Errno::ECHILD, Errno::ESRCH => e
puts e.inspect
end
end
end
end
end
# Holds the directory prefix where the memcached binary lives
# ('' when it is on the PATH); set lazily by Helper#memcached_server.
module Memcached
  def self.path
    @path
  end

  def self.path=(value)
    @path = value
  end
end
dalli-2.7.9/test/sasl/ 0000775 0000000 0000000 00000000000 13362715446 0014552 5 ustar 00root root 0000000 0000000 dalli-2.7.9/test/sasl/memcached.conf 0000664 0000000 0000000 00000000021 13362715446 0017320 0 ustar 00root root 0000000 0000000 mech_list: plain
dalli-2.7.9/test/sasl/sasldb 0000664 0000000 0000000 00000000031 13362715446 0015737 0 ustar 00root root 0000000 0000000 testuser:testtest:::::::
dalli-2.7.9/test/test_active_support.rb 0000664 0000000 0000000 00000044523 13362715446 0020253 0 ustar 00root root 0000000 0000000 # encoding: utf-8
# frozen_string_literal: true
require_relative 'helper'
require 'connection_pool'
# Stand-in for a model exposing the ActiveSupport #cache_key API.
class MockUser
  KEY = 'users/1/21348793847982314'

  def cache_key
    KEY
  end
end
# Stand-in for a model using Rails 5.2+ #cache_key_with_version.
class MockUserVersioning
  KEY = 'users/1/241012793847982434'

  def cache_key_with_version
    KEY
  end
end
# An object whose #== must never be invoked; used to prove that fetch
# comparisons avoid calling equality on the cached object.
class ObjectRaisingEquality
  def ==(_other)
    raise "Equality called on fetched object."
  end
end
describe 'ActiveSupport::Cache::DalliStore' do
# with and without local cache
# Defines the example twice: once wrapped in DalliStore#with_local_cache
# (in-memory LocalCache layer active) and once hitting memcached
# directly, so each behavior is verified in both modes.
def self.it_with_and_without_local_cache(message, &block)
it "#{message} with LocalCache" do
with_cache do
@dalli.with_local_cache do
instance_eval(&block)
end
end
end
it "#{message} without LocalCache" do
with_cache do
instance_eval(&block)
end
end
end
describe 'active_support caching' do
it 'has accessible options' do
with_cache :expires_in => 5.minutes, :frob => 'baz' do
assert_equal 'baz', @dalli.options[:frob]
end
end
it_with_and_without_local_cache 'allow mute and silence' do
@dalli.mute do
assert op_addset_succeeds(@dalli.write('foo', 'bar', nil))
assert_equal 'bar', @dalli.read('foo', nil)
end
refute @dalli.silence?
@dalli.silence!
assert_equal true, @dalli.silence?
end
it_with_and_without_local_cache 'handle nil options' do
assert op_addset_succeeds(@dalli.write('foo', 'bar', nil))
assert_equal 'bar', @dalli.read('foo', nil)
assert_equal 18, @dalli.fetch('lkjsadlfk', nil) { 18 }
assert_equal 18, @dalli.fetch('lkjsadlfk', nil) { 18 }
assert_equal 1, @dalli.increment('lkjsa', 1, nil)
assert_equal 2, @dalli.increment('lkjsa', 1, nil)
assert_equal 1, @dalli.decrement('lkjsa', 1, nil)
assert_equal true, @dalli.delete('lkjsa')
end
describe 'fetch' do
it_with_and_without_local_cache 'support expires_in' do
dvalue = @dalli.fetch('someotherkeywithoutspaces', :expires_in => 1.second) { 123 }
assert_equal 123, dvalue
end
it_with_and_without_local_cache 'tests cache misses using correct operand ordering' do
# Some objects customise their equality methods. If you call #== on these objects this can mean your
# returned value from the gem to your application is technically different to what's serialised in the cache.
#
# See https://github.com/petergoldstein/dalli/pull/662
#
obj = ObjectRaisingEquality.new
@dalli.fetch('obj') { obj }
end
it_with_and_without_local_cache 'fallback block gets a key as a parameter' do
key = rand_key
o = Object.new
o.instance_variable_set :@foo, 'bar'
dvalue = @dalli.fetch(key) { |k| "#{k}-#{o}" }
assert_equal "#{key}-#{o}", dvalue
end
it_with_and_without_local_cache 'support object' do
o = Object.new
o.instance_variable_set :@foo, 'bar'
dvalue = @dalli.fetch(rand_key) { o }
assert_equal o, dvalue
end
it_with_and_without_local_cache 'support object with raw' do
o = Object.new
o.instance_variable_set :@foo, 'bar'
dvalue = @dalli.fetch(rand_key, :raw => true) { o }
assert_equal o, dvalue
end
it_with_and_without_local_cache 'support false value' do
@dalli.write('false', false)
dvalue = @dalli.fetch('false') { flunk }
assert_equal false, dvalue
end
it 'support nil value when cache_nils: true' do
with_cache cache_nils: true do
@dalli.write('nil', nil)
dvalue = @dalli.fetch('nil') { flunk }
assert_nil dvalue
end
with_cache cache_nils: false do
@dalli.write('nil', nil)
executed = false
dvalue = @dalli.fetch('nil') { executed = true; 'bar' }
assert_equal true, executed
assert_equal 'bar', dvalue
end
end
it_with_and_without_local_cache 'support object with cache_key' do
user = MockUser.new
@dalli.write(user.cache_key, false)
dvalue = @dalli.fetch(user) { flunk }
assert_equal false, dvalue
end
it_with_and_without_local_cache 'support object with cache_key_with_version' do
user = MockUserVersioning.new
@dalli.write(user.cache_key_with_version, false)
dvalue = @dalli.fetch(user) { flunk }
assert_equal false, dvalue
end
end
it_with_and_without_local_cache 'support keys with spaces' do
dvalue = @dalli.fetch('some key with spaces', :expires_in => 1.second) { 123 }
assert_equal 123, dvalue
end
it_with_and_without_local_cache 'support read_multi' do
x = rand_key
y = rand_key
assert_equal({}, @dalli.read_multi(x, y))
@dalli.write(x, '123')
@dalli.write(y, 123)
assert_equal({ x => '123', y => 123 }, @dalli.read_multi(x, y))
end
it_with_and_without_local_cache 'support read_multi with an array' do
x = rand_key
y = rand_key
assert_equal({}, @dalli.read_multi([x, y]))
@dalli.write(x, '123')
@dalli.write(y, 123)
assert_equal({}, @dalli.read_multi([x, y]))
@dalli.write([x, y], '123')
assert_equal({ [x, y] => '123' }, @dalli.read_multi([x, y]))
end
it_with_and_without_local_cache 'support read_multi with an empty array' do
assert_equal({}, @dalli.read_multi([]))
end
it 'support raw read_multi' do # TODO fails with LocalCache
with_cache do
@dalli.write("abc", 5, :raw => true)
@dalli.write("cba", 5, :raw => true)
assert_equal({'abc' => '5', 'cba' => '5' }, @dalli.read_multi("abc", "cba"))
end
end
it 'support read_multi with LocalCache' do
with_cache do
x = rand_key
y = rand_key
assert_equal({}, @dalli.read_multi(x, y))
@dalli.write(x, '123')
@dalli.write(y, 456)
@dalli.with_local_cache do
assert_equal({ x => '123', y => 456 }, @dalli.read_multi(x, y))
Dalli::Client.any_instance.expects(:get).with(any_parameters).never
dres = @dalli.read(x)
assert_equal dres, '123'
end
Dalli::Client.any_instance.unstub(:get)
# Fresh LocalStore
@dalli.with_local_cache do
@dalli.read(x)
Dalli::Client.any_instance.expects(:get_multi).with([y.to_s]).returns(y.to_s => 456)
assert_equal({ x => '123', y => 456}, @dalli.read_multi(x, y))
end
end
end
it 'support read_multi with special Regexp characters in namespace' do
# /(?!)/ is a contradictory PCRE and should never be able to match
with_cache :namespace => '(?!)' do
@dalli.write('abc', 123)
@dalli.write('xyz', 456)
assert_equal({'abc' => 123, 'xyz' => 456}, @dalli.read_multi('abc', 'xyz'))
end
end
it_with_and_without_local_cache 'supports fetch_multi' do
x = rand_key.to_s
y = rand_key
hash = { x => 'ABC', y => 'DEF' }
@dalli.write(y, '123')
results = @dalli.fetch_multi(x, y) { |key| hash[key] }
assert_equal({ x => 'ABC', y => '123' }, results)
assert_equal('ABC', @dalli.read(x))
assert_equal('123', @dalli.read(y))
end
it_with_and_without_local_cache 'supports fetch_multi with large cache keys' do
x = "x" * 512
y = "y" * 512
hash = { x => 'ABC', y => 'DEF' }
@dalli.write(y, '123')
results = @dalli.fetch_multi(x, y) { |key| hash[key] }
assert_equal({ x => 'ABC', y => '123' }, results)
assert_equal('ABC', @dalli.read(x))
assert_equal('123', @dalli.read(y))
end
it_with_and_without_local_cache 'support read, write and delete' do
y = rand_key
assert_nil @dalli.read(y)
dres = @dalli.write(y, 123)
assert op_addset_succeeds(dres)
dres = @dalli.read(y)
assert_equal 123, dres
dres = @dalli.delete(y)
assert_equal true, dres
user = MockUser.new
dres = @dalli.write(user.cache_key, "foo")
assert op_addset_succeeds(dres)
dres = @dalli.read(user)
assert_equal "foo", dres
dres = @dalli.delete(user)
assert_equal true, dres
dres = @dalli.write(:false_value, false)
assert op_addset_succeeds(dres)
dres = @dalli.read(:false_value)
assert_equal false, dres
bigkey = '123456789012345678901234567890'
@dalli.write(bigkey, 'double width')
assert_equal 'double width', @dalli.read(bigkey)
assert_equal({bigkey => "double width"}, @dalli.read_multi(bigkey))
end
it_with_and_without_local_cache 'support read, write and delete with local namespace' do
key = 'key_with_namespace'
namespace_value = @dalli.fetch(key, :namespace => 'namespace') { 123 }
assert_equal 123, namespace_value
res = @dalli.read(key, :namespace => 'namespace')
assert_equal 123, res
res = @dalli.delete(key, :namespace => 'namespace')
assert_equal true, res
res = @dalli.write(key, "foo", :namespace => 'namespace')
assert op_addset_succeeds(res)
res = @dalli.read(key, :namespace => 'namespace')
assert_equal "foo", res
end
it_with_and_without_local_cache 'support multi_read and multi_fetch with local namespace' do
x = rand_key.to_s
y = rand_key
namespace = 'namespace'
hash = { x => 'ABC', y => 'DEF' }
results = @dalli.fetch_multi(x, y, :namespace => namespace) { |key| hash[key] }
assert_equal({ x => 'ABC', y => 'DEF' }, results)
assert_equal('ABC', @dalli.read(x, :namespace => namespace))
assert_equal('DEF', @dalli.read(y, :namespace => namespace))
@dalli.write("abc", 5, :namespace => 'namespace')
@dalli.write("cba", 5, :namespace => 'namespace')
assert_equal({'abc' => 5, 'cba' => 5 }, @dalli.read_multi("abc", "cba", :namespace => 'namespace'))
end
it 'support read, write and delete with LocalCache' do
with_cache do
y = rand_key.to_s
@dalli.with_local_cache do
Dalli::Client.any_instance.expects(:get).with(y, {}).once.returns(123)
dres = @dalli.read(y)
assert_equal 123, dres
Dalli::Client.any_instance.expects(:get).with(y, {}).never
dres = @dalli.read(y)
assert_equal 123, dres
@dalli.write(y, 456)
dres = @dalli.read(y)
assert_equal 456, dres
@dalli.delete(y)
Dalli::Client.any_instance.expects(:get).with(y, {}).once.returns(nil)
dres = @dalli.read(y)
assert_nil dres
end
end
end
it_with_and_without_local_cache 'support unless_exist' do
y = rand_key.to_s
@dalli.with_local_cache do
Dalli::Client.any_instance.expects(:add).with(y, 123, nil, {:unless_exist => true}).once.returns(true)
dres = @dalli.write(y, 123, :unless_exist => true)
assert_equal true, dres
Dalli::Client.any_instance.expects(:add).with(y, 321, nil, {:unless_exist => true}).once.returns(false)
dres = @dalli.write(y, 321, :unless_exist => true)
assert_equal false, dres
Dalli::Client.any_instance.expects(:get).with(y, {}).once.returns(123)
dres = @dalli.read(y)
assert_equal 123, dres
end
end
it_with_and_without_local_cache 'support increment/decrement commands' do
assert op_addset_succeeds(@dalli.write('counter', 0, :raw => true))
assert_equal 1, @dalli.increment('counter')
assert_equal 2, @dalli.increment('counter')
assert_equal 1, @dalli.decrement('counter')
assert_equal "1", @dalli.read('counter', :raw => true)
assert_equal 1, @dalli.increment('counterX')
assert_equal 2, @dalli.increment('counterX')
assert_equal 2, @dalli.read('counterX', :raw => true).to_i
assert_equal 5, @dalli.increment('counterY1', 1, :initial => 5)
assert_equal 6, @dalli.increment('counterY1', 1, :initial => 5)
assert_equal 6, @dalli.read('counterY1', :raw => true).to_i
assert_nil @dalli.increment('counterZ1', 1, :initial => nil)
assert_nil @dalli.read('counterZ1')
assert_equal 5, @dalli.decrement('counterY2', 1, :initial => 5)
assert_equal 4, @dalli.decrement('counterY2', 1, :initial => 5)
assert_equal 4, @dalli.read('counterY2', :raw => true).to_i
assert_nil @dalli.decrement('counterZ2', 1, :initial => nil)
assert_nil @dalli.read('counterZ2')
user = MockUser.new
assert op_addset_succeeds(@dalli.write(user, 0, :raw => true))
assert_equal 1, @dalli.increment(user)
assert_equal 2, @dalli.increment(user)
assert_equal 1, @dalli.decrement(user)
assert_equal "1", @dalli.read(user, :raw => true)
end
it_with_and_without_local_cache 'support exist command' do
@dalli.write(:foo, 'a')
@dalli.write(:false_value, false)
assert_equal true, @dalli.exist?(:foo)
assert_equal true, @dalli.exist?(:false_value)
assert_equal false, @dalli.exist?(:bar)
user = MockUser.new
@dalli.write(user, 'foo')
assert_equal true, @dalli.exist?(user)
end
it_with_and_without_local_cache 'support other esoteric commands' do
ds = @dalli.stats
assert_equal 1, ds.keys.size
assert ds[ds.keys.first].keys.size > 0
@dalli.reset
end
it 'respects "raise_errors" option' do
new_port = 29333
with_cache port: new_port do
@dalli.write 'foo', 'bar'
assert_equal @dalli.read('foo'), 'bar'
memcached_kill(new_port)
silence_logger do
assert_nil @dalli.read('foo')
end
end
with_cache port: new_port, :raise_errors => true do
memcached_kill(new_port)
exception = [Dalli::RingError, { :message => "No server available" }]
silence_logger do
assert_raises(*exception) { @dalli.read 'foo' }
assert_raises(*exception) { @dalli.read 'foo', :raw => true }
assert_raises(*exception) { @dalli.write 'foo', 'bar' }
assert_raises(*exception) { @dalli.exist? 'foo' }
assert_raises(*exception) { @dalli.increment 'foo' }
assert_raises(*exception) { @dalli.decrement 'foo' }
assert_raises(*exception) { @dalli.delete 'foo' }
assert_equal @dalli.read_multi('foo', 'bar'), {}
assert_raises(*exception) { @dalli.delete 'foo' }
assert_raises(*exception) { @dalli.fetch('foo') { 42 } }
end
end
end
describe 'instruments' do
it 'notifies errors' do
new_port = 29333
key = 'foo'
with_cache port: new_port, :instrument_errors => true do
memcached_kill(new_port)
payload_proc = Proc.new { |payload| payload }
@dalli.expects(:instrument).with(:read, { :key => key }).yields(&payload_proc).once
@dalli.expects(:instrument).with(:error, { :key => "DalliError",
:message => "No server available" }).once
@dalli.read(key)
end
end
it 'payload hits' do
with_cache do
payload = {}
assert op_addset_succeeds(@dalli.write('false', false))
foo = @dalli.fetch('burrito') { 'tacos' }
assert 'tacos', foo
# NOTE: mocha stubbing for yields
# makes the result of the block nil in all cases
# there was a ticket about this:
# http://floehopper.lighthouseapp.com/projects/22289/tickets/14-8687-blocks-return-value-is-dropped-on-stubbed-yielding-methods
@dalli.stubs(:instrument).yields payload
@dalli.read('false')
assert_equal true, payload.delete(:hit)
@dalli.fetch('unset_key') { 'tacos' }
assert_equal false, payload.delete(:hit)
@dalli.fetch('burrito') { 'tacos' }
assert_equal true, payload.delete(:hit)
@dalli.unstub(:instrument)
end
end
end
end
it_with_and_without_local_cache 'handle crazy characters from far-away lands' do
key = "fooƒ"
value = 'bafƒ'
assert op_addset_succeeds(@dalli.write(key, value))
assert_equal value, @dalli.read(key)
end
it 'normalize options as expected' do
with_cache :expires_in => 1, :namespace => 'foo', :compress => true do
assert_equal 1, @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:expires_in]
assert_equal 'foo', @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:namespace]
assert_equal ["localhost:19987"], @dalli.instance_variable_get(:@data).instance_variable_get(:@servers)
end
end
it 'handles nil server with additional options' do
@dalli = ActiveSupport::Cache::DalliStore.new(nil, :expires_in => 1, :namespace => 'foo', :compress => true)
assert_equal 1, @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:expires_in]
assert_equal 'foo', @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:namespace]
assert_equal ["127.0.0.1:11211"], @dalli.instance_variable_get(:@data).instance_variable_get(:@servers)
end
it 'supports connection pooling' do
with_cache :expires_in => 1, :namespace => 'foo', :compress => true, :pool_size => 3 do
assert_nil @dalli.read('foo')
assert @dalli.write('foo', 1)
assert_equal 1, @dalli.fetch('foo') { raise 'boom' }
assert_equal true, @dalli.dalli.is_a?(ConnectionPool)
assert_equal 1, @dalli.increment('bar')
assert_equal 0, @dalli.decrement('bar')
assert_equal true, @dalli.delete('bar')
assert_equal [true], @dalli.clear
assert_equal 1, @dalli.stats.size
end
end
it_with_and_without_local_cache 'allow keys to be frozen' do
key = "foo"
key.freeze
assert op_addset_succeeds(@dalli.write(key, "value"))
end
it_with_and_without_local_cache 'allow keys from a hash' do
map = { "one" => "one", "two" => "two" }
map.each_pair do |k, v|
assert op_addset_succeeds(@dalli.write(k, v))
end
assert_equal map, @dalli.read_multi(*(map.keys))
end
def silence_logger
old = Dalli.logger.level
Dalli.logger.level = Logger::ERROR + 1
yield
ensure
Dalli.logger.level = old
end
def with_cache(options={})
port = options.delete(:port) || 19987
memcached_persistent(port) do
@dalli = ActiveSupport::Cache.lookup_store(:dalli_store, "localhost:#{port}", options)
@dalli.clear
yield
end
end
def rand_key
rand(1_000_000_000)
end
end
dalli-2.7.9/test/test_cas_client.rb 0000664 0000000 0000000 00000006113 13362715446 0017301 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
describe 'Dalli::Cas::Client' do
describe 'using a live server' do
it 'supports get with CAS' do
memcached_cas_persistent do |dc|
dc.flush
expected = { 'blah' => 'blerg!' }
get_block_called = false
stored_value = stored_cas = nil
# Validate call-with-block
dc.get_cas('gets_key') do |v, cas|
get_block_called = true
stored_value = v
stored_cas = cas
end
assert get_block_called
assert_nil stored_value
dc.set('gets_key', expected)
# Validate call-with-return-value
stored_value, stored_cas = dc.get_cas('gets_key')
assert_equal stored_value, expected
assert(stored_cas != 0)
end
end
it 'supports multi-get with CAS' do
memcached_cas_persistent do |dc|
dc.close
dc.flush
expected_hash = {'a' => 'foo', 'b' => 123}
expected_hash.each_pair do |k, v|
dc.set(k, v)
end
# Invocation without block
resp = dc.get_multi_cas(%w(a b c d e f))
resp.each_pair do |k, data|
value, cas = [data.first, data[1]]
assert_equal expected_hash[k], value
assert(cas && cas != 0)
end
# Invocation with block
dc.get_multi_cas(%w(a b c d e f)) do |k, data|
value, cas = [data.first, data[1]]
assert_equal expected_hash[k], value
assert(cas && cas != 0)
end
end
end
it 'supports replace-with-CAS operation' do
memcached_cas_persistent do |dc|
dc.flush
cas = dc.set('key', 'value')
# Accepts CAS, replaces, and returns new CAS
cas = dc.replace_cas('key', 'value2', cas)
assert cas.is_a?(Integer)
assert_equal 'value2', dc.get('key')
end
end
it 'supports delete with CAS' do
memcached_cas_persistent do |dc|
cas = dc.set('some_key', 'some_value')
dc.delete_cas('some_key', cas)
assert_nil dc.get('some_key')
end
end
it 'handles CAS round-trip operations' do
memcached_cas_persistent do |dc|
dc.flush
expected = {'blah' => 'blerg!'}
dc.set('some_key', expected)
value, cas = dc.get_cas('some_key')
assert_equal value, expected
assert(!cas.nil? && cas != 0)
# Set operation, first with wrong then with correct CAS
expected = {'blah' => 'set succeeded'}
assert(dc.set_cas('some_key', expected, cas+1) == false)
assert op_addset_succeeds(cas = dc.set_cas('some_key', expected, cas))
# Replace operation, first with wrong then with correct CAS
expected = {'blah' => 'replace succeeded'}
assert(dc.replace_cas('some_key', expected, cas+1) == false)
assert op_addset_succeeds(cas = dc.replace_cas('some_key', expected, cas))
# Delete operation, first with wrong then with correct CAS
assert(dc.delete_cas('some_key', cas+1) == false)
assert dc.delete_cas('some_key', cas)
end
end
end
end
dalli-2.7.9/test/test_compressor.rb 0000664 0000000 0000000 00000002505 13362715446 0017372 0 ustar 00root root 0000000 0000000 # encoding: utf-8
# frozen_string_literal: true
require_relative 'helper'
require 'json'
# Pass-through compressor used to verify that Dalli honors a custom
# :compressor option; both operations return their input unchanged.
class NoopCompressor
  class << self
    # Returns +data+ untouched.
    def compress(data)
      data
    end

    # Returns +data+ untouched.
    def decompress(data)
      data
    end
  end
end
describe 'Compressor' do
  # Without an explicit option, servers use Dalli::Compressor.
  it 'default to Dalli::Compressor' do
    memcached(29199) do |dc|
      dc.set 1,2
      assert_equal Dalli::Compressor, dc.instance_variable_get('@ring').servers.first.compressor
    end
  end
  # A :compressor option must be propagated down to each server object.
  it 'support a custom compressor' do
    memcached(29199) do |dc|
      memcache = Dalli::Client.new('127.0.0.1:29199', :compressor => NoopCompressor)
      memcache.set 1,2
      begin
        assert_equal NoopCompressor, memcache.instance_variable_get('@ring').servers.first.compressor
        memcached(19127) do |newdc|
          assert newdc.set("string-test", "a test string")
          assert_equal("a test string", newdc.get("string-test"))
        end
      end
    end
  end
end
describe 'GzipCompressor' do
  # A >1KB random payload forces compression; it must round-trip intact.
  it 'compress and uncompress data using Zlib::GzipWriter/Reader' do
    memcached(19127,nil,{:compress=>true,:compressor=>Dalli::GzipCompressor}) do |dc|
      data = (0...1025).map{65.+(rand(26)).chr}.join
      assert dc.set("test", data)
      assert_equal Dalli::GzipCompressor, dc.instance_variable_get('@ring').servers.first.compressor
      assert_equal(data, dc.get("test"))
    end
  end
end
dalli-2.7.9/test/test_dalli.rb 0000664 0000000 0000000 00000052576 13362715446 0016300 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
describe 'Dalli' do
describe 'options parsing' do
it 'handle deprecated options' do
dc = Dalli::Client.new('foo', :compression => true)
assert dc.instance_variable_get(:@options)[:compress]
refute dc.instance_variable_get(:@options)[:compression]
end
it 'not warn about valid options' do
dc = Dalli::Client.new('foo', :compress => true)
# Rails.logger.expects :warn
assert dc.instance_variable_get(:@options)[:compress]
end
it 'raises error with invalid expires_in' do
bad_data = [{:bad => 'expires in data'}, Hash, [1,2,3]]
bad_data.each do |bad|
assert_raises ArgumentError do
Dalli::Client.new('foo', {:expires_in => bad})
end
end
end
it 'return string type for namespace attribute' do
dc = Dalli::Client.new('foo', :namespace => :wunderschoen)
assert_equal "wunderschoen", dc.send(:namespace)
dc.close
dc = Dalli::Client.new('foo', :namespace => Proc.new{:wunderschoen})
assert_equal "wunderschoen", dc.send(:namespace)
dc.close
end
end
describe 'key validation' do
it 'not allow blanks' do
memcached_persistent do |dc|
dc.set ' ', 1
assert_equal 1, dc.get(' ')
dc.set "\t", 1
assert_equal 1, dc.get("\t")
dc.set "\n", 1
assert_equal 1, dc.get("\n")
assert_raises ArgumentError do
dc.set "", 1
end
assert_raises ArgumentError do
dc.set nil, 1
end
end
end
it 'allow namespace to be a symbol' do
memcached_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", :namespace => :wunderschoen)
dc.set "x" * 251, 1
assert 1, dc.get("#{'x' * 200}:md5:#{Digest::MD5.hexdigest('x' * 251)}")
end
end
end
describe 'ttl validation' do
it 'generated an ArgumentError for ttl that does not support to_i' do
memcached_persistent do |dc|
assert_raises ArgumentError do
dc.set('foo', 'bar', [])
end
end
end
end
it "default to localhost:11211" do
dc = Dalli::Client.new
ring = dc.send(:ring)
s1 = ring.servers.first.hostname
assert_equal 1, ring.servers.size
dc.close
dc = Dalli::Client.new('localhost:11211')
ring = dc.send(:ring)
s2 = ring.servers.first.hostname
assert_equal 1, ring.servers.size
dc.close
dc = Dalli::Client.new(['localhost:11211'])
ring = dc.send(:ring)
s3 = ring.servers.first.hostname
assert_equal 1, ring.servers.size
dc.close
assert_equal '127.0.0.1', s1
assert_equal s2, s3
end
it "accept comma separated string" do
dc = Dalli::Client.new("server1.example.com:11211,server2.example.com:11211")
ring = dc.send(:ring)
assert_equal 2, ring.servers.size
s1,s2 = ring.servers.map(&:hostname)
assert_equal "server1.example.com", s1
assert_equal "server2.example.com", s2
end
it "accept array of servers" do
dc = Dalli::Client.new(["server1.example.com:11211","server2.example.com:11211"])
ring = dc.send(:ring)
assert_equal 2, ring.servers.size
s1,s2 = ring.servers.map(&:hostname)
assert_equal "server1.example.com", s1
assert_equal "server2.example.com", s2
end
describe 'using a live server' do
it "support get/set" do
memcached_persistent do |dc|
dc.flush
val1 = "1234567890"*105000
assert_equal false, dc.set('a', val1)
val1 = "1234567890"*100000
dc.set('a', val1)
val2 = dc.get('a')
assert_equal val1, val2
assert op_addset_succeeds(dc.set('a', nil))
assert_nil dc.get('a')
end
end
it 'supports delete' do
memcached_persistent do |dc|
dc.set('some_key', 'some_value')
assert_equal 'some_value', dc.get('some_key')
dc.delete('some_key')
assert_nil dc.get('some_key')
end
end
it 'returns nil for nonexist key' do
memcached_persistent do |dc|
assert_nil dc.get('notexist')
end
end
it 'allows "Not found" as value' do
memcached_persistent do |dc|
dc.set('key1', 'Not found')
assert_equal 'Not found', dc.get('key1')
end
end
it "support stats" do
memcached_persistent do |dc|
# make sure that get_hits would not equal 0
dc.set(:a, "1234567890"*100000)
dc.get(:a)
stats = dc.stats
servers = stats.keys
assert(servers.any? do |s|
stats[s]["get_hits"].to_i != 0
end, "general stats failed")
stats_items = dc.stats(:items)
servers = stats_items.keys
assert(servers.all? do |s|
stats_items[s].keys.any? do |key|
key =~ /items:[0-9]+:number/
end
end, "stats items failed")
stats_slabs = dc.stats(:slabs)
servers = stats_slabs.keys
assert(servers.all? do |s|
stats_slabs[s].keys.any? do |key|
key == "active_slabs"
end
end, "stats slabs failed")
# reset_stats test
results = dc.reset_stats
assert(results.all? { |x| x })
stats = dc.stats
servers = stats.keys
# check if reset was performed
servers.each do |s|
assert_equal 0, dc.stats[s]["get_hits"].to_i
end
end
end
it "support the fetch operation" do
memcached_persistent do |dc|
dc.flush
expected = { 'blah' => 'blerg!' }
executed = false
value = dc.fetch('fetch_key') do
executed = true
expected
end
assert_equal expected, value
assert_equal true, executed
executed = false
value = dc.fetch('fetch_key') do
executed = true
expected
end
assert_equal expected, value
assert_equal false, executed
end
end
it "support the fetch operation with falsey values" do
memcached_persistent do |dc|
dc.flush
dc.set("fetch_key", false)
res = dc.fetch("fetch_key") { flunk "fetch block called" }
assert_equal false, res
end
end
it "support the fetch operation with nil values when cache_nils: true" do
memcached_persistent(21345, cache_nils: true) do |dc|
dc.flush
dc.set("fetch_key", nil)
res = dc.fetch("fetch_key") { flunk "fetch block called" }
assert_nil res
end
memcached_persistent(21345, cache_nils: false) do |dc|
dc.flush
dc.set("fetch_key", nil)
executed = false
res = dc.fetch("fetch_key") { executed = true; 'bar' }
assert_equal 'bar', res
assert_equal true, executed
end
end
it "support the cas operation" do
memcached_persistent do |dc|
dc.flush
expected = { 'blah' => 'blerg!' }
resp = dc.cas('cas_key') do |value|
fail('Value it not exist')
end
assert_nil resp
mutated = { 'blah' => 'foo!' }
dc.set('cas_key', expected)
resp = dc.cas('cas_key') do |value|
assert_equal expected, value
mutated
end
assert op_cas_succeeds(resp)
resp = dc.get('cas_key')
assert_equal mutated, resp
end
end
it "support the cas! operation" do
memcached_persistent do |dc|
dc.flush
mutated = { 'blah' => 'foo!' }
resp = dc.cas!('cas_key') do |value|
assert_nil value
mutated
end
assert op_cas_succeeds(resp)
resp = dc.get('cas_key')
assert_equal mutated, resp
end
end
it "support multi-get" do
memcached_persistent do |dc|
dc.close
dc.flush
resp = dc.get_multi(%w(a b c d e f))
assert_equal({}, resp)
dc.set('a', 'foo')
dc.set('b', 123)
dc.set('c', %w(a b c))
# Invocation without block
resp = dc.get_multi(%w(a b c d e f))
expected_resp = { 'a' => 'foo', 'b' => 123, 'c' => %w(a b c) }
assert_equal(expected_resp, resp)
# Invocation with block
dc.get_multi(%w(a b c d e f)) do |k, v|
assert(expected_resp.has_key?(k) && expected_resp[k] == v)
expected_resp.delete(k)
end
assert expected_resp.empty?
# Perform a big multi-get with 1000 elements.
arr = []
dc.multi do
1000.times do |idx|
dc.set idx, idx
arr << idx
end
end
result = dc.get_multi(arr)
assert_equal(1000, result.size)
assert_equal(50, result['50'])
end
end
it 'support raw incr/decr' do
memcached_persistent do |client|
client.flush
assert op_addset_succeeds(client.set('fakecounter', 0, 0, :raw => true))
assert_equal 1, client.incr('fakecounter', 1)
assert_equal 2, client.incr('fakecounter', 1)
assert_equal 3, client.incr('fakecounter', 1)
assert_equal 1, client.decr('fakecounter', 2)
assert_equal "1", client.get('fakecounter', :raw => true)
resp = client.incr('mycounter', 0)
assert_nil resp
resp = client.incr('mycounter', 1, 0, 2)
assert_equal 2, resp
resp = client.incr('mycounter', 1)
assert_equal 3, resp
resp = client.set('rawcounter', 10, 0, :raw => true)
assert op_cas_succeeds(resp)
resp = client.get('rawcounter', :raw => true)
assert_equal '10', resp
resp = client.incr('rawcounter', 1)
assert_equal 11, resp
end
end
it "support incr/decr operations" do
memcached_persistent do |dc|
dc.flush
resp = dc.decr('counter', 100, 5, 0)
assert_equal 0, resp
resp = dc.decr('counter', 10)
assert_equal 0, resp
resp = dc.incr('counter', 10)
assert_equal 10, resp
current = 10
100.times do |x|
resp = dc.incr('counter', 10)
assert_equal current + ((x+1)*10), resp
end
resp = dc.decr('10billion', 0, 5, 10)
# go over the 32-bit mark to verify proper (un)packing
resp = dc.incr('10billion', 10_000_000_000)
assert_equal 10_000_000_010, resp
resp = dc.decr('10billion', 1)
assert_equal 10_000_000_009, resp
resp = dc.decr('10billion', 0)
assert_equal 10_000_000_009, resp
resp = dc.incr('10billion', 0)
assert_equal 10_000_000_009, resp
assert_nil dc.incr('DNE', 10)
assert_nil dc.decr('DNE', 10)
resp = dc.incr('big', 100, 5, 0xFFFFFFFFFFFFFFFE)
assert_equal 0xFFFFFFFFFFFFFFFE, resp
resp = dc.incr('big', 1)
assert_equal 0xFFFFFFFFFFFFFFFF, resp
# rollover the 64-bit value, we'll get something undefined.
resp = dc.incr('big', 1)
refute_equal 0x10000000000000000, resp
dc.reset
end
end
it 'support the append and prepend operations' do
memcached_persistent do |dc|
dc.flush
assert op_addset_succeeds(dc.set('456', 'xyz', 0, :raw => true))
assert_equal true, dc.prepend('456', '0')
assert_equal true, dc.append('456', '9')
assert_equal '0xyz9', dc.get('456', :raw => true)
assert_equal '0xyz9', dc.get('456')
assert_equal false, dc.append('nonexist', 'abc')
assert_equal false, dc.prepend('nonexist', 'abc')
end
end
it 'supports replace operation' do
memcached_persistent do |dc|
dc.flush
dc.set('key', 'value')
assert op_replace_succeeds(dc.replace('key', 'value2'))
assert_equal 'value2', dc.get('key')
end
end
it 'support touch operation' do
memcached_persistent do |dc|
begin
dc.flush
dc.set 'key', 'value'
assert_equal true, dc.touch('key', 10)
assert_equal true, dc.touch('key')
assert_equal 'value', dc.get('key')
assert_nil dc.touch('notexist')
rescue Dalli::DalliError => e
# This will happen when memcached is in lesser version than 1.4.8
assert_equal 'Response error 129: Unknown command', e.message
end
end
end
it 'support version operation' do
memcached_persistent do |dc|
v = dc.version
servers = v.keys
assert(servers.any? do |s|
v[s] != nil
end, "version failed")
end
end
it 'allow TCP connections to be configured for keepalive' do
memcached_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", :keepalive => true)
dc.set(:a, 1)
ring = dc.send(:ring)
server = ring.servers.first
socket = server.instance_variable_get('@sock')
optval = socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE)
optval = optval.unpack 'i'
assert_equal true, (optval[0] != 0)
end
end
it 'allow TCP connections to configure SO_RCVBUF' do
memcached_persistent do |dc, port|
value = 5000
dc = Dalli::Client.new("localhost:#{port}", :rcvbuf => value)
dc.set(:a, 1)
ring = dc.send(:ring)
server = ring.servers.first
socket = server.instance_variable_get('@sock')
optval = socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_RCVBUF)
expected = jruby? ? value : value * 2
assert_equal expected, optval.unpack('i')[0]
end
end
it 'allow TCP connections to configure SO_SNDBUF' do
memcached_persistent do |dc, port|
value = 5000
dc = Dalli::Client.new("localhost:#{port}", :sndbuf => value)
dc.set(:a, 1)
ring = dc.send(:ring)
server = ring.servers.first
socket = server.instance_variable_get('@sock')
optval = socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_SNDBUF)
expected = jruby? ? value : value * 2
assert_equal expected, optval.unpack('i')[0]
end
end
it "pass a simple smoke test" do
memcached_persistent do |dc, port|
resp = dc.flush
refute_nil resp
assert_equal [true, true], resp
assert op_addset_succeeds(dc.set(:foo, 'bar'))
assert_equal 'bar', dc.get(:foo)
resp = dc.get('123')
assert_nil resp
assert op_addset_succeeds(dc.set('123', 'xyz'))
resp = dc.get('123')
assert_equal 'xyz', resp
assert op_addset_succeeds(dc.set('123', 'abc'))
dc.prepend('123', '0')
dc.append('123', '0')
assert_raises Dalli::UnmarshalError do
resp = dc.get('123')
end
dc.close
dc = nil
dc = Dalli::Client.new("localhost:#{port}")
assert op_addset_succeeds(dc.set('456', 'xyz', 0, :raw => true))
resp = dc.prepend '456', '0'
assert_equal true, resp
resp = dc.append '456', '9'
assert_equal true, resp
resp = dc.get('456', :raw => true)
assert_equal '0xyz9', resp
assert op_addset_succeeds(dc.set('456', false))
resp = dc.get('456')
assert_equal false, resp
resp = dc.stats
assert_equal Hash, resp.class
dc.close
end
end
it "pass a simple smoke test on unix socket" do
memcached_persistent(MemcachedMock::UNIX_SOCKET_PATH) do |dc, path|
resp = dc.flush
refute_nil resp
assert_equal [true], resp
assert op_addset_succeeds(dc.set(:foo, 'bar'))
assert_equal 'bar', dc.get(:foo)
resp = dc.get('123')
assert_nil resp
assert op_addset_succeeds(dc.set('123', 'xyz'))
resp = dc.get('123')
assert_equal 'xyz', resp
assert op_addset_succeeds(dc.set('123', 'abc'))
dc.prepend('123', '0')
dc.append('123', '0')
assert_raises Dalli::UnmarshalError do
resp = dc.get('123')
end
dc.close
dc = nil
dc = Dalli::Client.new(path)
assert op_addset_succeeds(dc.set('456', 'xyz', 0, :raw => true))
resp = dc.prepend '456', '0'
assert_equal true, resp
resp = dc.append '456', '9'
assert_equal true, resp
resp = dc.get('456', :raw => true)
assert_equal '0xyz9', resp
assert op_addset_succeeds(dc.set('456', false))
resp = dc.get('456')
assert_equal false, resp
resp = dc.stats
assert_equal Hash, resp.class
dc.close
end
end
it "support multithreaded access" do
memcached_persistent do |cache|
cache.flush
workers = []
cache.set('f', 'zzz')
assert op_cas_succeeds((cache.cas('f') do |value|
value << 'z'
end))
assert_equal 'zzzz', cache.get('f')
# Have a bunch of threads perform a bunch of operations at the same time.
# Verify the result of each operation to ensure the request and response
# are not intermingled between threads.
10.times do
workers << Thread.new do
100.times do
cache.set('a', 9)
cache.set('b', 11)
inc = cache.incr('cat', 10, 0, 10)
cache.set('f', 'zzz')
res = cache.cas('f') do |value|
value << 'z'
end
refute_nil res
assert_equal false, cache.add('a', 11)
assert_equal({ 'a' => 9, 'b' => 11 }, cache.get_multi(['a', 'b']))
inc = cache.incr('cat', 10)
assert_equal 0, inc % 5
cache.decr('cat', 5)
assert_equal 11, cache.get('b')
assert_equal %w(a b), cache.get_multi('a', 'b', 'c').keys.sort
end
end
end
workers.each { |w| w.join }
cache.flush
end
end
it "handle namespaced keys" do
memcached_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", :namespace => 'a')
dc.set('namespaced', 1)
dc2 = Dalli::Client.new("localhost:#{port}", :namespace => 'b')
dc2.set('namespaced', 2)
assert_equal 1, dc.get('namespaced')
assert_equal 2, dc2.get('namespaced')
end
end
it "handle nil namespace" do
memcached_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", :namespace => nil)
assert_equal 'key', dc.send(:validate_key, 'key')
end
end
it 'truncate cache keys that are too long' do
memcached_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", :namespace => 'some:namspace')
key = "this cache key is far too long so it must be hashed and truncated and stuff" * 10
value = "some value"
assert op_addset_succeeds(dc.set(key, value))
assert_equal value, dc.get(key)
end
end
it "handle namespaced keys in multi_get" do
memcached_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", :namespace => 'a')
dc.set('a', 1)
dc.set('b', 2)
assert_equal({'a' => 1, 'b' => 2}, dc.get_multi('a', 'b'))
end
end
it 'handle special Regexp characters in namespace with get_multi' do
memcached_persistent do |dc, port|
# /(?!)/ is a contradictory PCRE and should never be able to match
dc = Dalli::Client.new("localhost:#{port}", :namespace => '(?!)')
dc.set('a', 1)
dc.set('b', 2)
assert_equal({'a' => 1, 'b' => 2}, dc.get_multi('a', 'b'))
end
end
it "handle application marshalling issues" do
memcached_persistent do |dc|
old = Dalli.logger
Dalli.logger = Logger.new(nil)
begin
assert_equal false, dc.set('a', Proc.new { true })
ensure
Dalli.logger = old
end
end
end
describe 'with compression' do
it 'allow large values' do
memcached_persistent do |dc|
dalli = Dalli::Client.new(dc.instance_variable_get(:@servers), :compress => true)
value = "0"*1024*1024
assert_equal false, dc.set('verylarge', value)
dalli.set('verylarge', value)
end
end
it 'allow large values to be set' do
memcached_persistent do |dc|
value = "0"*1024*1024
assert dc.set('verylarge', value, nil, :compress => true)
end
end
end
describe 'in low memory conditions' do
it 'handle error response correctly' do
memcached_low_mem_persistent do |dc|
failed = false
value = "1234567890"*100
1_000.times do |idx|
begin
assert op_addset_succeeds(dc.set(idx, value))
rescue Dalli::DalliError
failed = true
assert((800..960).include?(idx), "unexpected failure on iteration #{idx}")
break
end
end
assert failed, 'did not fail under low memory conditions'
end
end
it 'fit more values with compression' do
memcached_low_mem_persistent do |dc, port|
dalli = Dalli::Client.new("localhost:#{port}", :compress => true)
failed = false
value = "1234567890"*1000
10_000.times do |idx|
begin
assert op_addset_succeeds(dalli.set(idx, value))
rescue Dalli::DalliError
failed = true
assert((6000..7800).include?(idx), "unexpected failure on iteration #{idx}")
break
end
end
assert failed, 'did not fail under low memory conditions'
end
end
end
end
end
dalli-2.7.9/test/test_encoding.rb 0000664 0000000 0000000 00000001236 13362715446 0016764 0 ustar 00root root 0000000 0000000 # encoding: utf-8
# frozen_string_literal: true
require_relative 'helper'
describe 'Encoding' do
describe 'using a live server' do
it 'support i18n content' do
memcached_persistent do |dc|
key = 'foo'
utf_key = utf8 = 'ƒ©åÍÎ'
assert dc.set(key, utf8)
assert_equal utf8, dc.get(key)
dc.set(utf_key, utf8)
assert_equal utf8, dc.get(utf_key)
end
end
it 'support content expiry' do
memcached_persistent do |dc|
key = 'foo'
assert dc.set(key, 'bar', 1)
assert_equal 'bar', dc.get(key)
sleep 1.2
assert_nil dc.get(key)
end
end
end
end
dalli-2.7.9/test/test_failover.rb 0000664 0000000 0000000 00000007622 13362715446 0017012 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
describe 'failover' do
describe 'timeouts' do
it 'not lead to corrupt sockets' do
memcached_persistent do |dc|
value = {:test => "123"}
begin
Timeout.timeout 0.01 do
start_time = Time.now
10_000.times do
dc.set("test_123", value)
end
flunk("Did not timeout in #{Time.now - start_time}")
end
rescue Timeout::Error
end
assert_equal(value, dc.get("test_123"))
end
end
end
describe 'assuming some bad servers' do
it 'silently reconnect if server hiccups' do
server_port = 30124
memcached_persistent(server_port) do |dc, port|
dc.set 'foo', 'bar'
foo = dc.get 'foo'
assert_equal foo, 'bar'
memcached_kill(port)
memcached_persistent(port) do
foo = dc.get 'foo'
assert_nil foo
memcached_kill(port)
end
end
end
it 'handle graceful failover' do
port_1 = 31777
port_2 = 32113
memcached_persistent(port_1) do |first_dc, first_port|
memcached_persistent(port_2) do |second_dc, second_port|
dc = Dalli::Client.new ["localhost:#{first_port}", "localhost:#{second_port}"]
dc.set 'foo', 'bar'
foo = dc.get 'foo'
assert_equal foo, 'bar'
memcached_kill(first_port)
dc.set 'foo', 'bar'
foo = dc.get 'foo'
assert_equal foo, 'bar'
memcached_kill(second_port)
assert_raises Dalli::RingError, :message => "No server available" do
dc.set 'foo', 'bar'
end
end
end
end
it 'handle them gracefully in get_multi' do
port_1 = 32971
port_2 = 34312
memcached_persistent(port_1) do |first_dc, first_port|
memcached(port_2) do |second_dc, second_port|
dc = Dalli::Client.new ["localhost:#{first_port}", "localhost:#{second_port}"]
dc.set 'a', 'a1'
result = dc.get_multi ['a']
assert_equal result, {'a' => 'a1'}
memcached_kill(first_port)
result = dc.get_multi ['a']
assert_equal result, {'a' => 'a1'}
end
end
end
it 'handle graceful failover in get_multi' do
port_1 = 34541
port_2 = 33044
memcached_persistent(port_1) do |first_dc, first_port|
memcached_persistent(port_2) do |second_dc, second_port|
dc = Dalli::Client.new ["localhost:#{first_port}", "localhost:#{second_port}"]
dc.set 'foo', 'foo1'
dc.set 'bar', 'bar1'
result = dc.get_multi ['foo', 'bar']
assert_equal result, {'foo' => 'foo1', 'bar' => 'bar1'}
memcached_kill(first_port)
dc.set 'foo', 'foo1'
dc.set 'bar', 'bar1'
result = dc.get_multi ['foo', 'bar']
assert_equal result, {'foo' => 'foo1', 'bar' => 'bar1'}
memcached_kill(second_port)
result = dc.get_multi ['foo', 'bar']
assert_equal result, {}
end
end
end
it 'stats it still properly report' do
port_1 = 34547
port_2 = 33219
memcached_persistent(port_1) do |first_dc, first_port|
memcached_persistent(port_2) do |second_dc, second_port|
dc = Dalli::Client.new ["localhost:#{first_port}", "localhost:#{second_port}"]
result = dc.stats
assert_instance_of Hash, result["localhost:#{first_port}"]
assert_instance_of Hash, result["localhost:#{second_port}"]
memcached_kill(first_port)
dc = Dalli::Client.new ["localhost:#{first_port}", "localhost:#{second_port}"]
result = dc.stats
assert_instance_of NilClass, result["localhost:#{first_port}"]
assert_instance_of Hash, result["localhost:#{second_port}"]
memcached_kill(second_port)
end
end
end
end
end
dalli-2.7.9/test/test_network.rb 0000664 0000000 0000000 00000003664 13362715446 0016676 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
describe 'Network' do
  describe 'assuming a bad network' do
    # No listener at all on the port: every operation raises RingError.
    it 'handle no server available' do
      assert_raises Dalli::RingError, :message => "No server available" do
        dc = Dalli::Client.new 'localhost:19333'
        dc.get 'foo'
      end
    end
    # Each case below uses a mock server that misbehaves in a specific way;
    # all must surface as RingError rather than hanging or crashing.
    describe 'with a fake server' do
      it 'handle connection reset' do
        memcached_mock(lambda {|sock| sock.close }) do
          assert_raises Dalli::RingError, :message => "No server available" do
            dc = Dalli::Client.new('localhost:19123')
            dc.get('abc')
          end
        end
      end
      it 'handle connection reset with unix socket' do
        socket_path = MemcachedMock::UNIX_SOCKET_PATH
        memcached_mock(lambda {|sock| sock.close }, :start_unix, socket_path) do
          assert_raises Dalli::RingError, :message => "No server available" do
            dc = Dalli::Client.new(socket_path)
            dc.get('abc')
          end
        end
      end
      it 'handle malformed response' do
        memcached_mock(lambda {|sock| sock.write('123') }) do
          assert_raises Dalli::RingError, :message => "No server available" do
            dc = Dalli::Client.new('localhost:19123')
            dc.get('abc')
          end
        end
      end
      it 'handle connect timeouts' do
        memcached_mock(lambda {|sock| sleep(0.6); sock.close }, :delayed_start) do
          assert_raises Dalli::RingError, :message => "No server available" do
            dc = Dalli::Client.new('localhost:19123')
            dc.get('abc')
          end
        end
      end
      it 'handle read timeouts' do
        memcached_mock(lambda {|sock| sleep(0.6); sock.write('giraffe') }) do
          assert_raises Dalli::RingError, :message => "No server available" do
            dc = Dalli::Client.new('localhost:19123')
            dc.get('abc')
          end
        end
      end
    end
  end
dalli-2.7.9/test/test_rack_session.rb 0000664 0000000 0000000 00000031100 13362715446 0017652 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
require 'rack/session/dalli'
require 'rack/lint'
require 'rack/mock'
require 'thread'
# Integration tests for Rack::Session::Dalli, the Rack session store backed
# by memcached via Dalli. A persistent memcached on port 19129 is started in
# the `before` hook; most tests drive a small "incrementor" Rack app through
# Rack::MockRequest and inspect the session cookie round-trips.
describe Rack::Session::Dalli do
before do
@port = 19129
memcached_persistent(@port)
# Point the middleware's default server at the test instance.
Rack::Session::Dalli::DEFAULT_DALLI_OPTIONS[:memcache_server] = "localhost:#{@port}"
# test memcache connection
Rack::Session::Dalli.new(incrementor)
end
# Cookie name used by the middleware (e.g. "rack.session").
let(:session_key) { Rack::Session::Dalli::DEFAULT_OPTIONS[:key] }
# Matches the session cookie; capture group 1 is the hex session id.
let(:session_match) do
/#{session_key}=([0-9a-fA-F]+);/
end
# Minimal Rack app: bumps session["counter"] and echoes the session hash.
let(:incrementor_proc) do
lambda do |env|
env["rack.session"]["counter"] ||= 0
env["rack.session"]["counter"] += 1
Rack::Response.new(env["rack.session"].inspect).to_a
end
end
# Variants of the incrementor that set a rack.session.options flag before
# incrementing: :drop discards the session, :renew rotates the id,
# :defer skips sending the cookie, :skip skips persisting entirely.
let(:drop_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:drop] = true
incrementor_proc.call(env)
end)
end
let(:renew_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:renew] = true
incrementor_proc.call(env)
end)
end
let(:defer_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:defer] = true
incrementor_proc.call(env)
end)
end
let(:skip_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:skip] = true
incrementor_proc.call(env)
end)
end
let(:incrementor) { Rack::Lint.new(incrementor_proc) }
it "faults on no connection" do
assert_raises Dalli::RingError do
Rack::Session::Dalli.new(incrementor, :memcache_server => 'nosuchserver')
end
end
it "connects to existing server" do
assert_silent do
rsd = Rack::Session::Dalli.new(incrementor, :namespace => 'test:rack:session')
rsd.pool.set('ping', '')
end
end
it "passes options to MemCache" do
opts = {
:namespace => 'test:rack:session',
:compression_min_size => 1234
}
rsd = Rack::Session::Dalli.new(incrementor, opts)
assert_equal(opts[:namespace], rsd.pool.instance_eval { @options[:namespace] })
assert_equal(opts[:compression_min_size], rsd.pool.instance_eval { @options[:compression_min_size] })
end
it "accepts and prioritizes a :cache option" do
# When an explicit Dalli::Client is supplied via :cache, its own options
# win over options passed alongside it (here, the conflicting :namespace).
server = Rack::Session::Dalli::DEFAULT_DALLI_OPTIONS[:memcache_server]
cache = Dalli::Client.new(server, :namespace => 'test:rack:session')
rsd = Rack::Session::Dalli.new(incrementor, :cache => cache, :namespace => 'foobar')
assert_equal('test:rack:session', rsd.pool.instance_eval { @options[:namespace] })
end
it "generates sids without an existing Dalli::Client" do
rsd = Rack::Session::Dalli.new(incrementor)
assert rsd.send :generate_sid
end
it "upgrades to a connection pool" do
# with_connectionpool (helper) makes the connection_pool gem available so
# :pool_size wraps the client in a ConnectionPool.
opts = {
:namespace => 'test:rack:session',
:pool_size => 10
}
with_connectionpool do
rsd = Rack::Session::Dalli.new(incrementor, opts)
assert rsd.pool.is_a? ConnectionPool
rsd.pool.with do |mc|
assert mc.instance_eval { !@options[:threadsafe] }
assert_equal(opts[:namespace], mc.instance_eval { @options[:namespace] })
end
end
end
it "creates a new cookie" do
rsd = Rack::Session::Dalli.new(incrementor)
res = Rack::MockRequest.new(rsd).get("/")
assert res["Set-Cookie"].include?("#{session_key}=")
assert_equal '{"counter"=>1}', res.body
end
it "determines session from a cookie" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res = req.get("/")
cookie = res["Set-Cookie"]
assert_equal '{"counter"=>2}', req.get("/", "HTTP_COOKIE" => cookie).body
assert_equal '{"counter"=>3}', req.get("/", "HTTP_COOKIE" => cookie).body
end
it "determines session only from a cookie by default" do
# Passing the sid as a query param must NOT resume the session unless
# :cookie_only => false (next test); each request starts fresh.
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res = req.get("/")
sid = res["Set-Cookie"][session_match, 1]
assert_equal '{"counter"=>1}', req.get("/?rack.session=#{sid}").body
assert_equal '{"counter"=>1}', req.get("/?rack.session=#{sid}").body
end
it "determines session from params" do
rsd = Rack::Session::Dalli.new(incrementor, :cookie_only => false)
req = Rack::MockRequest.new(rsd)
res = req.get("/")
sid = res["Set-Cookie"][session_match, 1]
assert_equal '{"counter"=>2}', req.get("/?rack.session=#{sid}").body
assert_equal '{"counter"=>3}', req.get("/?rack.session=#{sid}").body
end
it "survives nonexistant cookies" do
bad_cookie = "rack.session=blarghfasel"
rsd = Rack::Session::Dalli.new(incrementor)
res = Rack::MockRequest.new(rsd).
get("/", "HTTP_COOKIE" => bad_cookie)
assert_equal '{"counter"=>1}', res.body
cookie = res["Set-Cookie"][session_match]
refute_match(/#{bad_cookie}/, cookie)
end
it "survives nonexistant blank cookies" do
bad_cookie = "rack.session="
rsd = Rack::Session::Dalli.new(incrementor)
res = Rack::MockRequest.new(rsd).
get("/", "HTTP_COOKIE" => bad_cookie)
cookie = res["Set-Cookie"][session_match]
refute_match(/#{bad_cookie}$/, cookie)
end
it "sets an expiration on new sessions" do
# Real sleep past :expire_after so memcached evicts the session; the
# follow-up request should get a brand-new session (counter back to 1).
rsd = Rack::Session::Dalli.new(incrementor, :expire_after => 3)
res = Rack::MockRequest.new(rsd).get('/')
assert res.body.include?('"counter"=>1')
cookie = res["Set-Cookie"]
puts 'Sleeping to expire session' if $DEBUG
sleep 4
res = Rack::MockRequest.new(rsd).get('/', "HTTP_COOKIE" => cookie)
refute_equal cookie, res["Set-Cookie"]
assert res.body.include?('"counter"=>1')
end
it "maintains freshness of existing sessions" do
# Each hit should re-touch the TTL; only after going idle past
# :expire_after does the session expire.
rsd = Rack::Session::Dalli.new(incrementor, :expire_after => 3)
res = Rack::MockRequest.new(rsd).get('/')
assert res.body.include?('"counter"=>1')
cookie = res["Set-Cookie"]
res = Rack::MockRequest.new(rsd).get('/', "HTTP_COOKIE" => cookie)
assert_equal cookie, res["Set-Cookie"]
assert res.body.include?('"counter"=>2')
puts 'Sleeping to expire session' if $DEBUG
sleep 4
res = Rack::MockRequest.new(rsd).get('/', "HTTP_COOKIE" => cookie)
refute_equal cookie, res["Set-Cookie"]
assert res.body.include?('"counter"=>1')
end
it "does not send the same session id if it did not change" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res0 = req.get("/")
cookie = res0["Set-Cookie"][session_match]
assert_equal '{"counter"=>1}', res0.body
res1 = req.get("/", "HTTP_COOKIE" => cookie)
assert_nil res1["Set-Cookie"]
assert_equal '{"counter"=>2}', res1.body
res2 = req.get("/", "HTTP_COOKIE" => cookie)
assert_nil res2["Set-Cookie"]
assert_equal '{"counter"=>3}', res2.body
end
it "deletes cookies with :drop option" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
drop = Rack::Utils::Context.new(rsd, drop_session)
dreq = Rack::MockRequest.new(drop)
res1 = req.get("/")
session = (cookie = res1["Set-Cookie"])[session_match]
assert_equal '{"counter"=>1}', res1.body
res2 = dreq.get("/", "HTTP_COOKIE" => cookie)
assert_nil res2["Set-Cookie"]
assert_equal '{"counter"=>2}', res2.body
# After the drop, the old sid is gone: reusing it starts a new session.
res3 = req.get("/", "HTTP_COOKIE" => cookie)
refute_equal session, res3["Set-Cookie"][session_match]
assert_equal '{"counter"=>1}', res3.body
end
it "provides new session id with :renew option" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
renew = Rack::Utils::Context.new(rsd, renew_session)
rreq = Rack::MockRequest.new(renew)
res1 = req.get("/")
session = (cookie = res1["Set-Cookie"])[session_match]
assert_equal '{"counter"=>1}', res1.body
res2 = rreq.get("/", "HTTP_COOKIE" => cookie)
new_cookie = res2["Set-Cookie"]
new_session = new_cookie[session_match]
refute_equal session, new_session
assert_equal '{"counter"=>2}', res2.body
res3 = req.get("/", "HTTP_COOKIE" => new_cookie)
assert_equal '{"counter"=>3}', res3.body
# Old cookie was deleted
res4 = req.get("/", "HTTP_COOKIE" => cookie)
assert_equal '{"counter"=>1}', res4.body
end
it "omits cookie with :defer option but still updates the state" do
rsd = Rack::Session::Dalli.new(incrementor)
count = Rack::Utils::Context.new(rsd, incrementor)
defer = Rack::Utils::Context.new(rsd, defer_session)
dreq = Rack::MockRequest.new(defer)
creq = Rack::MockRequest.new(count)
res0 = dreq.get("/")
assert_nil res0["Set-Cookie"]
assert_equal '{"counter"=>1}', res0.body
res0 = creq.get("/")
res1 = dreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>2}', res1.body
res2 = dreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>3}', res2.body
end
it "omits cookie and state update with :skip option" do
rsd = Rack::Session::Dalli.new(incrementor)
count = Rack::Utils::Context.new(rsd, incrementor)
skip = Rack::Utils::Context.new(rsd, skip_session)
sreq = Rack::MockRequest.new(skip)
creq = Rack::MockRequest.new(count)
res0 = sreq.get("/")
assert_nil res0["Set-Cookie"]
assert_equal '{"counter"=>1}', res0.body
res0 = creq.get("/")
res1 = sreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>2}', res1.body
# :skip means nothing was persisted, so the counter does not advance.
res2 = sreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>2}', res2.body
end
it "updates deep hashes correctly" do
# Mutating a nested hash in-place must still be written back; compare
# raw stored values (pool.get with raw=true) across two requests.
hash_check = proc do |env|
session = env['rack.session']
unless session.include? 'test'
session.update :a => :b, :c => { :d => :e },
:f => { :g => { :h => :i} }, 'test' => true
else
session[:f][:g][:h] = :j
end
[200, {}, [session.inspect]]
end
rsd = Rack::Session::Dalli.new(hash_check)
req = Rack::MockRequest.new(rsd)
res0 = req.get("/")
session_id = (cookie = res0["Set-Cookie"])[session_match, 1]
ses0 = rsd.pool.get(session_id, true)
req.get("/", "HTTP_COOKIE" => cookie)
ses1 = rsd.pool.get(session_id, true)
refute_equal ses0, ses1
end
# anyone know how to do this better?
it "cleanly merges sessions when multithreaded" do
# Only exercised when $DEBUG is set; otherwise it asserts a tautology
# and bails out so the suite stays fast/deterministic.
unless $DEBUG
assert_equal 1, 1 # fake assertion to appease the mighty bacon
next
end
warn 'Running multithread test for Session::Dalli'
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res = req.get('/')
assert_equal '{"counter"=>1}', res.body
cookie = res["Set-Cookie"]
session_id = cookie[session_match, 1]
delta_incrementor = lambda do |env|
# emulate disconjoinment of threading
env['rack.session'] = env['rack.session'].dup
Thread.stop
env['rack.session'][(Time.now.usec*rand).to_i] = true
incrementor.call(env)
end
tses = Rack::Utils::Context.new rsd, delta_incrementor
treq = Rack::MockRequest.new(tses)
tnum = rand(7).to_i+5
r = Array.new(tnum) do
Thread.new(treq) do |run|
run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true)
end
end.reverse.map{|t| t.run.join.value }
r.each do |request|
assert_equal cookie, request['Set-Cookie']
assert request.body.include?('"counter"=>2')
end
session = rsd.pool.get(session_id)
assert_equal tnum+1, session.size # counter
assert_equal 2, session['counter'] # meeeh
tnum = rand(7).to_i+5
r = Array.new(tnum) do |i|
# NOTE(review): `time_delta` is not defined anywhere in this file; this
# branch (only reachable with $DEBUG set) would raise NameError here.
# Presumably it should be another delta-style incrementor — confirm/fix.
app = Rack::Utils::Context.new rsd, time_delta
req = Rack::MockRequest.new app
Thread.new(req) do |run|
run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true)
end
end.reverse.map{|t| t.run.join.value }
r.each do |request|
assert_equal cookie, request['Set-Cookie']
assert request.body.include?('"counter"=>3')
end
session = rsd.pool.get(session_id)
assert_equal tnum+1, session.size
assert_equal 3, session['counter']
drop_counter = proc do |env|
env['rack.session'].delete 'counter'
env['rack.session']['foo'] = 'bar'
[200, {'Content-Type'=>'text/plain'}, env['rack.session'].inspect]
end
tses = Rack::Utils::Context.new rsd, drop_counter
treq = Rack::MockRequest.new(tses)
tnum = rand(7).to_i+5
r = Array.new(tnum) do
Thread.new(treq) do |run|
run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true)
end
end.reverse.map{|t| t.run.join.value }
r.each do |request|
assert_equal cookie, request['Set-Cookie']
assert request.body.include?('"foo"=>"bar"')
end
session = rsd.pool.get(session_id)
assert_equal r.size+1, session.size
assert_nil session['counter']
assert_equal 'bar', session['foo']
end
end
dalli-2.7.9/test/test_ring.rb 0000664 0000000 0000000 00000005115 13362715446 0016135 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
# Tests for Dalli::Ring: consistent-hashing continuum ordering, server
# selection, and failover/recovery behavior.
#
# Fixes: (1) `assert_raises Klass, :message => "..."` never checked the
# message (minitest ignores a Hash argument) — we now capture the error and
# assert it; (2) two tests built a handcrafted `ring` that was immediately
# shadowed by `mc.send(:ring)` inside the memcached block — dead code removed.
describe 'Ring' do
  describe 'a ring of servers' do
    it "have the continuum sorted by value" do
      servers = [stub(:name => "localhost:11211", :weight => 1),
                 stub(:name => "localhost:9500", :weight => 1)]
      ring = Dalli::Ring.new(servers, {})
      # The continuum must be strictly ascending so lookup's binary search works.
      previous_value = 0
      ring.continuum.each do |entry|
        assert entry.value > previous_value
        previous_value = entry.value
      end
    end

    it 'raise when no servers are available/defined' do
      ring = Dalli::Ring.new([], {})
      error = assert_raises Dalli::RingError do
        ring.server_for_key('test')
      end
      assert_equal "No server available", error.message
    end

    describe 'containing only a single server' do
      it "raise correctly when it's not alive" do
        # Nothing listens on 12345, so the lone server is down.
        servers = [
          Dalli::Server.new("localhost:12345"),
        ]
        ring = Dalli::Ring.new(servers, {})
        error = assert_raises Dalli::RingError do
          ring.server_for_key('test')
        end
        assert_equal "No server available", error.message
      end

      it "return the server when it's alive" do
        memcached(19191) do |mc|
          ring = mc.send(:ring)
          assert_equal ring.servers.first.port, ring.server_for_key('test').port
        end
      end
    end

    describe 'containing multiple servers' do
      it "raise correctly when no server is alive" do
        servers = [
          Dalli::Server.new("localhost:12345"),
          Dalli::Server.new("localhost:12346"),
        ]
        ring = Dalli::Ring.new(servers, {})
        error = assert_raises Dalli::RingError do
          ring.server_for_key('test')
        end
        assert_equal "No server available", error.message
      end

      it "return an alive server when at least one is alive" do
        # NOTE(review): the original also built a two-server ring (one dead)
        # here, but never used it — the assertion always ran against the
        # live client's own ring, as it still does.
        memcached(19191) do |mc|
          ring = mc.send(:ring)
          assert_equal ring.servers.first.port, ring.server_for_key('test').port
        end
      end
    end

    it 'detect when a dead server is up again' do
      memcached(19997) do
        down_retry_delay = 0.5
        dc = Dalli::Client.new(['localhost:19997', 'localhost:19998'], :down_retry_delay => down_retry_delay)
        # Only 19997 is up initially; stats returns nil for the dead server.
        assert_equal 1, dc.stats.values.compact.count
        memcached(19998) do
          assert_equal 2, dc.stats.values.compact.count
        end
      end
    end
  end
end
dalli-2.7.9/test/test_sasl.rb 0000664 0000000 0000000 00000006413 13362715446 0016142 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
# SASL (binary-protocol authentication) tests. Several tests are disabled
# via the no-op `xit` helper below because they need a memcached binary
# built with --enable-sasl plus a provisioned SASL user (see the OSX note
# inline). Credentials are passed via MEMCACHE_USERNAME/MEMCACHE_PASSWORD
# env vars, client options, or memcached:// URIs.
describe 'Sasl' do
# https://github.com/seattlerb/minitest/issues/298
# NOTE: defining `xit` as a class-level no-op means these tests are never
# registered at all — they neither run nor show up as skipped.
def self.xit(msg, &block)
end
describe 'a server requiring authentication' do
before do
# Mocha mock standing in for a Dalli::Server; used by the URI tests below
# so no real connection is attempted.
@server = mock()
@server.stubs(:request).returns(true)
@server.stubs(:weight).returns(1)
@server.stubs(:name).returns("localhost:19124")
end
# NOTE(review): despite the description, this context sets *wrong*
# credentials (not absent ones) to exercise the auth-failure path.
describe 'without authentication credentials' do
before do
ENV['MEMCACHE_USERNAME'] = 'foo'
ENV['MEMCACHE_PASSWORD'] = 'wrongpwd'
end
after do
ENV['MEMCACHE_USERNAME'] = nil
ENV['MEMCACHE_PASSWORD'] = nil
end
xit 'gracefully handle authentication failures' do
memcached_sasl_persistent do |dc|
assert_error Dalli::DalliError, /32/ do
dc.set('abc', 123)
end
end
end
end
xit 'fail SASL authentication with wrong options' do
memcached_sasl_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", :username => 'testuser', :password => 'testtest')
assert_error Dalli::DalliError, /32/ do
dc.set('abc', 123)
end
end
end
# OSX: Create a SASL user for the memcached application like so:
#
# saslpasswd2 -a memcached -c testuser
#
# with password 'testtest'
describe 'in an authenticated environment' do
before do
ENV['MEMCACHE_USERNAME'] = 'testuser'
ENV['MEMCACHE_PASSWORD'] = 'testtest'
end
after do
ENV['MEMCACHE_USERNAME'] = nil
ENV['MEMCACHE_PASSWORD'] = nil
end
xit 'pass SASL authentication' do
memcached_sasl_persistent do |dc|
# I get "Dalli::DalliError: Error authenticating: 32" in OSX
# but SASL works on Heroku servers. YMMV.
assert_equal true, dc.set('abc', 123)
assert_equal 123, dc.get('abc')
results = dc.stats
assert_equal 1, results.size
assert_equal 38, results.values.first.size
end
end
end
xit 'pass SASL authentication with options' do
memcached_sasl_persistent do |dc, port|
dc = Dalli::Client.new("localhost:#{port}", sasl_credentials)
# I get "Dalli::DalliError: Error authenticating: 32" in OSX
# but SASL works on Heroku servers. YMMV.
assert_equal true, dc.set('abc', 123)
assert_equal 123, dc.get('abc')
results = dc.stats
assert_equal 1, results.size
assert_equal 38, results.values.first.size
end
end
# The two tests below only verify that credentials embedded in a
# memcached:// URI are parsed into Dalli::Server options; the server
# itself is the mock built in `before`.
it 'pass SASL as URI' do
Dalli::Server.expects(:new).with("localhost:19124",
:username => "testuser", :password => "testtest").returns(@server)
dc = Dalli::Client.new('memcached://testuser:testtest@localhost:19124')
dc.flush_all
end
it 'pass SASL as ring of URIs' do
Dalli::Server.expects(:new).with("localhost:19124",
:username => "testuser", :password => "testtest").returns(@server)
Dalli::Server.expects(:new).with("otherhost:19125",
:username => "testuser2", :password => "testtest2").returns(@server)
dc = Dalli::Client.new(['memcached://testuser:testtest@localhost:19124',
'memcached://testuser2:testtest2@otherhost:19125'])
dc.flush_all
end
end
end
dalli-2.7.9/test/test_serializer.rb 0000664 0000000 0000000 00000001401 13362715446 0017341 0 ustar 00root root 0000000 0000000 # encoding: utf-8
# frozen_string_literal: true
require_relative 'helper'
require 'json'
# Verifies that Dalli defaults to Marshal for value serialization and that
# a custom serializer (JSON here) can be supplied via the :serializer option.
#
# Fix: removed a pointless `begin ... end` wrapper (it had no rescue/ensure,
# so it did nothing).
describe 'Serializer' do
  it 'default to Marshal' do
    memcached(29198) do |dc|
      dc.set 1, 2
      assert_equal Marshal, dc.instance_variable_get('@ring').servers.first.serializer
    end
  end

  it 'support a custom serializer' do
    memcached(29198) do |dc, port|
      memcache = Dalli::Client.new("127.0.0.1:#{port}", :serializer => JSON)
      memcache.set 1, 2
      assert_equal JSON, memcache.instance_variable_get('@ring').servers.first.serializer
      # A fresh default (Marshal-based) client should still round-trip values.
      memcached(21956) do |newdc|
        assert newdc.set("json_test", {"foo" => "bar"})
        assert_equal({"foo" => "bar"}, newdc.get("json_test"))
      end
    end
  end
end
dalli-2.7.9/test/test_server.rb 0000664 0000000 0000000 00000011131 13362715446 0016477 0 ustar 00root root 0000000 0000000 # frozen_string_literal: true
require_relative 'helper'
# Unit tests for Dalli::Server: hostname/port/weight parsing (tcp, unix
# socket, IPv4/IPv6, FQDN), TTL sanitization, and max-value guarding.
#
# Fixes: (1) replaced deprecated Object-level `lambda { }.must_raise`
# expectations (removed in Minitest 6) with assert_raises; (2) the ttl
# assertions had expected/actual swapped — minitest's assert_equal takes
# the expected value first, so failure messages were misleading.
describe Dalli::Server do
  describe 'hostname parsing' do
    it 'handles unix socket with no weight' do
      s = Dalli::Server.new('/var/run/memcached/sock')
      assert_equal '/var/run/memcached/sock', s.hostname
      assert_equal 1, s.weight
      assert_equal :unix, s.socket_type
    end

    it 'handles unix socket with a weight' do
      s = Dalli::Server.new('/var/run/memcached/sock:2')
      assert_equal '/var/run/memcached/sock', s.hostname
      assert_equal 2, s.weight
      assert_equal :unix, s.socket_type
    end

    it 'handles no port or weight' do
      s = Dalli::Server.new('localhost')
      assert_equal 'localhost', s.hostname
      assert_equal 11211, s.port
      assert_equal 1, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles a port, but no weight' do
      s = Dalli::Server.new('localhost:11212')
      assert_equal 'localhost', s.hostname
      assert_equal 11212, s.port
      assert_equal 1, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles a port and a weight' do
      s = Dalli::Server.new('localhost:11212:2')
      assert_equal 'localhost', s.hostname
      assert_equal 11212, s.port
      assert_equal 2, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles ipv4 addresses' do
      s = Dalli::Server.new('127.0.0.1')
      assert_equal '127.0.0.1', s.hostname
      assert_equal 11211, s.port
      assert_equal 1, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles ipv6 addresses' do
      # IPv6 literals must be bracketed; the brackets are stripped.
      s = Dalli::Server.new('[::1]')
      assert_equal '::1', s.hostname
      assert_equal 11211, s.port
      assert_equal 1, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles ipv6 addresses with port' do
      s = Dalli::Server.new('[::1]:11212')
      assert_equal '::1', s.hostname
      assert_equal 11212, s.port
      assert_equal 1, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles ipv6 addresses with port and weight' do
      s = Dalli::Server.new('[::1]:11212:2')
      assert_equal '::1', s.hostname
      assert_equal 11212, s.port
      assert_equal 2, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles a FQDN' do
      s = Dalli::Server.new('my.fqdn.com')
      assert_equal 'my.fqdn.com', s.hostname
      assert_equal 11211, s.port
      assert_equal 1, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'handles a FQDN with port and weight' do
      s = Dalli::Server.new('my.fqdn.com:11212:2')
      assert_equal 'my.fqdn.com', s.hostname
      assert_equal 11212, s.port
      assert_equal 2, s.weight
      assert_equal :tcp, s.socket_type
    end

    it 'throws an exception if the hostname cannot be parsed' do
      assert_raises(Dalli::DalliError) { Dalli::Server.new('[]') }
      assert_raises(Dalli::DalliError) { Dalli::Server.new('my.fqdn.com:') }
      assert_raises(Dalli::DalliError) { Dalli::Server.new('my.fqdn.com:11212,:2') }
      assert_raises(Dalli::DalliError) { Dalli::Server.new('my.fqdn.com:11212:abc') }
    end
  end

  describe 'ttl translation' do
    # memcached treats TTLs > 30 days as absolute unix timestamps, so
    # sanitize_ttl converts large relative TTLs accordingly.
    it 'does not translate ttls under 30 days' do
      s = Dalli::Server.new('localhost')
      assert_equal 30*24*60*60, s.send(:sanitize_ttl, 30*24*60*60)
    end

    it 'translates ttls over 30 days into timestamps' do
      s = Dalli::Server.new('localhost')
      assert_equal Time.now.to_i + 30*24*60*60 + 1, s.send(:sanitize_ttl, 30*24*60*60 + 1)
    end

    it 'does not translate ttls which are already timestamps' do
      s = Dalli::Server.new('localhost')
      timestamp_ttl = Time.now.to_i + 60
      assert_equal timestamp_ttl, s.send(:sanitize_ttl, timestamp_ttl)
    end
  end

  describe 'guard_max_value' do
    # The 1 MiB default limit: under the limit the block runs; over it,
    # either a warning is logged or Dalli::ValueOverMaxSize is raised,
    # depending on :error_when_over_max_size.
    it 'yields when size is under max' do
      s = Dalli::Server.new('127.0.0.1')
      value = OpenStruct.new(:bytesize => 1_048_576)
      yielded = false
      s.send(:guard_max_value, :foo, value) do
        yielded = true
      end
      assert_equal yielded, true
    end

    it 'warns when size is over max' do
      s = Dalli::Server.new('127.0.0.1')
      value = OpenStruct.new(:bytesize => 1_048_577)
      Dalli.logger.expects(:warn).once.with("Value for foo over max size: 1048576 <= 1048577")
      s.send(:guard_max_value, :foo, value)
    end

    it 'throws when size is over max and error_over_max_size true' do
      s = Dalli::Server.new('127.0.0.1', :error_when_over_max_size => true)
      value = OpenStruct.new(:bytesize => 1_048_577)
      assert_raises(Dalli::ValueOverMaxSize) do
        s.send(:guard_max_value, :foo, value)
      end
    end
  end
end