rack-cache-1.17.0/.github/workflows/development.yml
name: Development
on: [push, pull_request]
jobs:
  test:
    name: ${{matrix.ruby}} on ${{matrix.os}} (${{matrix.gemfile}})
    runs-on: ${{matrix.os}}-latest
    continue-on-error: ${{matrix.experimental}}
    strategy:
      matrix:
        os:
          - ubuntu
          - macos
        ruby:
          - "2.7"
          - "3.0"
          - "3.1"
          - "3.2"
          - "3.3"
        gemfile:
          - gems/rack_v2.rb
          - gems/rack_v2-1.rb
          - gems/rack_v3.rb
        experimental: [false]
        include:
          - os: ubuntu
            ruby: truffleruby
            experimental: true
          - os: ubuntu
            ruby: jruby
            experimental: true
          - os: ubuntu
            ruby: head
            experimental: true
    env:
      BUNDLE_GEMFILE: ${{matrix.gemfile}}
    steps:
      - uses: actions/checkout@v3
      - name: Installing packages (ubuntu)
        if: matrix.os == 'ubuntu'
        run: sudo apt-get install libmemcached-dev
      - name: Installing packages (macos)
        if: matrix.os == 'macos'
        run: brew install libmemcached
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{matrix.ruby}}
          bundler-cache: true
      - run: bundle exec rake
rack-cache-1.17.0/.github/workflows/gh-pages.yml
name: Github Pages
on:
  push:
    branches:
      - main
permissions:
  contents: write
jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: '2.7'
          bundler-cache: true # runs 'bundle install' and caches installed gems automatically
      - name: Install and Build 🔧
        run: bundle exec rake doc:gh-pages
      - name: Deploy 🚀
        uses: JamesIves/github-pages-deploy-action@v4.3.3
        with:
          branch: gh-pages # The branch the action should deploy to.
          folder: doc/gh-pages # The folder the action should deploy.
rack-cache-1.17.0/.gitignore
\[.
tags
.vimrc
/dist
/coverage
/doc/api
/doc/*.png
/doc/*.pdf
/doc/*.svg
/doc/config
/doc/configuration.html
/doc/index.html
/doc/license.html
/doc/storage.html
/doc/faq.html
/doc/gh-pages
gems.locked
/pkg
rack-cache-1.17.0/CHANGES
## 1.13.0
* Support lowercase Vary and Age headers
## 1.12.0
* Add a fault_tolerant flag to fail-over to stale cache
## 1.11.1
* When ignoring parts of the query string, remove the query from the key entirely when all params are ignored
## 1.11.0
* Add a proc to allow ignoring parts of the query string in the key
## 1.10.0
* Pass Options To Underlying Storage Driver
* bump required ruby version to 2.3
## 1.9.0
* make purge not raise when not implemented
## 1.8.0
* Meta stores will purge keys when no entity store entries are found
## 1.7.2
* Fix key generation for requests with no query strings
## 1.7.0
* Meta stores now receive a third ttl argument to write when use_native_ttl is used.
## 1.6.1
* Revert 'No longer read responses from cache when we already have them'
## 1.6.0
* Noop backend
* No longer read responses from cache when we already have them
* renamed files from entitystore -> entity_store (metastore/cachecontrol/appengine) and added warns for old ones
## 1.5.1
* fix key generation for query strings that include encoded equals
## 1.5.0
* only catch StandardError and not Exception
## 1.4.3
* After overriding the REQUEST_METHOD, store the original request method in "rack.methodoverride.original_method"
## 1.4.1
* Ignore invalid Expires date as per RFC
## 1.4.0
* Not invalidating the cache for preflight CORS request
## 1.3.1 / October 2015
* Support Ruby 1.9
## 1.3 / October 2015
* Ruby 2.0 only
* Gracefully degrade when cache store goes offline
* allow_reload/revalidate is not enabled by default
* Make Rack::Cache multithread friendly
## 1.2 / March 2012
* Fix a cookie leak vulnerability affecting large numbers of Rails 3.x installs:
https://github.com/rtomayko/rack-cache/pull/52
* Never 304 on PUT or POST requests.
* Misc bundler and test tooling fixes.
## 1.1 / September 2011
* Allow (INM/IMS) validation requests through to backend on miss. Makes it
possible to use validation for private / uncacheable responses. A number of
people using Rails's stale?() helper reported that their validation logic was
never kicking in.
* Add rack env rack-cache.force-pass option to bypass rack-cache on
per request basis
* Fix an issue with memcache namespace not being set when using the
:namespace option instead of :prefix_key.
* Fix test failures due to MockResponse changes in recent Rack
version (issue #34)
## 1.0.3 / August 2011
* Fix bug passing options to memcached and dalli
* Document cache_key
## 1.0.1 / April 2011
* Added lib/rack-cache.rb to match package name for auto-requiring machinery.
* Fixed a number of issues caused by Rack::Cache not closing the body received
from the application. Rack::Lock and other middleware use body.close to
signal the true end of request processing so failure to call this method
can result in strange issues (e.g.,
"ThreadError: deadlock; recursive locking")
* Fixed a bug where Rack::Cache would blow up writing the rack env to the meta
store when the env contained an all uppercase key whose value wasn't
marshalable. Passenger and some other stuff write such keys apparently.
* The test suite has moved from test-spec to bacon. This is a short term
solution to the problem of not being able to run tests under Ruby 1.9.x.
The test suite will be moved to basic Test::Unit style sometime in the
future.
## 1.0 / December 2010
* Rack::Cache is 1.0 and will now maintain semantic versioning
* Add Dalli memcache client support and removed support for the unmaintained
memcache-client library. You will need to move your apps to Dalli before
upgrading rack-cache to 1.0.
## 0.5.3 / September 2010
* A matching If-Modified-Since is ignored if an If-None-Match is also provided
and doesn't match. This is in line with RFC 2616.
* Convert string status codes to integers before returning, to work around
badly behaved rack middleware and apps.
* Misc doc clean up.
## 0.5.2 / September 2009
* Exceptions raised from the metastore are not fatal. This makes a lot of
sense in most cases because it's okay for the cache to be down - it
shouldn't blow up your app.
## 0.5.1 / June 2009
* Added support for memcached clusters and other advanced
configuration provided by the memcache-client and memcached
libraries. The "metastore" and "entitystore" options can now be
set to a MemCache object or Memcached object:
memcache = MemCache.new(['127.1.1.1', '127.1.1.2'], :namespace => "/foo")
use Rack::Cache,
:metastore => memcache,
:entitystore => memcache
* Fix "memcached://" metastore URL handling. The "memcached" variation
blew up, the "memcache" version was fine.
## 0.5.0 / May 2009
* Added meta and entity store implementations based on the
memcache-client library. These are the default unless the memcached
library has already been required.
* The "allow_reload" and "allow_revalidate" options now default to
false instead of true. This means we break with RFC 2616 out of
the box but this is the expected configuration in a huge majority
of gateway cache scenarios. See the docs on configuration
options for more information on these options:
http://tomayko.com/src/rack-cache/configuration
* Added Google AppEngine memcache entity store and metastore
implementations. To use GAE's memcache with rack-cache, set the
"metastore" and "entitystore" options as follows:
use Rack::Cache,
:metastore => 'gae://cache-meta',
:entitystore => 'gae://cache-body'
The 'cache-meta' and 'cache-body' parts are memcache namespace
prefixes and should be set to different values.
## 0.4.0 / March 2009
* Ruby 1.9.1 / Rack 1.0 compatible.
* Invalidate cache entries that match the request URL on non-GET/HEAD
requests. i.e., POST, PUT, DELETE cause matching cache entries to
be invalidated. The cache entry is validated with the backend using
a conditional GET the next time it's requested.
* Implement "Cache-Control: max-age=N" request directive by forcing
validation when the max-age provided exceeds the age of the cache
entry. This can be disabled by setting the "allow_revalidate" option to
false.
* Properly implement "Cache-Control: no-cache" request directive by
performing a full reload. RFC 2616 states that when "no-cache" is
present in the request, the cache MUST NOT serve a stored response even
after successful validation. This is slightly different from the
"no-cache" directive in responses, which indicates that the cache must
first validate its entry with the origin. Previously, we implemented
"no-cache" on requests by passing so no new cache entry would be stored
based on the response. Now we treat it as a forced miss and enter the
response into the cache if it's cacheable. This can be disabled by
setting the "allow_reload" option to false.
* Assume identical semantics for the "Pragma: no-cache" request header
as the "Cache-Control: no-cache" directive described above.
* Less crazy logging. When the verbose option is set, a single log entry
is written with a comma separated list of trace events. For example, if
the cache was stale but validated, the following log entry would be
written: "cache: stale, valid, store". When the verbose option is false,
no logging occurs.
* Added "X-Rack-Cache" response header with the same comma separated trace
value as described above. This gives some visibility into how the cache
processed the request.
* Add support for canonicalized cache keys, as well as custom cache key
generators, which are specified in the options as :cache_key as either
any object that has a call() or as a block. Cache key generators get
passed a request object and return a cache key string.
## 0.3.0 / December 2008
* Add support for public and private cache control directives. Responses
marked as explicitly public are cached even when the request includes
an Authorization or Cookie header. Responses marked as explicitly private
are considered uncacheable.
* Added a "private_headers" option that dictates which request headers
trigger default "private" cache control processing. By default, the
Cookie and Authorization headers are included. Headers may be added or
removed as necessary to change the default private logic.
* Adhere to must-revalidate/proxy-revalidate cache control directives by
not assigning the default_ttl to responses that don't include freshness
information. This should let us begin using default_ttl more liberally
since we can control it using the must-revalidate/proxy-revalidate directives.
* Use the s-maxage Cache-Control value in preference to max-age when
present. The ttl= method now sets the s-maxage value instead of max-age.
Code that used ttl= to control freshness at the client needs to change
to set the max-age directive explicitly.
* Enable support for X-Sendfile middleware by responding to #to_path on
bodies served from disk storage. Adding the Rack::Sendfile component
upstream from Rack::Cache will result in cached bodies being served
directly by the web server (instead of being read in Ruby).
* BUG: MetaStore hits but EntityStore misses. This would 500 previously; now
we detect it and act as if the MetaStore missed as well.
* Implement low level #purge method on all concrete entity store
classes -- removes the entity body corresponding to the SHA1 key
provided and returns nil.
* Basically sane handling of HEAD requests. A HEAD request is never passed
through to the backend except when transitioning with pass!. This means
that the cache responds to HEAD requests without invoking the backend at
all when the cached entry is fresh. When no cache entry exists, or the
cached entry is stale and can be validated, the backend is invoked with
a GET request and the HEAD is handled right before the response
is delivered upstream.
* BUG: The Age response header was not being set properly when a stale
entry was validated. This would result in Age values that exceeded
the freshness lifetime in responses.
* BUG: A cached entry in a heap meta store could be unintentionally
modified by request processing since the cached objects were being
returned directly. The result was typically missing/incorrect header
values (e.g., missing Content-Type header). [dkubb]
* BUG: 304 responses should not include entity headers (especially
Content-Length). This is causing Safari/WebKit weirdness on 304
responses.
* BUG: The If-None-Match header was being ignored, causing the cache
to send 200 responses to matching conditional GET requests.
## 0.2.0 / 2008-10-24 / Initial Release
* Document events and transitions in `rack/cache/config/default.rb`
* Basic logging support (`trace`, `warn`, `info`, `error` from within Context)
* EntityStore: store entity bodies keyed by SHA
* MetaStore: store response headers keyed by URL
* Last-Modified/ETag validation
* Vary support
* Implement error! transition
* New Rack::Cache::Core
* memcached meta and entity store implementations
* URI based storage configuration
* Read options from Rack env if present (rack-cache.XXX keys)
* `object` is now `entry`
* Documentation framework and website
* Document storage areas and implementations
* Document configuration/events
## 0.1.0 / 2008-07-21 / Proof of concept (unreleased)
* Basic core with event support
* `#import` method for bringing in config files
* Freshness based expiration
* RFC 2616 If-Modified-Since based validation
* A horribly shitty storage back-end (Hash in mem)
* Don't cache hop-by-hop headers: Connection, Keep-Alive, Proxy-Authenticate,
Proxy-Authorization, TE, Trailers, Transfer-Encoding, Upgrade
rack-cache-1.17.0/MIT-LICENSE
Copyright (c) 2008 Ryan Tomayko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
rack-cache-1.17.0/README.md
Rack::Cache
===========
Rack::Cache is suitable as a quick drop-in component to enable HTTP caching for
Rack-based applications that produce freshness (`expires`, `cache-control`)
and/or validation (`last-modified`, `etag`) information:
* Standards-based (RFC 2616)
* Freshness/expiration based caching
* Validation (`if-modified-since` / `if-none-match`)
* `vary` support
* `cache-control` `public`, `private`, `max-age`, `s-maxage`, `must-revalidate`,
and `proxy-revalidate`.
* Portable: 100% Ruby / works with any Rack-enabled framework
* Disk, memcached, and heap memory storage backends
For more information about Rack::Cache features and usage, see:
https://rack.github.io/rack-cache/
Rack::Cache is not overly optimized for performance. The main goal of the
project is to provide a portable, easy-to-configure, and standards-based
caching solution for small to medium sized deployments. More sophisticated /
high-performance caching systems (e.g., Varnish, Squid, httpd/mod-cache) may be
more appropriate for large deployments with significant throughput requirements.
Installation
------------
gem install rack-cache
Basic Usage
-----------
`Rack::Cache` is implemented as a piece of Rack middleware and can be used with
any Rack-based application. If your application includes a rackup (`.ru`) file
or uses Rack::Builder to construct the application pipeline, simply require
and use as follows:
```Ruby
require 'rack/cache'
use Rack::Cache,
  metastore: 'file:/var/cache/rack/meta',
  entitystore: 'file:/var/cache/rack/body',
  verbose: true
run app
```
Assuming you've designed your backend application to take advantage of HTTP's
caching features, no further code or configuration is required for basic
caching.
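For reference, a minimal backend that emits such freshness and validation
headers might look like this (a sketch, not part of rack-cache itself; the
header values are illustrative):

```Ruby
require 'digest/sha1'

# A tiny Rack app that marks its response as publicly cacheable for five
# minutes and exposes an ETag validator for conditional GETs.
app = lambda do |env|
  body = "Hello from the backend\n"
  [200,
   { 'content-type'  => 'text/plain',
     'cache-control' => 'public, max-age=300',
     'etag'          => %("#{Digest::SHA1.hexdigest(body)}") },
   [body]]
end
```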
Using with Rails
----------------
```Ruby
# config/application.rb
config.action_dispatch.rack_cache = true
# or
config.action_dispatch.rack_cache = {
  verbose: true,
  metastore: 'file:/var/cache/rack/meta',
  entitystore: 'file:/var/cache/rack/body'
}
```
You should now see `Rack::Cache` listed in the middleware pipeline:
rake middleware
[more information](https://snippets.aktagon.com/snippets/302-how-to-setup-and-use-rack-cache-with-rails)
Using with Dalli
----------------
Dalli is a high performance memcached client for Ruby.
More information at: https://github.com/mperham/dalli
```Ruby
require 'dalli'
require 'rack/cache'
use Rack::Cache,
  verbose: true,
  metastore: "memcached://localhost:11211/meta",
  entitystore: "memcached://localhost:11211/body"
run app
```
Noop entity store
-----------------
Does not persist response bodies (no disk/memory used).
Responses from the cache will have an empty body.
Clients must ignore these empty cached responses (check for the `x-rack-cache` response header).
Streamed responses are currently not supported; a patch is needed.
```Ruby
require 'rack/cache'
use Rack::Cache,
  verbose: true,
  metastore:
  entitystore: "noop:/"
run app
```
Ignoring tracking parameters in cache keys
-----------------
It's fairly common for URLs to include tracking parameters which don't affect the content
of the page. Since Rack::Cache uses the full URL as part of the cache key, this
can cause unneeded churn in your cache. If you're using the default key class
`Rack::Cache::Key`, you can configure a proc to ignore certain keys/values like
so:
```Ruby
Rack::Cache::Key.query_string_ignore = proc { |k, v| k =~ /^(trk|utm)_/ }
```
License: MIT
[](https://github.com/rack/rack-cache/actions/workflows/development.yml)
rack-cache-1.17.0/Rakefile
require 'bundler/setup'
require 'bundler/gem_tasks'
require 'rake/clean'
require 'bump/tasks'
task :default => :test
CLEAN.include %w[coverage/ doc/api doc/gh-pages tags]
CLOBBER.include %w[dist]
desc 'Run tests'
task :test do
sh "bundle exec mtest test"
end
desc 'Generate test coverage report'
task :rcov do
sh "rcov -I.:lib:test test/*_test.rb"
end
# DOC =======================================================================
desc 'Build all documentation'
task :doc => %w[doc:api doc:markdown]
desc 'Build API documentation (doc/api)'
task 'doc:api' => 'doc/api/index.html'
file 'doc/api/index.html' => FileList['lib/**/*.rb'] do |f|
rm_rf 'doc/api'
sh((<<-SH).gsub(/[\s\n]+/, ' ').strip)
rdoc
--op doc/api
--charset utf8
--fmt hanna
--line-numbers
--main cache.rb
--title 'Rack::Cache API Documentation'
#{f.prerequisites.join(' ')}
SH
end
CLEAN.include 'doc/api'
desc 'Build markdown documentation files'
task 'doc:markdown'
FileList['doc/*.markdown'].each do |source|
dest = "doc/#{File.basename(source, '.markdown')}.html"
file dest => [source, 'doc/layout.html.erb'] do |f|
puts "markdown: #{source} -> #{dest}" if verbose
require 'erb' unless defined? ERB
template = File.read(source)
content = Markdown.new(ERB.new(template, 0, "%<>").result(binding), :smart).to_html
title = content.match("<h1>(.*)</h1>")[1] rescue ''
layout = ERB.new(File.read("doc/layout.html.erb"), 0, "%<>")
output = layout.result(binding)
File.open(dest, 'w') { |io| io.write(output) }
end
task 'doc:markdown' => dest
CLEAN.include dest
end
desc 'Move documentation to directory for github pages'
task 'doc:gh-pages' => [:clean, :doc] do
html_files = FileList['doc/*.markdown'].map { |file| file.gsub('.markdown', '.html')}
css_files = FileList['doc/*.css']
FileUtils.mkdir('doc/gh-pages')
FileUtils.cp_r('doc/api/', 'doc/gh-pages/api')
FileUtils.cp([*html_files, *css_files], 'doc/gh-pages')
end
desc 'Start the documentation development server'
task 'doc:server' do
sh 'cd doc && thin --rackup server.ru --port 3035 start'
end
rack-cache-1.17.0/doc/configuration.markdown
Configuration
=============
__Rack::Cache__ includes a configuration system that can be used to specify
fairly sophisticated cache policy on a global or per-request basis.
Setting Cache Options
---------------------
Cache options can be set when the __Rack::Cache__ object is created, or by
setting `rack-cache.*` keys in the Rack environment.
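For example (a sketch; `verbose` and `default_ttl` are options used elsewhere in
this source tree, and the per-request form assumes the corresponding
`rack-cache.*` environment keys):

    use Rack::Cache, verbose: true, default_ttl: 300

    # or, per request, from middleware sitting upstream of Rack::Cache:
    env['rack-cache.default_ttl'] = 300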
Headers:
<% response.headers.each do |key, value| %>
<%= key %>: <%= value %>
<% end %>
Params:
<% params.each do |key, value| %>
<%= key %>: <%= value || '(blank)' %>
<% end %>
rack-cache-1.17.0/gems.rb
source "https://rubygems.org"
gemspec
group :maintenance, optional: true do
gem "bake"
gem "bake-gem"
end
rack-cache-1.17.0/gems/rack_v2-1.rb
# frozen_string_literal: true
source 'https://rubygems.org'
gemspec path: "../"
gem 'rack', '~> 2.1.0'
rack-cache-1.17.0/gems/rack_v2.rb
# frozen_string_literal: true
source 'https://rubygems.org'
gemspec path: "../"
gem 'rack', '~> 2.0'
rack-cache-1.17.0/gems/rack_v3.rb
# frozen_string_literal: true
source 'https://rubygems.org'
gemspec path: "../"
gem 'rack', '~> 3.0'
rack-cache-1.17.0/lib/rack-cache.rb
require 'rack/cache'
rack-cache-1.17.0/lib/rack/cache.rb
require 'rack'
# = HTTP Caching For Rack
#
# Rack::Cache is suitable as a quick, drop-in component to enable HTTP caching
# for Rack-enabled applications that produce freshness (+expires+, +cache-control+)
# and/or validation (+last-modified+, +etag+) information.
#
# * Standards-based (RFC 2616 compliance)
# * Freshness/expiration based caching and validation
# * Supports HTTP Vary
# * Portable: 100% Ruby / works with any Rack-enabled framework
# * Disk, memcached, and heap memory storage backends
#
# === Usage
#
# Create with default options:
# require 'rack/cache'
# Rack::Cache.new(app, :verbose => true, :entitystore => 'file:cache')
#
# Within a rackup file (or with Rack::Builder):
# require 'rack/cache'
# use Rack::Cache do
# set :verbose, true
# set :metastore, 'memcached://localhost:11211/meta'
# set :entitystore, 'file:/var/cache/rack'
# end
# run app
module Rack::Cache
autoload :Request, 'rack/cache/request'
autoload :Response, 'rack/cache/response'
autoload :Context, 'rack/cache/context'
autoload :Storage, 'rack/cache/storage'
autoload :CacheControl, 'rack/cache/cache_control'
# Create a new Rack::Cache middleware component that fetches resources from
# the specified backend application. The +options+ Hash can be used to
# specify default configuration values (see attributes defined in
# Rack::Cache::Options for possible key/values). When a block is given, it
# is executed within the context of the newly create Rack::Cache::Context
# object.
def self.new(backend, options={}, &b)
Context.new(backend, options, &b)
end
end
rack-cache-1.17.0/lib/rack/cache/app_engine.rb
require 'base64'
module Rack::Cache::AppEngine
module MC
require 'java'
import com.google.appengine.api.memcache.Expiration;
import com.google.appengine.api.memcache.MemcacheService;
import com.google.appengine.api.memcache.MemcacheServiceFactory;
import com.google.appengine.api.memcache.Stats;
Service = MemcacheServiceFactory.getMemcacheService
end unless defined?(Rack::Cache::AppEngine::MC)
class MemCache
def initialize(options = {})
@cache = MC::Service
@cache.namespace = options[:namespace] if options[:namespace]
end
def contains?(key)
MC::Service.contains(key)
end
def get(key)
value = MC::Service.get(key)
Marshal.load(Base64.decode64(value)) if value
end
def put(key, value, ttl = nil)
expiration = ttl ? MC::Expiration.byDeltaSeconds(ttl) : nil
value = Base64.encode64(Marshal.dump(value)).gsub(/\n/, '')
MC::Service.put(key, value, expiration)
end
def namespace
MC::Service.getNamespace
end
def namespace=(value)
MC::Service.setNamespace(value.to_s)
end
def delete(key)
MC::Service.delete(key)
end
end
end
rack-cache-1.17.0/lib/rack/cache/appengine.rb
warn "use require 'rack/cache/app_engine'"
require 'rack/cache/app_engine'
rack-cache-1.17.0/lib/rack/cache/cache_control.rb
module Rack
module Cache
# Parses a cache-control header and exposes the directives as a Hash.
# Directives that do not have values are set to +true+.
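#
# A usage sketch (the header value shown is illustrative):
#
#   cc = Rack::Cache::CacheControl.new('public, max-age=600')
#   cc.public?  # => true
#   cc.max_age  # => 600
#   cc.to_s     # => "public, max-age=600"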
class CacheControl < Hash
def initialize(value=nil)
parse(value)
end
# Indicates that the response MAY be cached by any cache, even if it
# would normally be non-cacheable or cacheable only within a non-
# shared cache.
#
# A response may be considered public without this directive if the
# private directive is not set and the request does not include an
# Authorization header.
def public?
self['public']
end
# Indicates that all or part of the response message is intended for
# a single user and MUST NOT be cached by a shared cache. This
# allows an origin server to state that the specified parts of the
# response are intended for only one user and are not a valid
# response for requests by other users. A private (non-shared) cache
# MAY cache the response.
#
# Note: This usage of the word private only controls where the
# response may be cached, and cannot ensure the privacy of the
# message content.
def private?
self['private']
end
# When set in a response, a cache MUST NOT use the response to satisfy a
# subsequent request without successful revalidation with the origin
# server. This allows an origin server to prevent caching even by caches
# that have been configured to return stale responses to client requests.
#
# Note that this does not necessarily imply that the response may not be
# stored by the cache, only that the cache cannot serve it without first
# making a conditional GET request with the origin server.
#
# When set in a request, the server MUST NOT use a cached copy for its
# response. This has quite different semantics compared to the no-cache
# directive on responses. When the client specifies no-cache, it causes
# an end-to-end reload, forcing each cache to update their cached copies.
def no_cache?
self['no-cache']
end
# Indicates that the response MUST NOT be stored under any circumstances.
#
# The purpose of the no-store directive is to prevent the
# inadvertent release or retention of sensitive information (for
# example, on backup tapes). The no-store directive applies to the
# entire message, and MAY be sent either in a response or in a
# request. If sent in a request, a cache MUST NOT store any part of
# either this request or any response to it. If sent in a response,
# a cache MUST NOT store any part of either this response or the
# request that elicited it. This directive applies to both non-
# shared and shared caches. "MUST NOT store" in this context means
# that the cache MUST NOT intentionally store the information in
# non-volatile storage, and MUST make a best-effort attempt to
# remove the information from volatile storage as promptly as
# possible after forwarding it.
#
# The purpose of this directive is to meet the stated requirements
# of certain users and service authors who are concerned about
# accidental releases of information via unanticipated accesses to
# cache data structures. While the use of this directive might
# improve privacy in some cases, we caution that it is NOT in any
# way a reliable or sufficient mechanism for ensuring privacy. In
# particular, malicious or compromised caches might not recognize or
# obey this directive, and communications networks might be
# vulnerable to eavesdropping.
def no_store?
self['no-store']
end
# The expiration time of an entity MAY be specified by the origin
# server using the expires header (see section 14.21). Alternatively,
# it MAY be specified using the max-age directive in a response. When
# the max-age cache-control directive is present in a cached response,
# the response is stale if its current age is greater than the age
# value given (in seconds) at the time of a new request for that
# resource. The max-age directive on a response implies that the
# response is cacheable (i.e., "public") unless some other, more
# restrictive cache directive is also present.
#
# If a response includes both an expires header and a max-age
# directive, the max-age directive overrides the expires header, even
# if the expires header is more restrictive. This rule allows an origin
# server to provide, for a given response, a longer expiration time to
# an HTTP/1.1 (or later) cache than to an HTTP/1.0 cache. This might be
# useful if certain HTTP/1.0 caches improperly calculate ages or
# expiration times, perhaps due to desynchronized clocks.
#
# Many HTTP/1.0 cache implementations will treat an expires value that
# is less than or equal to the response Date value as being equivalent
# to the cache-control response directive "no-cache". If an HTTP/1.1
# cache receives such a response, and the response does not include a
# cache-control header field, it SHOULD consider the response to be
# non-cacheable in order to retain compatibility with HTTP/1.0 servers.
#
# When the max-age directive is included in the request, it indicates
# that the client is willing to accept a response whose age is no
# greater than the specified time in seconds.
def max_age
self['max-age'].to_i if key?('max-age')
end
# If a response includes an s-maxage directive, then for a shared
# cache (but not for a private cache), the maximum age specified by
# this directive overrides the maximum age specified by either the
# max-age directive or the expires header. The s-maxage directive
# also implies the semantics of the proxy-revalidate directive. i.e.,
# that the shared cache must not use the entry after it becomes stale
# to respond to a subsequent request without first revalidating it with
# the origin server. The s-maxage directive is always ignored by a
# private cache.
def shared_max_age
self['s-maxage'].to_i if key?('s-maxage')
end
alias_method :s_maxage, :shared_max_age
# If a response includes a r-maxage directive, then for a reverse cache
# (but not for a private or proxy cache), the maximum age specified by
# this directive overrides the maximum age specified by either the max-age
# directive, the s-maxage directive, or the expires header. The r-maxage
# directive also implies the semantics of the proxy-revalidate directive.
# i.e., that the reverse cache must not use the entry after it becomes
# stale to respond to a subsequent request without first revalidating it
# with the origin server. The r-maxage directive is always ignored by
# private and proxy caches.
def reverse_max_age
self['r-maxage'].to_i if key?('r-maxage')
end
alias_method :r_maxage, :reverse_max_age
# Because a cache MAY be configured to ignore a server's specified
# expiration time, and because a client request MAY include a max-
# stale directive (which has a similar effect), the protocol also
# includes a mechanism for the origin server to require revalidation
# of a cache entry on any subsequent use. When the must-revalidate
# directive is present in a response received by a cache, that cache
# MUST NOT use the entry after it becomes stale to respond to a
# subsequent request without first revalidating it with the origin
# server. (I.e., the cache MUST do an end-to-end revalidation every
# time, if, based solely on the origin server's expires or max-age
# value, the cached response is stale.)
#
# The must-revalidate directive is necessary to support reliable
# operation for certain protocol features. In all circumstances an
# HTTP/1.1 cache MUST obey the must-revalidate directive; in
# particular, if the cache cannot reach the origin server for any
# reason, it MUST generate a 504 (Gateway Timeout) response.
#
# Servers SHOULD send the must-revalidate directive if and only if
# failure to revalidate a request on the entity could result in
# incorrect operation, such as a silently unexecuted financial
# transaction. Recipients MUST NOT take any automated action that
# violates this directive, and MUST NOT automatically provide an
# unvalidated copy of the entity if revalidation fails.
def must_revalidate?
self['must-revalidate']
end
# The proxy-revalidate directive has the same meaning as the must-
# revalidate directive, except that it does not apply to non-shared
# user agent caches. It can be used on a response to an
# authenticated request to permit the user's cache to store and
# later return the response without needing to revalidate it (since
# it has already been authenticated once by that user), while still
# requiring proxies that service many users to revalidate each time
# (in order to make sure that each user has been authenticated).
# Note that such authenticated responses also need the public cache
# control directive in order to allow them to be cached at all.
def proxy_revalidate?
self['proxy-revalidate']
end
def to_s
bools, vals = [], []
each do |key,value|
if value == true
bools << key
elsif value
vals << "#{key}=#{value}"
end
end
(bools.sort + vals.sort).join(', ')
end
private
def parse(value)
return if value.nil? || value.empty?
value.delete(' ').split(',').each do |part|
next if part.empty?
name, value = part.split('=', 2)
self[name.downcase] = (value || true) unless name.empty?
end
self
end
end
end
end
rack-cache-1.17.0/lib/rack/cache/cachecontrol.rb
warn "use require 'rack/cache/cache_control'"
require 'rack/cache/cache_control'
rack-cache-1.17.0/lib/rack/cache/context.rb
require 'rack/cache/options'
require 'rack/cache/request'
require 'rack/cache/response'
require 'rack/cache/storage'
module Rack::Cache
# Implements Rack's middleware interface and provides the context for all
# cache logic, including the core logic engine.
class Context
include Rack::Cache::Options
# Array of trace Symbols
attr_reader :trace
# The Rack application object immediately downstream.
attr_reader :backend
def initialize(backend, options={})
@backend = backend
@trace = []
@env = nil
@options = options
initialize_options options
yield self if block_given?
@private_header_keys =
private_headers.map { |name| "HTTP_#{name.upcase.tr('-', '_')}" }
end
# The configured MetaStore instance. Changing the rack-cache.metastore
# value affects the result of this method immediately.
def metastore
uri = options['rack-cache.metastore']
storage.resolve_metastore_uri(uri, @options)
end
# The configured EntityStore instance. Changing the rack-cache.entitystore
# value affects the result of this method immediately.
def entitystore
uri = options['rack-cache.entitystore']
storage.resolve_entitystore_uri(uri, @options)
end
# The Rack call interface. The receiver acts as a prototype and runs
# each request in a dup object unless the +rack.run_once+ variable is
# set in the environment.
def call(env)
if env['rack.run_once'] && !env['rack.multithread']
call! env
else
clone.call! env
end
end
# The real Rack call interface. The caching logic is performed within
# the context of the receiver.
def call!(env)
@trace = []
@default_options.each { |k,v| env[k] ||= v }
@env = env
@request = Request.new(@env.dup.freeze)
response =
if @request.get? || @request.head?
if !@env['HTTP_EXPECT'] && !@env['rack-cache.force-pass']
lookup
else
pass
end
else
if @request.options?
pass
else
invalidate
end
end
# log trace and set x-rack-cache tracing header
trace = @trace.join(', ')
response.headers['x-rack-cache'] = trace
# write log message to rack.errors
if verbose?
message = "cache: [%s %s] %s\n" %
[@request.request_method, @request.fullpath, trace]
log_info(message)
end
# tidy up response a bit
if (@request.get? || @request.head?) && not_modified?(response)
response.not_modified!
end
if @request.head?
response.body.close if response.body.respond_to?(:close)
response.body = []
end
response.to_a
end
private
# Record that an event took place.
def record(event)
@trace << event
end
# Does the request include authorization or other sensitive information
# that should cause the response to be considered private by default?
# Private responses are not stored in the cache.
def private_request?
@private_header_keys.any? { |key| @env.key?(key) }
end
# Determine if the #response validators (etag, last-modified) matches
# a conditional value specified in #request.
def not_modified?(response)
last_modified = @request.env['HTTP_IF_MODIFIED_SINCE']
if etags = @request.env['HTTP_IF_NONE_MATCH']
etags = etags.split(/\s*,\s*/)
(etags.include?(response.etag) || etags.include?('*')) && (!last_modified || response.last_modified == last_modified)
elsif last_modified
response.last_modified == last_modified
end
end
# Whether the cache entry is "fresh enough" to satisfy the request.
def fresh_enough?(entry)
if entry.fresh?
if allow_revalidate? && max_age = @request.cache_control.max_age
max_age > 0 && max_age >= entry.age
else
true
end
end
end
# Delegate the request to the backend and create the response.
def forward
Response.new(*backend.call(@env))
end
# The request is sent to the backend, and the backend's response is sent
# to the client, but is not entered into the cache.
def pass
record :pass
forward
end
# Invalidate POST, PUT, DELETE and all methods not understood by this cache
# See RFC2616 13.10
def invalidate
metastore.invalidate(@request, entitystore)
rescue => e
log_error(e)
pass
else
record :invalidate
pass
end
# Try to serve the response from cache. When a matching cache entry is
# found and is fresh, use it as the response without forwarding any request
# to the backend. When a matching cache entry is found but is stale, attempt
# to #validate the entry with the backend using conditional GET.
# If validation raises an exception and fault tolerant caching is enabled,
# serve the stale cache entry.
# When no matching cache entry is found, trigger miss processing.
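# (A configuration sketch: assuming the option name matches the fault_tolerant?
# predicate used below, `use Rack::Cache, fault_tolerant: true` enables the
# stale fail-over.)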
def lookup
if @request.no_cache? && allow_reload?
record :reload
fetch
else
begin
entry = metastore.lookup(@request, entitystore)
rescue => e
log_error(e)
return pass
end
if entry
if fresh_enough?(entry)
record :fresh
entry.headers['age'] = entry.age.to_s
entry
else
record :stale
if fault_tolerant?
validate_with_stale_cache_failover(entry)
else
validate(entry)
end
end
else
record :miss
fetch
end
end
end
# Returns stale cache on exception.
def validate_with_stale_cache_failover(entry)
validate(entry)
rescue => e
record :connnection_failed
age = entry.age.to_s
entry.headers['age'] = age
record "Fail-over to stale cache data with age #{age} due to #{e.class.name}: #{e}"
entry
end
# Validate that the cache entry is fresh. The original request is used
# as a template for a conditional GET request with the backend.
def validate(entry)
# send no head requests because we want content
convert_head_to_get!
# add our cached last-modified validator to the environment
@env['HTTP_IF_MODIFIED_SINCE'] = entry.last_modified
# Add our cached etag validator to the environment.
# We keep the etags from the client to handle the case when the client
# has a different private valid entry which is not cached here.
cached_etags = entry.etag.to_s.split(/\s*,\s*/)
request_etags = @request.env['HTTP_IF_NONE_MATCH'].to_s.split(/\s*,\s*/)
etags = (cached_etags + request_etags).uniq
@env['HTTP_IF_NONE_MATCH'] = etags.empty? ? nil : etags.join(', ')
response = forward
if response.status == 304
record :valid
# Check if the response validated which is not cached here
etag = response.headers['etag']
return response if etag && request_etags.include?(etag) && !cached_etags.include?(etag)
entry = entry.dup
entry.headers.delete('date')
%w[Date expires cache-control etag last-modified].each do |name|
next unless value = response.headers[name]
entry.headers[name] = value
end
# even though it's empty, be sure to close the response body from upstream
# because middleware use close to signal end of response
response.body.close if response.body.respond_to?(:close)
response = entry
else
record :invalid
end
store(response) if response.cacheable?
response
end
# The cache missed or a reload is required. Forward the request to the
# backend and determine whether the response should be stored. This allows
# conditional / validation requests through to the backend but performs no
# caching of the response when the backend returns a 304.
def fetch
# send no head requests because we want content
convert_head_to_get!
response = forward
# Mark the response as explicitly private if any of the private
# request headers are present and the response was not explicitly
# declared public.
if private_request? && !response.cache_control.public?
response.private = true
elsif default_ttl > 0 && response.ttl.nil? && !response.cache_control.must_revalidate?
# assign a default TTL for the cache entry if none was specified in
# the response; the must-revalidate cache control directive disables
# default ttl assignment.
response.ttl = default_ttl
end
store(response) if response.cacheable?
response
end
# Write the response to the cache.
def store(response)
strip_ignore_headers(response)
metastore.store(@request, response, entitystore)
response.headers['age'] = response.age.to_s
rescue => e
log_error(e)
nil
else
record :store
end
# Remove all ignored response headers before writing to the cache.
def strip_ignore_headers(response)
stripped_values = ignore_headers.map { |name| response.headers.delete(name) }
record :ignore if stripped_values.any?
end
def log_error(exception)
message = "cache error: #{exception.message}\n#{exception.backtrace.join("\n")}\n"
log(:error, message)
end
def log_info(message)
log(:info, message)
end
def log(level, message)
if @env['rack.logger']
@env['rack.logger'].send(level, message)
else
@env['rack.errors'].write(message)
end
end
# send no head requests because we want content
def convert_head_to_get!
if @env['REQUEST_METHOD'] == 'HEAD'
@env['REQUEST_METHOD'] = 'GET'
@env['rack.methodoverride.original_method'] = 'HEAD'
end
end
end
end
rack-cache-1.17.0/lib/rack/cache/entity_store.rb
require 'digest/sha1'
module Rack::Cache
# Entity stores are used to cache response bodies across requests. All
# implementations are required to calculate a SHA checksum of the data written
# which becomes the response body's key.
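#
# A usage sketch with the Heap store (the body array is illustrative):
#
#   store = Rack::Cache::EntityStore::Heap.new
#   key, size = store.write(["hello"])  # => [SHA1 hex digest of "hello", 5]
#   store.read(key)                     # => "hello"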
class EntityStore
# Read body calculating the SHA1 checksum and size while
# yielding each chunk to the block. If the body responds to close,
# call it after iteration is complete. Return a two-tuple of the form:
# [ hexdigest, size ].
def slurp(body)
digest, size = Digest::SHA1.new, 0
body.each do |part|
size += bytesize(part)
digest << part
yield part
end
body.close if body.respond_to? :close
[digest.hexdigest, size]
end
if ''.respond_to?(:bytesize)
def bytesize(string); string.bytesize; end
else
def bytesize(string); string.size; end
end
private :slurp, :bytesize
# Stores entity bodies on the heap using a Hash object.
class Heap < EntityStore
# Create the store with the specified backing Hash.
def initialize(hash={}, options = {})
@hash = hash
@options = options
end
# Determine whether the response body with the specified key (SHA1)
# exists in the store.
def exist?(key)
@hash.include?(key)
end
# Return an object suitable for use as a Rack response body for the
# specified key.
def open(key)
(body = @hash[key]) && body.dup
end
# Read all data associated with the given key and return as a single
# String.
def read(key)
(body = @hash[key]) && body.join
end
# Write the Rack response body immediately and return the SHA1 key.
def write(body, ttl=nil)
buf = []
key, size = slurp(body) { |part| buf << part }
@hash[key] = buf
[key, size]
end
# Remove the body corresponding to key; return nil.
def purge(key)
@hash.delete(key)
nil
end
def self.resolve(uri, options = {})
new({}, options)
end
end
HEAP = Heap
MEM = Heap
# Stores entity bodies on disk at the specified path.
class Disk < EntityStore
# Path where entities should be stored. This directory is
# created the first time the store is instantiated if it does not
# already exist.
attr_reader :root
def initialize(root)
@root = root
FileUtils.mkdir_p root, :mode => 0755
end
def exist?(key)
File.exist?(body_path(key))
end
def read(key)
File.open(body_path(key), 'rb') { |f| f.read }
rescue Errno::ENOENT
nil
end
class Body < ::File #:nodoc:
def each
while part = read(8192)
yield part
end
end
alias_method :to_path, :path
end
# Open the entity body and return an IO object. The IO object's
# each method is overridden to read 8K chunks instead of lines.
def open(key)
Body.open(body_path(key), 'rb')
rescue Errno::ENOENT
nil
end
def write(body, ttl=nil)
filename = ['buf', $$, Thread.current.object_id].join('-')
temp_file = storage_path(filename)
key, size =
File.open(temp_file, 'wb') { |dest|
slurp(body) { |part| dest.write(part) }
}
path = body_path(key)
if File.exist?(path)
File.unlink temp_file
else
FileUtils.mkdir_p File.dirname(path), :mode => 0755
FileUtils.mv temp_file, path
end
[key, size]
end
def purge(key)
File.unlink body_path(key)
nil
rescue Errno::ENOENT
nil
end
protected
def storage_path(stem)
File.join root, stem
end
def spread(key)
key = key.dup
key[2,0] = '/'
key
end
def body_path(key)
storage_path spread(key)
end
def self.resolve(uri)
path = File.expand_path(uri.opaque || uri.path)
new path
end
end
DISK = Disk
FILE = Disk
# Base class for memcached entity stores.
class MemCacheBase < EntityStore
# The underlying Memcached instance used to communicate with the
# memcached daemon.
attr_reader :cache
extend Rack::Utils
def open(key)
data = read(key)
data && [data]
end
def self.resolve(uri)
if uri.respond_to?(:scheme)
server = "#{uri.host}:#{uri.port || '11211'}"
options = parse_query(uri.query)
options.keys.each do |key|
value =
case value = options.delete(key)
when 'true' ; true
when 'false' ; false
else value.to_sym
end
options[key.to_sym] = value
end
options[:namespace] = uri.path.sub(/^\//, '')
new server, options
else
# if the object provided is not a URI, pass it straight through
# to the underlying implementation.
new uri
end
end
end
# Uses the Dalli ruby library. This is the default unless
# the memcached library has already been required.
class Dalli < MemCacheBase
def initialize(server="localhost:11211", options={})
@cache =
if server.respond_to?(:stats)
server
else
require 'dalli'
::Dalli::Client.new(server, options)
end
end
def exist?(key)
!cache.get(key).nil?
end
def read(key)
data = cache.get(key)
data.force_encoding('BINARY') if data.respond_to?(:force_encoding)
data
end
def write(body, ttl=nil)
buf = StringIO.new
key, size = slurp(body){|part| buf.write(part) }
[key, size] if cache.set(key, buf.string, ttl)
end
def purge(key)
cache.delete(key)
nil
end
end
# Uses the memcached client library. The ruby based memcache-client is used
# in preference to this store unless the memcached library has already been
# required.
class MemCached < MemCacheBase
def initialize(server="localhost:11211", options={})
options[:prefix_key] ||= options.delete(:namespace) if options.key?(:namespace)
@cache =
if server.respond_to?(:stats)
server
else
require 'memcached'
::Memcached.new(server, options)
end
end
def exist?(key)
cache.append(key, '')
true
rescue ::Memcached::NotStored
false
end
def read(key)
cache.get(key, false)
rescue ::Memcached::NotFound
nil
end
def write(body, ttl=0)
buf = StringIO.new
key, size = slurp(body){|part| buf.write(part) }
cache.set(key, buf.string, ttl, false)
[key, size]
end
def purge(key)
cache.delete(key)
nil
rescue ::Memcached::NotFound
nil
end
end
MEMCACHE = Dalli
MEMCACHED = MEMCACHE
class GAEStore < EntityStore
attr_reader :cache
def initialize(options = {})
require 'rack/cache/app_engine'
@cache = Rack::Cache::AppEngine::MemCache.new(options)
end
def exist?(key)
cache.contains?(key)
end
def read(key)
cache.get(key)
end
def open(key)
if data = read(key)
[data]
else
nil
end
end
def write(body, ttl=nil)
buf = StringIO.new
key, size = slurp(body){|part| buf.write(part) }
cache.put(key, buf.string, ttl)
[key, size]
end
def purge(key)
cache.delete(key)
nil
end
def self.resolve(uri)
self.new(:namespace => uri.host)
end
end
GAECACHE = GAEStore
GAE = GAEStore
# Noop Entity Store backend.
#
# Set `entitystore` to 'noop:/'.
# Does not persist response bodies (no disk/memory used).
# Responses from the cache will have an empty body.
# Clients must ignore these empty cached responses (check for the x-rack-cache response header).
# Streamed responses are currently not supported; a patch is needed.
#
class Noop < EntityStore
def exist?(key)
true
end
def read(key)
''
end
def open(key)
[]
end
def write(body, ttl=nil)
key, size = slurp(body) { |part| part }
[key, size]
end
def purge(key)
nil
end
def self.resolve(uri)
new
end
end
NOOP = Noop
end
end
rack-cache-1.17.0/lib/rack/cache/entitystore.rb
warn "use require 'rack/cache/entity_store'"
require 'rack/cache/entity_store'
rack-cache-1.17.0/lib/rack/cache/headers.rb
module Rack::Cache
begin
# For `Rack::Headers` (Rack 3+):
require "rack/headers"
Headers = ::Rack::Headers
def self.Headers(headers)
Headers[headers]
end
rescue LoadError
# For `Rack::Utils::HeaderHash`:
require "rack/utils"
Headers = ::Rack::Utils::HeaderHash
def self.Headers(headers)
if headers.is_a?(Headers) && !headers.frozen?
return headers
else
return Headers.new(headers)
end
end
end
end
rack-cache-1.17.0/lib/rack/cache/key.rb
require 'rack/utils'
module Rack::Cache
class Key
include Rack::Utils
# A proc for ignoring parts of query strings when generating a key. This is
# useful when you have parameters like `utm` or `trk` that don't affect the
# content on the page and are unique per-visitor or campaign. Parameters
# like these will be part of the key and cause a lot of churn.
#
# The block will be passed a key and value which are the name and value of
# that parameter.
#
# Example:
# `Rack::Cache::Key.query_string_ignore = proc { |k, v| k =~ /^(trk|utm)_/ }`
#
class << self
attr_accessor :query_string_ignore
end
# Implement .call, since it seems like the "Rack-y" thing to do. Plus, it
# opens the door for cache key generators to just be blocks.
def self.call(request)
new(request).generate
end
def initialize(request)
@request = request
end
# Generate a normalized cache key for the request.
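# For example (a sketch): a request for
#   http://example.org/app/search?b=2&a=1
# where "/app" is the script name generates
#   "http://example.org/app/search?a=1&b=2"
# with the default port omitted and the query parameters sorted.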
def generate
parts = []
parts << @request.scheme << "://"
parts << @request.host
if @request.scheme == "https" && @request.port != 443 ||
@request.scheme == "http" && @request.port != 80
parts << ":" << @request.port.to_s
end
parts << @request.script_name
parts << @request.path_info
if qs = query_string
parts << "?"
parts << qs
end
parts.join
end
private
# Build a normalized query string by alphabetizing all keys/values
# and applying consistent escaping.
def query_string
return nil if @request.query_string.to_s.empty?
parts = @request.query_string.split(/[&;] */n)
parts.map! { |p| p.split('=', 2).map!{ |s| unescape(s) } }
parts.sort!
parts.reject!(&self.class.query_string_ignore)
parts.map! { |k,v| "#{escape(k)}=#{escape(v)}" }
parts.empty? ? nil : parts.join('&')
end
end
end
rack-cache-1.17.0/lib/rack/cache/meta_store.rb
require 'fileutils'
require 'digest/sha1'
require 'rack/utils'
require 'rack/cache/key'
module Rack::Cache
# The MetaStore is responsible for storing meta information about a
# request/response pair keyed by the request's URL.
#
# The meta store keeps a list of request/response pairs for each canonical
# request URL. A request/response pair is a two element Array of the form:
# [request, response]
#
# The +request+ element is a Hash of Rack environment keys. Only protocol
# keys (i.e., those that start with "HTTP_") are stored. The +response+
# element is a Hash of cached HTTP response headers for the paired request.
#
# The MetaStore class is abstract and should not be instantiated
# directly. Concrete subclasses should implement the protected #read,
# #write, and #purge methods. Care has been taken to keep these low-level
# methods dumb and straight-forward to implement.
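#
# A rough sketch of how a meta store cooperates with an entity store
# (request and response are Rack::Cache::Request/Response objects):
#
#   meta   = Rack::Cache::MetaStore::Heap.new
#   entity = Rack::Cache::EntityStore::Heap.new
#   meta.store(request, response, entity)  # body -> entity store, headers -> meta store
#   meta.lookup(request, entity)           # => Rack::Cache::Response or nil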
class MetaStore
# Locate a cached response for the request provided. Returns a
# Rack::Cache::Response object if the cache hits or nil if no cache entry
# was found.
def lookup(request, entity_store)
key = cache_key(request)
entries = read(key)
# bail out if we have nothing cached
return nil if entries.empty?
# find a cached entry that matches the request.
env = request.env
match = entries.detect{ |req,res| requests_match?(res['vary'], env, req) }
return nil if match.nil?
_, res = match
entity_key = res['x-content-digest']
if entity_key && body = entity_store.open(entity_key)
restore_response(res, body)
else
# the metastore referenced an entity that doesn't exist in
# the entitystore, purge the entry from the meta-store
begin
purge(key)
rescue NotImplementedError
@@warned_on_purge ||= begin
warn "WARNING: Future releases may require purge implementation for #{self.class.name}"
true
end
nil
end
end
end
# Write a cache entry to the store under the given key. Existing
# entries are read and any that match the response are removed.
# This method calls #write with the new list of cache entries.
def store(request, response, entity_store)
key = cache_key(request)
stored_env = persist_request(request)
# write the response body to the entity store if this is the
# original response.
if response.headers['x-content-digest'].nil?
if request.env['rack-cache.use_native_ttl'] && response.fresh?
digest, size = entity_store.write(response.body, response.ttl)
else
digest, size = entity_store.write(response.body)
end
response.headers['x-content-digest'] = digest
response.headers['content-length'] = size.to_s unless response.headers['Transfer-Encoding']
# If the entitystore backend is a Noop, do not try to read the body from the backend, it always returns an empty array
unless entity_store.is_a? Rack::Cache::EntityStore::Noop
# A stream body can only be read once and is currently closed by #write.
# (To avoid having to keep giant objects in memory when writing to disk cache
# the body is never converted to a single string)
# We cannot always rely on the body being re-readable,
# so we have to read it from the cache.
# BUG: if the cache was unable to store a stream, the stream will be closed
# and rack will try to read it again, resulting in hard to track down exception
response.body = entity_store.open(digest) || response.body
end
end
# read existing cache entries, remove non-varying, and add this one to
# the list
vary = response.vary
entries =
read(key).reject do |env, res|
(vary == (res['vary'])) &&
requests_match?(vary, env, stored_env)
end
headers = persist_response(response)
headers.delete('age')
entries.unshift [stored_env, headers]
if request.env['rack-cache.use_native_ttl'] && response.fresh?
write key, entries, response.ttl
else
write key, entries
end
key
end
# Generate a cache key for the request.
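#
# A custom generator can be supplied through the Rack environment, for
# example (illustrative only):
#
#   env['rack-cache.cache_key'] = lambda { |request| request.url }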
def cache_key(request)
keygen = request.env['rack-cache.cache_key'] || Key
keygen.call(request)
end
# Invalidate all cache entries that match the request.
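# Fresh entries are expired in place and written back; cached bodies in
# the entity store are left untouched.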
def invalidate(request, entity_store)
modified = false
key = cache_key(request)
entries =
read(key).map do |req, res|
response = restore_response(res)
if response.fresh?
response.expire!
modified = true
end
[req, persist_response(response)]
end
write key, entries if modified
end
private
# Extract the environment Hash from +request+ while making any
# necessary modifications in preparation for persistence. The Hash
# returned must be marshalable.
def persist_request(request)
env = request.env.dup
env.reject! { |key,val| key =~ /[^0-9A-Z_]/ || !val.respond_to?(:to_str) }
env
end
# Converts a stored response hash into a Response object. The caller
# is responsible for loading and passing the body if needed.
def restore_response(hash, body=[])
status = hash.delete('x-status').to_i
Rack::Cache::Response.new(status, hash, body)
end
def persist_response(response)
hash = response.headers.dup
hash['x-status'] = response.status.to_s
hash
end
# Determine whether the two environment hashes are non-varying based on
# the vary response header value provided.
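#
# For example, with a vary value of "Accept-Encoding", two environments
# match only when their HTTP_ACCEPT_ENCODING entries are equal:
#
#   requests_match?("Accept-Encoding",
#     {"HTTP_ACCEPT_ENCODING" => "gzip"},
#     {"HTTP_ACCEPT_ENCODING" => "gzip"})   # => true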
def requests_match?(vary, env1, env2)
return true if vary.nil? || vary == ''
vary.split(/[\s,]+/).all? do |header|
key = "HTTP_#{header.upcase.tr('-', '_')}"
env1[key] == env2[key]
end
end
protected
# Locate all cached request/response pairs that match the specified
# URL key. The result must be an Array of all cached request/response
# pairs. An empty Array must be returned if nothing is cached for
# the specified key.
def read(key)
raise NotImplementedError
end
# Store an Array of request/response pairs for the given key. Concrete
# implementations should not attempt to filter or concatenate the
# list in any way.
def write(key, negotiations, ttl = nil)
raise NotImplementedError
end
# Remove all cached entries at the key specified. No error is raised
# when the key does not exist.
def purge(key)
raise NotImplementedError
end
private
# Generate a SHA1 hex digest for the specified string. This is a
# simple utility method for meta store implementations.
def hexdigest(data)
Digest::SHA1.hexdigest(data)
end
public
# Concrete MetaStore implementation that uses a simple Hash to store
# request/response pairs on the heap.
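#
# A minimal sketch of direct construction (the empty seed Hash is the
# default):
#
#   store = Rack::Cache::MetaStore::Heap.new
#   store.to_hash   # => {}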
class Heap < MetaStore
def initialize(hash={}, options = {})
@hash = hash
@options = options
end
def read(key)
if data = @hash[key]
Marshal.load(data)
else
[]
end
end
def write(key, entries, ttl = nil)
@hash[key] = Marshal.dump(entries)
end
def purge(key)
@hash.delete(key)
nil
end
def to_hash
@hash
end
def self.resolve(uri, options = {})
new({}, options)
end
end
HEAP = Heap
MEM = HEAP
# Concrete MetaStore implementation that stores request/response
# pairs on disk.
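#
# A sketch of direct construction; the path below is an assumption:
#
#   store = Rack::Cache::MetaStore::Disk.new('/var/cache/rack/meta')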
class Disk < MetaStore
attr_reader :root
def initialize(root="/tmp/rack-cache/meta-#{ARGV[0]}")
@root = File.expand_path(root)
FileUtils.mkdir_p(root, :mode => 0755)
end
def read(key)
path = key_path(key)
File.open(path, 'rb') { |io| Marshal.load(io) }
rescue Errno::ENOENT, IOError
[]
end
def write(key, entries, ttl = nil)
tries = 0
begin
path = key_path(key)
File.open(path, 'wb') { |io| Marshal.dump(entries, io, -1) }
rescue Errno::ENOENT, IOError
Dir.mkdir(File.dirname(path), 0755)
retry if (tries += 1) == 1
end
end
def purge(key)
path = key_path(key)
File.unlink(path)
nil
rescue Errno::ENOENT, IOError
nil
end
private
def key_path(key)
File.join(root, spread(hexdigest(key)))
end
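# Insert a directory separator into the hex digest so entries are spread
# across subdirectories, e.g. "deadbeef..." becomes "de/adbeef..." for n=2.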
def spread(sha, n=2)
sha = sha.dup
sha[n,0] = '/'
sha
end
public
def self.resolve(uri)
path = File.expand_path(uri.opaque || uri.path)
new path
end
end
DISK = Disk
FILE = Disk
# Stores request/response pairs in memcached. Keys are not stored
# directly since memcached has a 250-byte limit on key names. Instead,
# the SHA1 hexdigest of the key is used.
class MemCacheBase < MetaStore
extend Rack::Utils
# The MemCache object used to communicate with the memcached
# daemon.
attr_reader :cache
# Create MemCache store for the given URI. The URI must specify
# a host and may specify a port, namespace, and options:
#
# memcached://example.com:11211/namespace?opt1=val1&opt2=val2
#
# Query parameter names and values are documented with the memcached
# library: http://tinyurl.com/4upqnd
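#
# For example (illustrative only):
#
#   MetaStore::MEMCACHE.resolve(URI.parse("memcached://localhost:11211/ns"))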
def self.resolve(uri)
if uri.respond_to?(:scheme)
server = "#{uri.host}:#{uri.port || '11211'}"
options = parse_query(uri.query)
options.keys.each do |key|
value =
case value = options.delete(key)
when 'true' ; true
when 'false' ; false
else value.to_sym
end
options[key.to_sym] = value
end
options[:namespace] = uri.path.to_s.sub(/^\//, '')
new server, options
else
# if the object provided is not a URI, pass it straight through
# to the underlying implementation.
new uri
end
end
end
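# Concrete MetaStore implementation backed by the dalli client gem.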
class Dalli < MemCacheBase
def initialize(server="localhost:11211", options={})
@cache =
if server.respond_to?(:stats)
server
else
require 'dalli'
::Dalli::Client.new(server, options)
end
end
def read(key)
key = hexdigest(key)
cache.get(key) || []
end
# Default TTL to zero, interpreted as "don't expire" by Memcached.
def write(key, entries, ttl = 0)
key = hexdigest(key)
cache.set(key, entries, ttl)
end
def purge(key)
cache.delete(hexdigest(key))
nil
end
end
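# Concrete MetaStore implementation backed by the memcached client gem.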
class MemCached < MemCacheBase
# The Memcached instance used to communicate with the memcached
# daemon.
attr_reader :cache
def initialize(server="localhost:11211", options={})
options[:prefix_key] ||= options.delete(:namespace) if options.key?(:namespace)
@cache =
if server.respond_to?(:stats)
server
else
require 'memcached'
Memcached.new(server, options)
end
end
def read(key)
key = hexdigest(key)
cache.get(key)
rescue Memcached::NotFound
[]
end
# Default TTL to zero, interpreted as "don't expire" by Memcached.
def write(key, entries, ttl = 0)
key = hexdigest(key)
cache.set(key, entries, ttl)
end
def purge(key)
key = hexdigest(key)
cache.delete(key)
nil
rescue Memcached::NotFound
nil
end
end
MEMCACHE =
if defined?(::Memcached)
MemCached
else
Dalli
end
MEMCACHED = MEMCACHE
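# Concrete MetaStore implementation backed by the Google App Engine
# memcache service (see rack/cache/app_engine).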
class GAEStore < MetaStore
attr_reader :cache
def initialize(options = {})
require 'rack/cache/app_engine'
@cache = Rack::Cache::AppEngine::MemCache.new(options)
end
def read(key)
key = hexdigest(key)
cache.get(key) || []
end
# ttl is accepted for compatibility with the MetaStore#write protocol;
# it is not used by the App Engine memcache backend here.
def write(key, entries, ttl = nil)
key = hexdigest(key)
cache.put(key, entries)
end
def purge(key)
key = hexdigest(key)
cache.delete(key)
nil
end
def self.resolve(uri)
self.new(:namespace => uri.host)
end
end
GAECACHE = GAEStore
GAE = GAEStore
end
end
rack-cache-1.17.0/lib/rack/cache/metastore.rb 0000664 0000000 0000000 00000000113 14603723563 0020646 0 ustar 00root root 0000000 0000000 warn "use require 'rack/cache/meta_store'"
require 'rack/cache/meta_store'
rack-cache-1.17.0/lib/rack/cache/options.rb 0000664 0000000 0000000 00000014204 14603723563 0020344 0 ustar 00root root 0000000 0000000 require 'rack/cache/key'
require 'rack/cache/storage'
module Rack::Cache
# Configuration options and utility methods for option access. Rack::Cache
# uses the Rack Environment to store option values. All options documented
# below are stored in the Rack Environment as "rack-cache.