memcache-client-1.8.5/0000755000004100000410000000000011724022457014577 5ustar www-datawww-datamemcache-client-1.8.5/test/0000755000004100000410000000000011724022457015556 5ustar www-datawww-datamemcache-client-1.8.5/test/test_mem_cache.rb0000644000004100000410000010004411724022457021042 0ustar www-datawww-data# encoding: utf-8 require 'rubygems' require 'logger' require 'stringio' require 'test/unit' $TESTING = true require 'memcache' begin gem 'flexmock' require 'flexmock/test_unit' rescue LoadError => e puts "Some tests require flexmock, please run `gem install flexmock`" end Thread.abort_on_exception = true class MemCache attr_writer :namespace attr_writer :autofix_keys end class FakeSocket attr_reader :written, :data def initialize @written = StringIO.new @data = StringIO.new end def write(data) @written.write data end def gets @data.gets end def read(arg) @data.read arg end end class Test::Unit::TestCase def requirement(bool, msg) if bool yield else puts msg assert true end end def memcached_running? TCPSocket.new('localhost', 11211) rescue false end def xprofile(name, &block) a = Time.now block.call Time.now - a end def profile(name, &block) require 'ruby-prof' a = Time.now result = RubyProf.profile(&block) time = Time.now - a printer = RubyProf::GraphHtmlPrinter.new(result) File.open("#{name}.html", 'w') do |f| printer.print(f, :min_percent=>1) end time end end class FakeServer attr_accessor :host, :port, :socket, :weight, :multithread, :status def initialize(socket = nil) @closed = false @host = 'example.com' @port = 11211 @socket = socket || FakeSocket.new @weight = 1 @multithread = true @status = "CONNECTED" end def close # begin # raise "Already closed" # rescue => e # puts e.backtrace.join("\n") # end @closed = true @socket = nil @status = "NOT CONNECTED" end def alive? # puts "I'm #{@closed ? 
'dead' : 'alive'}" !@closed end end class TestMemCache < Test::Unit::TestCase def setup @cache = MemCache.new 'localhost:1', :namespace => 'my_namespace' end def test_performance requirement(memcached_running?, 'A real memcached server must be running for performance testing') do cache = MemCache.new(['localhost:11211',"127.0.0.1:11211"]) cache.flush_all cache.add('a', 1, 120) with = xprofile 'get' do 1000.times do cache.get('a') end end puts '' puts "1000 gets with socket timeout: #{with} sec" cache = MemCache.new(['localhost:11211',"127.0.0.1:11211"], :timeout => nil) cache.add('a', 1, 120) without = xprofile 'get' do 1000.times do cache.get('a') end end puts "1000 gets without socket timeout: #{without} sec" end end def test_consistent_hashing requirement(self.respond_to?(:flexmock), 'Flexmock is required to run this test') do flexmock(MemCache::Server).new_instances.should_receive(:alive?).and_return(true) # Setup a continuum of two servers @cache.servers = ['mike1', 'mike2', 'mike3'] keys = [] 1000.times do |idx| keys << idx.to_s end before_continuum = keys.map {|key| @cache.get_server_for_key(key) } @cache.servers = ['mike1', 'mike2', 'mike3', 'mike4'] after_continuum = keys.map {|key| @cache.get_server_for_key(key) } same_count = before_continuum.zip(after_continuum).find_all {|a| a[0].host == a[1].host }.size # With continuum, we should see about 75% of the keys map to the same server # With modulo, we would see about 25%. assert same_count > 700 end end def test_get_multi_with_server_failure @cache = MemCache.new 'localhost:1', :namespace => 'my_namespace', :logger => nil #Logger.new(STDOUT) s1 = FakeServer.new s2 = FakeServer.new # Write two messages to the socket to test failover s1.socket.data.write "VALUE my_namespace:a 0 14\r\n\004\b\"\0170123456789\r\nEND\r\n" s1.socket.data.rewind s2.socket.data.write "bogus response\r\nbogus response\r\n" s2.socket.data.rewind @cache.servers = [s1, s2] assert s1.alive? assert s2.alive? 
# a maps to s1, the rest map to s2 value = @cache.get_multi(['foo', 'bar', 'a', 'b', 'c']) assert_equal({'a'=>'0123456789'}, value) assert s1.alive? assert !s2.alive? end def test_cache_get_with_failover @cache = MemCache.new 'localhost:1', :namespace => 'my_namespace', :logger => nil#Logger.new(STDOUT) s1 = FakeServer.new s2 = FakeServer.new # Write two messages to the socket to test failover s1.socket.data.write "VALUE foo 0 14\r\n\004\b\"\0170123456789\r\n" s1.socket.data.rewind s2.socket.data.write "bogus response\r\nbogus response\r\n" s2.socket.data.rewind @cache.instance_variable_set(:@failover, true) @cache.servers = [s1, s2] assert s1.alive? assert s2.alive? @cache.get('foo') assert s1.alive? assert !s2.alive? end def test_cache_get_without_failover s1 = FakeServer.new s2 = FakeServer.new s1.socket.data.write "VALUE foo 0 14\r\n\004\b\"\0170123456789\r\n" s1.socket.data.rewind s2.socket.data.write "bogus response\r\nbogus response\r\n" s2.socket.data.rewind @cache.instance_variable_set(:@failover, false) @cache.servers = [s1, s2] assert s1.alive? assert s2.alive? e = assert_raise MemCache::MemCacheError do @cache.get('foo') end assert s1.alive? assert !s2.alive? 
assert_equal "No servers available", e.message end def test_cache_get server = util_setup_fake_server assert_equal "\004\b\"\0170123456789", @cache.cache_get(server, 'my_namespace:key') assert_equal "get my_namespace:key\r\n", server.socket.written.string end def test_cache_get_EOF server = util_setup_fake_server server.socket.data.string = '' e = assert_raise IndexError do @cache.cache_get server, 'my_namespace:key' end assert_equal "No connection to server (NOT CONNECTED)", e.message end def test_cache_get_bad_state server = FakeServer.new # Write two messages to the socket to test failover server.socket.data.write "bogus response\r\nbogus response\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server e = assert_raise IndexError do @cache.cache_get(server, 'my_namespace:key') end assert_match(/#{Regexp.quote 'No connection to server (NOT CONNECTED)'}/, e.message) assert !server.alive? end def test_cache_get_miss socket = FakeSocket.new socket.data.write "END\r\n" socket.data.rewind server = FakeServer.new socket assert_equal nil, @cache.cache_get(server, 'my_namespace:key') assert_equal "get my_namespace:key\r\n", socket.written.string end def test_cache_get_multi server = util_setup_fake_server server.socket.data.write "VALUE foo 0 7\r\n" server.socket.data.write "\004\b\"\bfoo\r\n" server.socket.data.write "VALUE bar 0 7\r\n" server.socket.data.write "\004\b\"\bbar\r\n" server.socket.data.write "END\r\n" server.socket.data.rewind result = @cache.cache_get_multi server, 'foo bar baz' assert_equal 2, result.length assert_equal "\004\b\"\bfoo", result['foo'] assert_equal "\004\b\"\bbar", result['bar'] end def test_cache_get_multi_EOF server = util_setup_fake_server server.socket.data.string = '' e = assert_raise IndexError do @cache.cache_get_multi server, 'my_namespace:key' end assert_equal "No connection to server (NOT CONNECTED)", e.message end def test_cache_get_multi_bad_state server = FakeServer.new # Write two messages to the socket to 
test failover server.socket.data.write "bogus response\r\nbogus response\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server e = assert_raise IndexError do @cache.cache_get_multi server, 'my_namespace:key' end assert_match(/#{Regexp.quote 'No connection to server (NOT CONNECTED)'}/, e.message) assert !server.alive? end def test_multithread_error server = FakeServer.new server.multithread = false @cache = MemCache.new(['localhost:1'], :multithread => false) server.socket.data.write "bogus response\r\nbogus response\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server assert_nothing_raised do @cache.set 'a', 1 end passed = true Thread.new do begin @cache.set 'b', 2 passed = false rescue MemCache::MemCacheError => me passed = me.message =~ /multiple threads/ end end assert passed end def test_initialize cache = MemCache.new :namespace => 'my_namespace', :readonly => true assert_equal 'my_namespace', cache.namespace assert_equal true, cache.readonly? assert_equal true, cache.servers.empty? end def test_initialize_compatible cache = MemCache.new ['localhost:11211', 'localhost:11212'], :namespace => 'my_namespace', :readonly => true assert_equal 'my_namespace', cache.namespace assert_equal true, cache.readonly? assert_equal false, cache.servers.empty? end def test_initialize_compatible_no_hash cache = MemCache.new ['localhost:11211', 'localhost:11212'] assert_equal nil, cache.namespace assert_equal false, cache.readonly? assert_equal false, cache.servers.empty? end def test_initialize_compatible_one_server cache = MemCache.new 'localhost:11211' assert_equal nil, cache.namespace assert_equal false, cache.readonly? assert_equal false, cache.servers.empty? 
end def test_initialize_compatible_bad_arg e = assert_raise ArgumentError do cache = MemCache.new Object.new end assert_equal 'first argument must be Array, Hash or String', e.message end def test_initialize_multiple_servers cache = MemCache.new %w[localhost:11211 localhost:11212], :namespace => 'my_namespace', :readonly => true assert_equal 'my_namespace', cache.namespace assert_equal true, cache.readonly? assert_equal false, cache.servers.empty? assert !cache.instance_variable_get(:@continuum).empty? end def test_initialize_too_many_args assert_raises ArgumentError do MemCache.new 1, 2, 3 end end def test_decr server = FakeServer.new server.socket.data.write "5\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server value = @cache.decr 'key' assert_equal "decr my_namespace:key 1\r\n", @cache.servers.first.socket.written.string assert_equal 5, value end def test_decr_not_found server = FakeServer.new server.socket.data.write "NOT_FOUND\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server value = @cache.decr 'key' assert_equal "decr my_namespace:key 1\r\n", @cache.servers.first.socket.written.string assert_equal nil, value end def test_decr_space_padding server = FakeServer.new server.socket.data.write "5 \r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server value = @cache.decr 'key' assert_equal "decr my_namespace:key 1\r\n", @cache.servers.first.socket.written.string assert_equal 5, value end def test_get util_setup_fake_server value = @cache.get 'key' assert_equal "get my_namespace:key\r\n", @cache.servers.first.socket.written.string assert_equal '0123456789', value end def test_fetch_without_a_block server = FakeServer.new server.socket.data.write "END\r\n" server.socket.data.rewind @cache.servers = [server] flexmock(@cache).should_receive(:get).with('key', false).and_return(nil) value = @cache.fetch('key', 1) assert_equal nil, value end def test_fetch_miss server = FakeServer.new 
server.socket.data.write "END\r\n" server.socket.data.rewind @cache.servers = [server] flexmock(@cache).should_receive(:get).with('key', false).and_return(nil) flexmock(@cache).should_receive(:add).with('key', 'value', 1, false) value = @cache.fetch('key', 1) { 'value' } assert_equal 'value', value end def test_fetch_hit server = FakeServer.new server.socket.data.write "END\r\n" server.socket.data.rewind @cache.servers = [server] flexmock(@cache).should_receive(:get).with('key', false).and_return('value') flexmock(@cache).should_receive(:add).never value = @cache.fetch('key', 1) { raise 'Should not be called.' } assert_equal 'value', value end def test_get_bad_key util_setup_fake_server assert_raise ArgumentError do @cache.get 'k y' end util_setup_fake_server assert_raise ArgumentError do @cache.get 'k' * 250 end end def test_get_cache_get_IOError socket = Object.new def socket.write(arg) raise IOError, 'some io error'; end server = FakeServer.new socket @cache.servers = [] @cache.servers << server e = assert_raise MemCache::MemCacheError do @cache.get 'my_namespace:key' end assert_equal 'some io error', e.message end def test_get_cache_get_SystemCallError socket = Object.new def socket.write(arg) raise SystemCallError, 'some syscall error'; end server = FakeServer.new socket @cache.servers = [] @cache.servers << server e = assert_raise MemCache::MemCacheError do @cache.get 'my_namespace:key' end assert_equal 'unknown error - some syscall error', e.message end def test_get_no_connection @cache.servers = 'localhost:1' e = assert_raise MemCache::MemCacheError do @cache.get 'key' end assert_match(/^No connection to server/, e.message) end def test_get_no_servers @cache.servers = [] e = assert_raise MemCache::MemCacheError do @cache.get 'key' end assert_equal 'No active servers', e.message end def test_get_multi server = FakeServer.new server.socket.data.write "VALUE my_namespace:key 0 14\r\n" server.socket.data.write "\004\b\"\0170123456789\r\n" 
server.socket.data.write "VALUE my_namespace:keyb 0 14\r\n" server.socket.data.write "\004\b\"\0179876543210\r\n" server.socket.data.write "END\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server values = @cache.get_multi 'key', 'keyb' assert_equal "get my_namespace:key my_namespace:keyb\r\n", server.socket.written.string expected = { 'key' => '0123456789', 'keyb' => '9876543210' } assert_equal expected.sort, values.sort end def test_get_multi_raw server = FakeServer.new server.socket.data.write "VALUE my_namespace:key 0 10\r\n" server.socket.data.write "0123456789\r\n" server.socket.data.write "VALUE my_namespace:keyb 0 10\r\n" server.socket.data.write "9876543210\r\n" server.socket.data.write "END\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server values = @cache.get_multi 'key', 'keyb', :raw => true assert_equal "get my_namespace:key my_namespace:keyb\r\n", server.socket.written.string expected = { 'key' => '0123456789', 'keyb' => '9876543210' } assert_equal expected.sort, values.sort end def test_get_raw server = FakeServer.new server.socket.data.write "VALUE my_namespace:key 0 10\r\n" server.socket.data.write "0123456789\r\n" server.socket.data.write "END\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server value = @cache.get 'key', true assert_equal "get my_namespace:key\r\n", @cache.servers.first.socket.written.string assert_equal '0123456789', value end def test_get_server_for_key server = @cache.get_server_for_key 'key' assert_equal 'localhost', server.host assert_equal 1, server.port end def test_get_server_for_key_multiple s1 = util_setup_server @cache, 'one.example.com', '' s2 = util_setup_server @cache, 'two.example.com', '' @cache.servers = [s1, s2] server = @cache.get_server_for_key 'keya' assert_equal 'two.example.com', server.host server = @cache.get_server_for_key 'keyb' assert_equal 'two.example.com', server.host server = @cache.get_server_for_key 'keyc' assert_equal 
'two.example.com', server.host server = @cache.get_server_for_key 'keyd' assert_equal 'one.example.com', server.host end def test_get_server_for_key_no_servers @cache.servers = [] e = assert_raise MemCache::MemCacheError do @cache.get_server_for_key 'key' end assert_equal 'No servers available', e.message end def test_get_server_for_key_spaces e = assert_raise ArgumentError do @cache.get_server_for_key 'space key' end assert_equal 'illegal character in key "space key"', e.message end def test_get_server_for_blank_key e = assert_raise ArgumentError do @cache.get_server_for_key '' end assert_equal 'key cannot be blank', e.message end def test_get_server_for_key_length @cache.get_server_for_key 'x' * 250 long_key = 'x' * 251 e = assert_raise ArgumentError do @cache.get_server_for_key long_key end assert_equal "key too long #{long_key.inspect}", e.message end def test_incr server = FakeServer.new server.socket.data.write "5\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server value = @cache.incr 'key' assert_equal "incr my_namespace:key 1\r\n", @cache.servers.first.socket.written.string assert_equal 5, value end def test_incr_not_found server = FakeServer.new server.socket.data.write "NOT_FOUND\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server value = @cache.incr 'key' assert_equal "incr my_namespace:key 1\r\n", @cache.servers.first.socket.written.string assert_equal nil, value end def test_incr_space_padding server = FakeServer.new server.socket.data.write "5 \r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server value = @cache.incr 'key' assert_equal "incr my_namespace:key 1\r\n", @cache.servers.first.socket.written.string assert_equal 5, value end def test_make_cache_key assert_equal 'my_namespace:key', @cache.make_cache_key('key') @cache.namespace = nil assert_equal 'key', @cache.make_cache_key('key') end def test_make_cache_key_without_autofix @cache.autofix_keys = false key = "keys with more 
than two hundred and fifty characters can cause problems, because they get truncated and start colliding with each other. It's not a common occurrence, but when it happens is very hard to debug. the autofix option takes care of that for you" hash = Digest::SHA1.hexdigest(key) @cache.namespace = nil assert_equal key, @cache.make_cache_key(key) end def test_make_cache_key_with_autofix @cache.autofix_keys = true @cache.namespace = "my_namespace" assert_equal 'my_namespace:key', @cache.make_cache_key('key') @cache.namespace = nil assert_equal 'key', @cache.make_cache_key('key') key = "keys with more than two hundred and fifty characters can cause problems, because they get truncated and start colliding with each other. It's not a common occurrence, but when it happens is very hard to debug. the autofix option takes care of that for you" hash = Digest::SHA1.hexdigest(key) @cache.namespace = "my_namespace" assert_equal "my_namespace:#{hash}-autofixed", @cache.make_cache_key(key) @cache.namespace = nil assert_equal "#{hash}-autofixed", @cache.make_cache_key(key) key = "a short key with spaces" hash = Digest::SHA1.hexdigest(key) @cache.namespace = "my_namespace" assert_equal "my_namespace:#{hash}-autofixed", @cache.make_cache_key(key) @cache.namespace = nil assert_equal "#{hash}-autofixed", @cache.make_cache_key(key) # namespace + separator + key > 250 key = 'k' * 240 hash = Digest::SHA1.hexdigest(key) @cache.namespace = 'n' * 10 assert_equal "#{@cache.namespace}:#{hash}-autofixed", @cache.make_cache_key(key) end def test_servers server = FakeServer.new @cache.servers = [] @cache.servers << server assert_equal [server], @cache.servers end def test_set server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.set 'key', 'value' dumped = Marshal.dump('value') expected = "set my_namespace:key 0 0 #{dumped.length}\r\n#{dumped}\r\n" # expected = "set my_namespace:key 0 0 9\r\n\004\b\"\nvalue\r\n" 
assert_equal expected, server.socket.written.string end def test_set_expiry server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.set 'key', 'value', 5 dumped = Marshal.dump('value') expected = "set my_namespace:key 0 5 #{dumped.length}\r\n#{dumped}\r\n" assert_equal expected, server.socket.written.string end def test_set_raw server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.set 'key', 'value', 0, true expected = "set my_namespace:key 0 0 5\r\nvalue\r\n" assert_equal expected, server.socket.written.string end def test_set_readonly cache = MemCache.new :readonly => true e = assert_raise MemCache::MemCacheError do cache.set 'key', 'value' end assert_equal 'Update of readonly cache', e.message end def test_check_size_on cache = MemCache.new :check_size => true server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind cache.servers = [] cache.servers << server e = assert_raise MemCache::MemCacheError do cache.set 'key', 'v' * 1048577 end assert_equal 'Value too large, memcached can only store 1MB of data per key', e.message end def test_check_size_off cache = MemCache.new :check_size => false server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind cache.servers = [] cache.servers << server assert_nothing_raised do cache.set 'key', 'v' * 1048577 end end def test_set_too_big server = FakeServer.new # Write two messages to the socket to test failover server.socket.data.write "SERVER_ERROR\r\nSERVER_ERROR object too large for cache\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server e = assert_raise MemCache::MemCacheError do @cache.set 'key', 'v' end assert_match(/object too large for cache/, e.message) end def test_prepend server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind 
@cache.servers = [] @cache.servers << server @cache.prepend 'key', 'value' dumped = Marshal.dump('value') expected = "prepend my_namespace:key 0 0 5\r\nvalue\r\n" assert_equal expected, server.socket.written.string end def test_append server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.append 'key', 'value' expected = "append my_namespace:key 0 0 5\r\nvalue\r\n" assert_equal expected, server.socket.written.string end def test_replace server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.replace 'key', 'value', 150 dumped = Marshal.dump('value') expected = "replace my_namespace:key 0 150 #{dumped.length}\r\n#{dumped}\r\n" assert_equal expected, server.socket.written.string end def test_add server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.add 'key', 'value' dumped = Marshal.dump('value') expected = "add my_namespace:key 0 0 #{dumped.length}\r\n#{dumped}\r\n" assert_equal expected, server.socket.written.string end def test_add_exists server = FakeServer.new server.socket.data.write "NOT_STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.add 'key', 'value' dumped = Marshal.dump('value') expected = "add my_namespace:key 0 0 #{dumped.length}\r\n#{dumped}\r\n" assert_equal expected, server.socket.written.string end def test_add_expiry server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.add 'key', 'value', 5 dumped = Marshal.dump('value') expected = "add my_namespace:key 0 5 #{dumped.length}\r\n#{dumped}\r\n" assert_equal expected, server.socket.written.string end def test_add_raw server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] 
@cache.servers << server @cache.add 'key', 'value', 0, true expected = "add my_namespace:key 0 0 5\r\nvalue\r\n" assert_equal expected, server.socket.written.string end def test_add_raw_int server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server @cache.add 'key', 12, 0, true expected = "add my_namespace:key 0 0 2\r\n12\r\n" assert_equal expected, server.socket.written.string end def test_add_readonly cache = MemCache.new :readonly => true e = assert_raise MemCache::MemCacheError do cache.add 'key', 'value' end assert_equal 'Update of readonly cache', e.message end def test_delete server = FakeServer.new @cache.servers = [] @cache.servers << server @cache.delete 'key' expected = "delete my_namespace:key\r\n" assert_equal expected, server.socket.written.string end def test_delete_with_expiry server = FakeServer.new @cache.servers = [] @cache.servers << server @cache.delete 'key', 300 expected = "delete my_namespace:key\r\n" assert_equal expected, server.socket.written.string end def test_flush_all @cache.servers = [] 3.times { @cache.servers << FakeServer.new } @cache.flush_all expected = "flush_all\r\n" @cache.servers.each do |server| assert_equal expected, server.socket.written.string end end def test_flush_all_with_delay @cache.servers = [] 3.times { @cache.servers << FakeServer.new } @cache.flush_all(10) @cache.servers.each_with_index do |server, idx| expected = "flush_all #{idx*10}\r\n" assert_equal expected, server.socket.written.string end end def test_flush_all_failure socket = FakeSocket.new # Write two messages to the socket to test failover socket.data.write "ERROR\r\nERROR\r\n" socket.data.rewind server = FakeServer.new socket @cache.servers = [] @cache.servers << server assert_raise MemCache::MemCacheError do @cache.flush_all end assert_match(/flush_all\r\n/, socket.written.string) end def test_flush_all_for_real requirement(memcached_running?, 'A real memcached server must be 
running for testing flush_all') do cache = MemCache.new "localhost:11211", :namespace => "test_flush_all" k, v = "1234", "test" assert_nil cache.get(k) cache.set(k, v) assert_equal v, cache.get(k) cache.flush_all assert_nil cache.get(k) end end def test_stats socket = FakeSocket.new socket.data.write "STAT pid 20188\r\nSTAT total_items 32\r\nSTAT version 1.2.3\r\nSTAT rusage_user 1:300\r\nSTAT dummy ok\r\nEND\r\n" socket.data.rewind server = FakeServer.new socket def server.host() 'localhost'; end def server.port() 11211; end @cache.servers = [] @cache.servers << server expected = { 'localhost:11211' => { 'pid' => 20188, 'total_items' => 32, 'version' => '1.2.3', 'rusage_user' => 1.0003, 'dummy' => 'ok' } } assert_equal expected, @cache.stats assert_equal "stats\r\n", socket.written.string end def test_basic_threaded_operations_should_work cache = MemCache.new :multithread => true, :namespace => 'my_namespace', :readonly => false server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind cache.servers = [] cache.servers << server assert cache.multithread assert_nothing_raised do cache.set "test", "test value" end output = server.socket.written.string assert_match(/set my_namespace:test/, output) assert_match(/test value/, output) end def test_namespace_separator cache = MemCache.new :namespace => 'ns', :namespace_separator => '' server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind cache.servers = [] cache.servers << server assert_nothing_raised do cache.set "test", "test value" end output = server.socket.written.string assert_match(/set nstest/, output) assert_match(/test value/, output) end def test_basic_unthreaded_operations_should_work cache = MemCache.new :multithread => false, :namespace => 'my_namespace', :readonly => false server = FakeServer.new server.socket.data.write "STORED\r\n" server.socket.data.rewind cache.servers = [] cache.servers << server assert !cache.multithread 
assert_nothing_raised do cache.set "test", "test value" end output = server.socket.written.string assert_match(/set my_namespace:test/, output) assert_match(/test value/, output) end def util_setup_fake_server server = FakeServer.new server.socket.data.write "VALUE my_namespace:key 0 14\r\n" server.socket.data.write "\004\b\"\0170123456789\r\n" server.socket.data.write "END\r\n" server.socket.data.rewind @cache.servers = [] @cache.servers << server return server end def util_setup_server(memcache, host, responses) server = MemCache::Server.new memcache, host server.instance_variable_set :@sock, StringIO.new(responses) @cache.servers = [] @cache.servers << server return server end def test_crazy_multithreaded_access requirement(memcached_running?, 'A real memcached server must be running for performance testing') do # Use a null logger to verify logging doesn't blow up at runtime cache = MemCache.new(['localhost:11211', '127.0.0.1:11211'], :logger => Logger.new('/dev/null')) cache.flush_all assert_equal true, cache.multithread workers = [] cache.set('f', 'zzz') assert_equal "STORED\r\n", (cache.cas('f') do |value| value << 'z' end) assert_equal 'zzzz', cache.get('f') # Have a bunch of threads perform a bunch of operations at the same time. # Verify the result of each operation to ensure the request and response # are not intermingled between threads. 
10.times do workers << Thread.new do 100.times do cache.set('a', 9) cache.set('b', 11) cache.add('c', 10, 0, true) cache.set('d', 'a', 100, true) cache.set('e', 'x', 100, true) cache.set('f', 'zzz') assert_not_nil(cache.cas('f') do |value| value << 'z' end) cache.append('d', 'b') cache.prepend('e', 'y') assert_equal "NOT_STORED\r\n", cache.add('a', 11) assert_equal({ 'a' => 9, 'b' => 11 }, cache.get_multi(['a', 'b'])) inc = cache.incr('c', 10) assert_equal 0, inc % 5 assert inc > 14 assert cache.decr('c', 5) > 14 assert_equal 11, cache.get('b') d = cache.get('d', true) assert_match(/\Aab*\Z/, d) e = cache.get('e', true) assert_match(/\Ay*x\Z/, e) end end end workers.each { |w| w.join } cache.flush_all end end end memcache-client-1.8.5/test/test_event_machine.rb0000644000004100000410000000611511724022457021752 0ustar www-datawww-data# encoding: ascii-8bit require 'test/unit' require 'memcache' class TestEventMachine < Test::Unit::TestCase def test_concurrent_fibers return puts("Skipping EventMachine test, not Ruby 1.9") if RUBY_VERSION < '1.9' return puts("Skipping EventMachine test, no live server") if !live_server? 
require 'eventmachine' require 'memcache/event_machine' ex = nil m = MemCache.new(['127.0.0.1:11211', 'localhost:11211']) within_em(3) do begin key1 = 'foo' key2 = 'bar'*50 key3 = '£∞'*45 value1 = 'abc' value2 = 'xyz'*1000 value3 = '∞§¶•ª'*1000 100.times do assert_equal "STORED\r\n", m.set(key1, value1) assert_equal "STORED\r\n", m.set(key2, value2) assert_equal "STORED\r\n", m.set(key3, value3) m.get(key1) m.get(key2) m.get(key3) assert m.delete(key1) assert_equal "STORED\r\n", m.set(key1, value2) m.get(key1) assert_equal "STORED\r\n", m.set(key2, value3) m.get(key2) assert_equal "STORED\r\n", m.set(key3, value1) m.get(key3) h = m.get_multi(key1, key2, key3) assert h assert_equal Hash, h.class assert h.size > 0 end rescue Exception => exp puts exp.message ex = exp ensure EM.stop end end raise ex if ex end def test_live_server return puts("Skipping EventMachine test, not Ruby 1.9") if RUBY_VERSION < '1.9' return puts("Skipping EventMachine test, no live server") if !live_server? require 'eventmachine' require 'memcache/event_machine' ex = nil within_em do begin m = MemCache.new(['127.0.0.1:11211', 'localhost:11211']) key1 = 'foo' key2 = 'bar'*50 key3 = '£∞'*50 value1 = 'abc' value2 = 'xyz'*1000 value3 = '∞§¶•ª'*1000 1000.times do assert_equal "STORED\r\n", m.set(key1, value1) assert_equal "STORED\r\n", m.set(key2, value2) assert_equal "STORED\r\n", m.set(key3, value3) assert_equal value1, m.get(key1) assert_equal value2, m.get(key2) assert_equal value3, m.get(key3) assert_equal "DELETED\r\n", m.delete(key1) assert_equal "STORED\r\n", m.set(key1, value2) assert_equal value2, m.get(key1) assert_equal "STORED\r\n", m.set(key2, value3) assert_equal value3, m.get(key2) assert_equal "STORED\r\n", m.set(key3, value1) assert_equal value1, m.get(key3) assert_equal({ key1 => value2, key2 => value3, key3 => value1 }, m.get_multi(key1, key2, key3)) end rescue Exception => exp puts exp.message ex = exp ensure EM.stop end end raise ex if ex end private def within_em(count=1, 
&block) EM.run do count.times do Fiber.new(&block).resume end end end def live_server? TCPSocket.new('localhost', 11211) rescue nil end endmemcache-client-1.8.5/test/test_benchmark.rb0000644000004100000410000000655611724022457021110 0ustar www-datawww-datarequire 'rubygems' require 'benchmark' require 'test/unit' $TESTING = true require 'memcache' class TestBenchmark < Test::Unit::TestCase def setup puts "Testing #{MemCache::VERSION}" # We'll use a simple @value to try to avoid spending time in Marshal, # which is a constant penalty that both clients have to pay @value = [] @marshalled = Marshal.dump(@value) @opts = [ ['127.0.0.1:11211', 'localhost:11211'], { :namespace => "namespace", # :no_reply => true, # :timeout => nil, } ] @key1 = "Short" @key2 = "Sym1-2-3::45"*8 @key3 = "Long"*40 @key4 = "Medium"*8 # 5 and 6 are only used for multiget miss test @key5 = "Medium2"*8 @key6 = "Long3"*40 end def test_em return if RUBY_VERSION < '1.9' require 'eventmachine' require 'memcache/event_machine' puts "with EventMachine" EM.run do Fiber.new do test_benchmark EM.stop end.resume end end def test_benchmark Benchmark.bm(31) do |x| n = 2500 @m = MemCache.new(*@opts) x.report("set:plain:memcache-client") do n.times do @m.set @key1, @marshalled, 0, true @m.set @key2, @marshalled, 0, true @m.set @key3, @marshalled, 0, true @m.set @key1, @marshalled, 0, true @m.set @key2, @marshalled, 0, true @m.set @key3, @marshalled, 0, true end end @m = MemCache.new(*@opts) x.report("set:ruby:memcache-client") do n.times do @m.set @key1, @value @m.set @key2, @value @m.set @key3, @value @m.set @key1, @value @m.set @key2, @value @m.set @key3, @value end end @m = MemCache.new(*@opts) x.report("get:plain:memcache-client") do n.times do @m.get @key1, true @m.get @key2, true @m.get @key3, true @m.get @key1, true @m.get @key2, true @m.get @key3, true end end @m = MemCache.new(*@opts) x.report("get:ruby:memcache-client") do n.times do @m.get @key1 @m.get @key2 @m.get @key3 @m.get @key1 @m.get @key2 
@m.get @key3 end end @m = MemCache.new(*@opts) x.report("multiget:ruby:memcache-client") do n.times do # We don't use the keys array because splat is slow @m.get_multi @key1, @key2, @key3, @key4, @key5, @key6 end end @m = MemCache.new(*@opts) x.report("missing:ruby:memcache-client") do n.times do begin @m.delete @key1; rescue; end begin @m.get @key1; rescue; end begin @m.delete @key2; rescue; end begin @m.get @key2; rescue; end begin @m.delete @key3; rescue; end begin @m.get @key3; rescue; end end end @m = MemCache.new(*@opts) x.report("mixed:ruby:memcache-client") do n.times do @m.set @key1, @value @m.set @key2, @value @m.set @key3, @value @m.get @key1 @m.get @key2 @m.get @key3 @m.set @key1, @value @m.get @key1 @m.set @key2, @value @m.get @key2 @m.set @key3, @value @m.get @key3 end end assert true end end endmemcache-client-1.8.5/README.rdoc0000644000004100000410000000350011724022457016403 0ustar www-datawww-data= memcache-client A ruby library for accessing memcached. Source: http://github.com/mperham/memcache-client == Installing memcache-client Just install the gem: $ sudo gem install memcache-client == Using memcache-client With one server: CACHE = MemCache.new 'localhost:11211' Or with multiple servers: CACHE = MemCache.new %w[one.example.com:11211 two.example.com:11211] == Tuning memcache-client The MemCache.new method takes a number of options which can be useful at times. Please read the source comments there for an overview. If you are using Ruby 1.8.x and using multiple memcached servers, you should install the RubyInline gem for ultimate performance. == Using memcache-client with Rails Rails 2.1+ includes memcache-client 1.5.0 out of the box. See ActiveSupport::Cache::MemCacheStore and the Rails.cache method for more details. Rails 2.3+ will use the latest memcache-client gem installed. == Using memcache-client with EventMachine memcache-client 1.8.0 added support for native EventMachine connections using Ruby 1.9. 
If you are using an EventMachine-based application (e.g. thin), you can activate the EventMachine support like so: require 'memcache' require 'memcache/event_machine' EM.run do Fiber.new do m = MemCache.new('localhost:11211') m.set 'abc', 'xyz' m.get 'abc' end.resume end == Questions? memcache-client is maintained by Mike Perham and was originally written by Bob Cottrell, Eric Hodel and the seattle.rb crew. Email:: mailto:mperham@gmail.com Twitter:: mperham[http://twitter.com/mperham] WWW:: http://mikeperham.com If my work on memcache-client is something you support, please take a moment to recommend me at WWR[http://workingwithrails.com/person/10797-mike-perham]. I'm not asking for money, just a electronic "thumbs up". memcache-client-1.8.5/Rakefile0000644000004100000410000000217211724022457016246 0ustar www-datawww-data# vim: syntax=Ruby require 'rubygems' require 'rake/rdoctask' require 'rake/testtask' require File.dirname(__FILE__) + "/lib/memcache/version.rb" begin require 'jeweler' Jeweler::Tasks.new do |s| s.name = "memcache-client" s.version = MemCache::VERSION s.summary = s.description = "A Ruby library for accessing memcached." s.email = "mperham@gmail.com" s.homepage = "http://github.com/mperham/memcache-client" s.authors = ['Eric Hodel', 'Robert Cottrell', 'Mike Perham'] s.has_rdoc = true s.files = FileList["[A-Z]*", "{lib,test}/**/*", 'performance.txt'] s.test_files = FileList["test/test_*.rb"] s.executables = ['memcached_top'] end Jeweler::GemcutterTasks.new rescue LoadError puts "Jeweler not available. 
Install it for jeweler-related tasks with: sudo gem install jeweler" end Rake::RDocTask.new do |rd| rd.main = "README.rdoc" rd.rdoc_files.include("README.rdoc", "FAQ.rdoc", "History.rdoc", "lib/memcache.rb") rd.rdoc_dir = 'doc' end Rake::TestTask.new do |t| t.warning = true t.libs = ['lib', 'test'] end task :default => :test task :rcov do `rcov -Ilib test/*.rb` end memcache-client-1.8.5/performance.txt0000644000004100000410000002240411724022457017643 0ustar www-datawww-data== 1.5.0, 1.8.6 (default in Rails 2.2 and lower) user system total real set:plain:memcache-client 41.550000 0.590000 42.140000 ( 43.740685) set:ruby:memcache-client 41.540000 0.590000 42.130000 ( 43.733796) get:plain:memcache-client 41.920000 0.610000 42.530000 ( 44.031005) get:ruby:memcache-client 41.940000 0.600000 42.540000 ( 44.082447) multiget:ruby:memcache-client 46.120000 0.440000 46.560000 ( 47.354041) missing:ruby:memcache-client 41.490000 0.580000 42.070000 ( 43.610837) mixed:ruby:memcache-client 83.820000 1.190000 85.010000 ( 88.117077) == 1.7.0, timeout, 1.8.6 (closest to default in Rails 2.3) user system total real set:plain:memcache-client 4.320000 2.280000 6.600000 ( 7.102900) set:ruby:memcache-client 4.400000 2.300000 6.700000 ( 6.856992) get:plain:memcache-client 9.890000 6.830000 16.720000 ( 16.984208) get:ruby:memcache-client 10.040000 6.890000 16.930000 ( 17.141128) multiget:ruby:memcache-client 5.350000 4.110000 9.460000 ( 9.542898) missing:ruby:memcache-client 4.710000 3.180000 7.890000 ( 8.030969) mixed:ruby:memcache-client 14.540000 9.200000 23.740000 ( 24.121824) == 1.7.0, timeout, system_timer, 1.8.6 user system total real set:plain:memcache-client 3.840000 0.640000 4.480000 ( 4.643790) set:ruby:memcache-client 3.930000 0.650000 4.580000 ( 4.731868) get:plain:memcache-client 8.320000 1.290000 9.610000 ( 9.903877) get:ruby:memcache-client 8.460000 1.310000 9.770000 ( 9.986694) multiget:ruby:memcache-client 4.250000 0.560000 4.810000 ( 4.935326) 
missing:ruby:memcache-client 3.840000 0.640000 4.480000 ( 4.569696) mixed:ruby:memcache-client 12.400000 1.960000 14.360000 ( 14.857924) == 1.7.0, timeout, 1.9.1 user system total real set:plain:memcache-client 2.130000 2.150000 4.280000 ( 3.774238) set:ruby:memcache-client 2.230000 2.230000 4.460000 ( 3.883686) get:plain:memcache-client 4.030000 4.250000 8.280000 ( 6.702740) get:ruby:memcache-client 4.090000 4.220000 8.310000 ( 6.749134) multiget:ruby:memcache-client 1.960000 1.840000 3.800000 ( 3.089448) missing:ruby:memcache-client 2.110000 2.210000 4.320000 ( 3.659019) mixed:ruby:memcache-client 6.400000 6.560000 12.960000 ( 11.116317) == 1.7.0, no timeout, 1.9.1 user system total real set:plain:memcache-client 0.560000 0.320000 0.880000 ( 1.849380) set:ruby:memcache-client 0.630000 0.320000 0.950000 ( 1.968208) get:plain:memcache-client 0.640000 0.330000 0.970000 ( 1.962473) get:ruby:memcache-client 0.690000 0.320000 1.010000 ( 2.002295) multiget:ruby:memcache-client 0.460000 0.110000 0.570000 ( 0.885827) missing:ruby:memcache-client 0.530000 0.320000 0.850000 ( 1.721371) mixed:ruby:memcache-client 1.340000 0.660000 2.000000 ( 3.973213) == 1.7.0, no timeout, 1.8.6 user system total real set:plain:memcache-client 1.220000 0.310000 1.530000 ( 2.763310) set:ruby:memcache-client 1.270000 0.300000 1.570000 ( 2.806251) get:plain:memcache-client 1.400000 0.300000 1.700000 ( 2.944343) get:ruby:memcache-client 1.450000 0.310000 1.760000 ( 2.997234) multiget:ruby:memcache-client 1.120000 0.110000 1.230000 ( 1.665716) missing:ruby:memcache-client 1.160000 0.300000 1.460000 ( 2.683376) mixed:ruby:memcache-client 2.760000 0.610000 3.370000 ( 5.851047) == 1.7.1, timeout, 1.8.6, raw + gets SystemTimer user system total real set:plain:memcache-client 2.670000 0.510000 3.180000 ( 3.489509) set:ruby:memcache-client 2.810000 0.530000 3.340000 ( 3.675955) get:plain:memcache-client 4.380000 0.720000 5.100000 ( 5.400587) get:ruby:memcache-client 4.490000 0.730000 5.220000 ( 
5.477543) multiget:ruby:memcache-client 2.570000 0.310000 2.880000 ( 3.034944) missing:ruby:memcache-client 2.800000 0.530000 3.330000 ( 3.547073) mixed:ruby:memcache-client 7.460000 1.250000 8.710000 ( 9.272177) == 1.7.1, timeout, 1.9.1, raw + gets Timeout user system total real set:plain:memcache-client 1.370000 1.300000 2.670000 ( 2.708669) set:ruby:memcache-client 1.400000 1.240000 2.640000 ( 2.713737) get:plain:memcache-client 2.070000 2.020000 4.090000 ( 3.950879) get:ruby:memcache-client 2.160000 2.090000 4.250000 ( 3.924613) multiget:ruby:memcache-client 1.080000 0.820000 1.900000 ( 1.744107) missing:ruby:memcache-client 1.330000 1.270000 2.600000 ( 2.547597) mixed:ruby:memcache-client 3.540000 3.270000 6.810000 ( 6.735349) == 1.7.1, timeout, 1.8.6, raw + gets SystemTimer, native binary search user system total real set:plain:memcache-client 1.840000 0.450000 2.290000 ( 2.651285) set:ruby:memcache-client 1.960000 0.460000 2.420000 ( 2.712650) get:plain:memcache-client 3.180000 0.630000 3.810000 ( 4.079930) get:ruby:memcache-client 3.290000 0.640000 3.930000 ( 4.242648) multiget:ruby:memcache-client 1.640000 0.250000 1.890000 ( 2.003687) missing:ruby:memcache-client 1.940000 0.450000 2.390000 ( 2.619675) mixed:ruby:memcache-client 5.360000 1.100000 6.460000 ( 7.040998) == 1.7.2, timeout, 1.8.6, SystemTimer, native binary search user system total real set:plain:memcache-client 3.260000 0.590000 3.850000 ( 4.067382) set:ruby:memcache-client 3.370000 0.590000 3.960000 ( 4.364004) get:plain:memcache-client 6.740000 1.240000 7.980000 ( 8.586676) get:ruby:memcache-client 6.780000 1.210000 7.990000 ( 8.423400) multiget:ruby:memcache-client 3.480000 0.540000 4.020000 ( 4.288633) missing:ruby:memcache-client 3.250000 0.590000 3.840000 ( 4.043602) mixed:ruby:memcache-client 10.150000 1.810000 11.960000 ( 12.372054) == 1.7.4, 1.8.6, buffered and non-blocking IO user system total real set:plain:memcache-client 2.450000 0.790000 3.240000 ( 3.397091) 
set:ruby:memcache-client 2.490000 0.790000 3.280000 ( 3.555436) get:plain:memcache-client 2.840000 0.810000 3.650000 ( 3.759695) get:ruby:memcache-client 2.890000 0.790000 3.680000 ( 3.778011) multiget:ruby:memcache-client 1.380000 0.280000 1.660000 ( 1.695290) missing:ruby:memcache-client 2.380000 0.780000 3.160000 ( 3.251136) mixed:ruby:memcache-client 5.360000 1.600000 6.960000 ( 7.189314) == memcached 0.13 + libmemcached 0.25.4 versus memcache-client 1.7.4 user system total real set:plain:noblock:memcached 0.090000 0.030000 0.120000 ( 0.277929) set:plain:memcached 0.220000 0.270000 0.490000 ( 1.251547) set:plain:memcache-client 0.610000 0.270000 0.880000 ( 1.670718) set:ruby:noblock:memcached 0.150000 0.020000 0.170000 ( 0.309201) set:ruby:memcached 0.300000 0.290000 0.590000 ( 1.390354) set:ruby:memcache-client 0.670000 0.270000 0.940000 ( 1.713558) get:plain:memcached 0.240000 0.270000 0.510000 ( 1.169909) get:plain:memcache-client 0.850000 0.270000 1.120000 ( 1.885270) get:ruby:memcached 0.270000 0.280000 0.550000 ( 1.229705) get:ruby:memcache-client 0.890000 0.260000 1.150000 ( 1.861660) multiget:ruby:memcached 0.190000 0.090000 0.280000 ( 0.396264) multiget:ruby:memcache-client 0.530000 0.100000 0.630000 ( 0.901016) missing:ruby:memcached 0.280000 0.290000 0.570000 ( 1.254400) missing:ruby:memcached:inline 0.300000 0.290000 0.590000 ( 1.235122) missing:ruby:memcache-client 0.570000 0.250000 0.820000 ( 1.461293) mixed:ruby:noblock:memcached 0.540000 0.620000 1.160000 ( 2.429200) mixed:ruby:memcached 0.580000 0.570000 1.150000 ( 2.610819) mixed:ruby:memcache-client 1.580000 0.540000 2.120000 ( 3.632775) == 1.7.6, 1.8.7 64-bit (Snow Leopard), SystemTimer user system total real set:plain:memcache-client 3.070000 0.380000 3.450000 ( 3.643275) set:ruby:memcache-client 3.140000 0.370000 3.510000 ( 3.698602) get:plain:memcache-client 3.480000 0.360000 3.840000 ( 3.983941) get:ruby:memcache-client 3.540000 0.360000 3.900000 ( 4.034308) multiget:ruby:memcache-client 
1.690000 0.140000 1.830000 ( 1.889290) missing:ruby:memcache-client 3.070000 0.360000 3.430000 ( 3.571754) mixed:ruby:memcache-client 6.720000 0.750000 7.470000 ( 7.838771) memcache-client-1.8.5/LICENSE.txt0000644000004100000410000000271411724022457016426 0ustar www-datawww-dataCopyright 2005-2009 Bob Cottrell, Eric Hodel, Mike Perham. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the names of the authors nor the names of their contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
memcache-client-1.8.5/bin/0000755000004100000410000000000011724022457015347 5ustar www-datawww-datamemcache-client-1.8.5/bin/memcached_top0000755000004100000410000000234711724022457020073 0ustar www-datawww-data#!/usr/bin/env ruby require 'optparse' require 'ostruct' require 'socket' @options = OpenStruct.new @options.hostname = 'localhost' @options.port = 11211 op = OptionParser.new do |opts| opts.banner = "View memcached server statistics\nUsage: #{$0} [options]" opts.separator "General Options:" opts.on("-h HOSTNAME", "--hostname=HOSTNAME", "Hostname [default: localhost]") do |h| @options.hostname = h end opts.on("-p PORT", "--port=PORT", Integer, "Port [default: 11211]") do |p| @options.port = p end opts.on_tail("--help", "Show this message") do puts opts exit end end op.parse! def stats_data data = '' sock = TCPSocket.new(@options.hostname, @options.port) sock.print("stats\r\n") sock.flush # memcached does not close the socket once it is done writing # the stats data. We need to read line by line until we detect # the END line and then stop/close on our side. stats = sock.gets while true data += stats break if stats.strip == 'END' stats = sock.gets end sock.close data end def parse(stats_data) stats = [] stats_data.each_line do |line| stats << "#{$1}: #{$2}" if line =~ /STAT (\w+) (\S+)/ end stats.sort end stats = parse(stats_data) stats.each do |stat| puts stat endmemcache-client-1.8.5/metadata.yml0000644000004100000410000000315011724022457017101 0ustar www-datawww-data--- !ruby/object:Gem::Specification name: memcache-client version: !ruby/object:Gem::Version hash: 61 prerelease: false segments: - 1 - 8 - 5 version: 1.8.5 platform: ruby authors: - Eric Hodel - Robert Cottrell - Mike Perham autorequire: bindir: bin cert_chain: [] date: 2010-07-05 00:00:00 -07:00 default_executable: memcached_top dependencies: [] description: A Ruby library for accessing memcached. 
email: mperham@gmail.com executables: - memcached_top extensions: [] extra_rdoc_files: - LICENSE.txt - README.rdoc files: - FAQ.rdoc - History.rdoc - LICENSE.txt - README.rdoc - Rakefile - lib/continuum_native.rb - lib/memcache.rb - lib/memcache/event_machine.rb - lib/memcache/version.rb - lib/memcache_util.rb - performance.txt - test/test_benchmark.rb - test/test_event_machine.rb - test/test_mem_cache.rb - bin/memcached_top has_rdoc: true homepage: http://github.com/mperham/memcache-client licenses: [] post_install_message: rdoc_options: - --charset=UTF-8 require_paths: - lib required_ruby_version: !ruby/object:Gem::Requirement none: false requirements: - - ">=" - !ruby/object:Gem::Version hash: 3 segments: - 0 version: "0" required_rubygems_version: !ruby/object:Gem::Requirement none: false requirements: - - ">=" - !ruby/object:Gem::Version hash: 3 segments: - 0 version: "0" requirements: [] rubyforge_project: rubygems_version: 1.3.7 signing_key: specification_version: 3 summary: A Ruby library for accessing memcached. test_files: - test/test_benchmark.rb - test/test_event_machine.rb - test/test_mem_cache.rb memcache-client-1.8.5/FAQ.rdoc0000644000004100000410000000304311724022457016057 0ustar www-datawww-data= Memcache-client FAQ == Does memcache-client work with Ruby 1.9? Yes, Ruby 1.9 is supported. The test suite should pass completely on 1.8.6 and 1.9.1. == I'm seeing "execution expired" or "time's up!" errors, what's that all about? memcache-client 1.6.x+ now has socket operations timed out by default. This is to prevent the Ruby process from hanging if memcached or starling get into a bad state, which has been seen in production by both 37signals and FiveRuns. The default timeout is 0.5 seconds, which should be more than enough time under normal circumstances. It's possible to hit a storm of concurrent events which cause this timer to expire: a large Ruby VM can cause the GC to take a while, while also storing a large (500k-1MB value), for example. 
You can increase the timeout or disable them completely with the following configuration: Rails: config.cache_store = :mem_cache_store, 'server1', 'server2', { :timeout => nil } # no timeout native: MemCache.new ['server1', 'server2'], { :timeout => 1.0 } # 1 second timeout == Isn't Evan Weaver's memcached gem faster? The latest version of memcached-client is anywhere from 33% to 100% slower than memcached in various benchmarks. Keep in mind this means that 10,000 get requests take 1.8 sec instead of 1.2 seconds. In practice, memcache-client is unlikely to be a bottleneck in your system but there is always going to be an overhead to pure Ruby. memcache-client does have the advantage of built-in integration into Rails and should work on non-MRI platforms: JRuby, MacRuby, etc. memcache-client-1.8.5/History.rdoc0000644000004100000410000002515211724022457017116 0ustar www-datawww-data= 1.8.5 (2010-07-05) * Fix bad release = 1.8.4 (2010-07-02) * Fix unfibered usage of memcache-client in EM * Remove nag message (tenderlove) = 1.8.3 (2010-04-26) * Don't allow blank keys. (Bill Horsman) = 1.8.2 (2010-04-03) * Fix concurrency issues with eventmachine support. = 1.8.1 (2010-03-20) * Only require SystemTimer if the Ruby VM looks like MRI. * Remove VERSION.yml usage as we should avoid using files outside of lib at runtime. (josh) = 1.8.0 (2010-03-05) * Add support for EventMachine-based connections. * Add support for raw values in get_multi * Add memcached_top binary for gathering server statistics = 1.7.8 (2010-02-03) * Fix issue where autofix_keys logic did not account for namespace length. (menno) * Fix issue when using memcache-client without rubygems. (anvar) * Fix issue when using the cas method with raw=true (Takahiro Kikumoto) = 1.7.7 (2009-11-24) * Fix invalid delete request in memcached 1.4.x. The expiry parameter to MemCache#delete is now ignored as memcached 1.4.x has dropped support for this feature. 
= 1.7.6 (2009-11-03) * Reworked socket timeout code due to several customer complaints about timeouts not working 100% of the time since 1.7.3. * Add option to configure the namespace separator string, for interop with Perl which does not use a separator character: MemCache.new(servers, :namespace_separator => '') * Move to jeweler and gemcutter for RubyGem support. = 1.7.5 (2009-09-09) * Fix ruby warnings (josh) * Make megabyte value size limit optional since Tokyo Tyrant can accept values larger than 1MB. Use :check_size => false to disable the size check. (jsl) * Ruby 1.9 support for recent I/O changes. * Fix duplicate value marshalling on server error. (rajiv) * Added option :autofix_keys (disabled by default) to replace long keys with md5 hashes (sd) = 1.7.4 (2009-06-09) * Fix issue with raising timeout errors. = 1.7.3 (2009-06-06) * Remove SystemTimer support, refactor I/O to use nonblocking operations. Speeds up performance approx 100%. Timeouts basically have no overhead now! (tenderlove) * Update load logic to support SystemTimer running in Ruby Enterprise Edition. Thanks to splattael on github for the comment. = 1.7.2 (2009-04-12) * Rollback socket timeout optimization. It does not work on all operating systems and was a support headache. = 1.7.1 (2009-03-28) * Performance optimizations: * Rely on higher performance operating system socket timeouts for low-level socket read/writes where possible, instead of the (slower) SystemTimer or (slowest, unreliable) Timeout libraries. * the native binary search is back! The recent performance tuning made the binary search a bottleneck again so it had to return. It uses RubyInline to compile the native extension and silently falls back to pure Ruby if anything fails. Make sure you run: `gem install RubyInline` if you want ultimate performance. * the changes make memcache-client 100% faster than 1.7.0 in my performance test on Ruby 1.8.6: 15 sec -> 8 sec. * Fix several logging issues. 
= 1.7.0 (2009-03-08) * Go through the memcached protocol document and implement any commands not already implemented: - cas - append - prepend - replace Append and prepend only work with raw data since it makes no sense to concatenate two Marshalled values together. The cas functionality should be considered a prototype. Since I don't have an application which uses +cas+, I'm not sure what semantic sugar the API should provide. Should it retry if the value was changed? Should it massage the returned string into true/false? Feedback would be appreciated. * Add fetch method which provides a method very similar to ActiveSupport::Cache::Store#fetch, basically a wrapper around get and add. (djanowski) * Implement the flush_all delay parameter, to allow a large memcached farm to be flushed gradually. * Implement the noreply flag, which tells memcached not to reply in operations which don't need a reply, i.e. set/add/delete/flush_all. * The only known functionality not implemented anymore is the parameter to the storage commands. This would require modification of the API method signatures. If someone can come up with a clean way to implement it, I would be happy to consider including it. = 1.6.5 (2009-02-27) * Change memcache-client to multithreaded by default. The mutex does not add significant overhead and it is far too easy, now that Sinatra, Rails and Merb are all thread-safe, to use memcache-client in a thread-unsafe manner. Remove some unnecessary mutexing and add a test to verify heavily multithreaded usage does not act unexpectedly. * Add optional support for the SystemTimer gem when running on Ruby 1.8.x. This gem is highly recommended - it ensures timeouts actually work and halves the overhead of using timeouts. Using this gem, Ruby 1.8.x is actually faster in my performance tests than Ruby 1.9.x. Just "gem install SystemTimer" and it should be picked up automatically. = 1.6.4 (2009-02-19) * Remove native code altogether. 
The speedup was only 10% on Ruby 1.8.6 and did not work on Ruby 1.9.1. * Removed memcache_util.rb from the distribution. If you are using it, please copy the code into your own project. The file will live in the github repository for a few more months for this purposes. http://github.com/mperham/memcache-client/raw/7a276089aa3c914e47e3960f9740ac7377204970/lib/memcache_util.rb * Roll continuum.rb into memcache.rb. The project is again a single Ruby file, with no dependencies. = 1.6.3 (2009-02-14) * Remove gem native extension in preference to RubyInline. This allows the gem to install and work on JRuby and Ruby 1.8.5 when the native code fails to compile. = 1.6.2 (2009-02-04) * Validate that values are less than one megabyte in size. * Refactor error handling in get_multi to handle server failures and return what values we could successfully retrieve. * Add optional logging parameter for debugging and tracing. * First official release since 1.5.0. Thanks to Eric Hodel for turning over the project to me! New project home page: http://github.com/mperham/memcache-client = 1.6.1 (2009-01-28) * Add option to disable socket timeout support. Socket timeout has a significant performance penalty (approx 3x slower than without in Ruby 1.8.6). You can turn off the timeouts if you need absolute performance, but by default timeouts are enabled. The performance penalty is much lower in Ruby 1.8.7, 1.9 and JRuby. (mperham) * Add option to disable server failover. Failover can lead to "split-brain" caches that return stale data. (mperham) * Implement continuum binary search in native code for performance reasons. Pure ruby is available for platforms like JRuby or Rubinius which can't use C extensions. (mperham) * Fix #add with raw=true (iamaleksey) = 1.6.0 * Implement a consistent hashing algorithm, as described in libketama. This dramatically reduces the cost of adding or removing servers dynamically as keys are much more likely to map to the same server. 
Take a scenario where we add a fourth server. With a naive modulo algorithm, about 25% of the keys will map to the same server. In other words, 75% of your memcached content suddenly becomes invalid. With a consistent algorithm, 75% of the keys will map to the same server as before - only 25% will be invalidated. (mperham) * Implement socket timeouts, should fix rare cases of very bad things happening in production at 37signals and FiveRuns. (jseirles) = 1.5.0.5 * Remove native C CRC32_ITU_T extension in favor of Zlib's crc32 method. memcache-client is now pure Ruby again and will work with JRuby and Rubinius. = 1.5.0.4 * Get test suite working again (packagethief) * Ruby 1.9 compatiblity fixes (packagethief, mperham) * Consistently return server responses and check for errors (packagethief) * Properly calculate CRC in Ruby 1.9 strings (mperham) * Drop rspec in favor of test/unit, for 1.9 compat (mperham) = 1.5.0.3 (FiveRuns fork) * Integrated ITU-T CRC32 operation in native C extension for speed. Thanks to Justin Balthrop! = 1.5.0.2 (FiveRuns fork) * Add support for seamless failover between servers. If one server connection dies, the client will retry the operation on another server before giving up. * Merge Will Bryant's socket retry patch. http://willbryant.net/software/2007/12/21/ruby-memcache-client-reconnect-and-retry = 1.5.0.1 (FiveRuns fork) * Fix set not handling client disconnects. http://dev.twitter.com/2008/02/solving-case-of-missing-updates.html = 1.5.0 * Add MemCache#flush_all command. Patch #13019 and bug #10503. Patches submitted by Sebastian Delmont and Rick Olson. * Type-cast data returned by MemCache#stats. Patch #10505 submitted by Sebastian Delmont. = 1.4.0 * Fix bug #10371, #set does not check response for server errors. Submitted by Ben VandenBos. * Fix bug #12450, set TCP_NODELAY socket option. Patch by Chris McGrath. * Fix bug #10704, missing #add method. Patch by Jamie Macey. * Fix bug #10371, handle socket EOF in cache_get. 
Submitted by Ben VandenBos. = 1.3.0 * Apply patch #6507, add stats command. Submitted by Tyler Kovacs. * Apply patch #6509, parallel implementation of #get_multi. Submitted by Tyler Kovacs. * Validate keys. Disallow spaces in keys or keys that are too long. * Perform more validation of server responses. MemCache now reports errors if the socket was not in an expected state. (Please file bugs if you find some.) * Add #incr and #decr. * Add raw argument to #set and #get to retrieve #incr and #decr values. * Also put on MemCacheError when using Cache::get with block. * memcache.rb no longer sets $TESTING to a true value if it was previously defined. Bug #8213 by Matijs van Zuijlen. = 1.2.1 * Fix bug #7048, MemCache#servers= referenced changed local variable. Submitted by Justin Dossey. * Fix bug #7049, MemCache#initialize resets @buckets. Submitted by Justin Dossey. * Fix bug #6232, Make Cache::Get work with a block only when nil is returned. Submitted by Jon Evans. * Moved to the seattlerb project. = 1.2.0 NOTE: This version will store keys in different places than previous versions! Be prepared for some thrashing while memcached sorts itself out! * Fixed multithreaded operations, bug 5994 and 5989. Thanks to Blaine Cook, Erik Hetzner, Elliot Smith, Dave Myron (and possibly others I have forgotten). * Made memcached interoperable with other memcached libraries, bug 4509. Thanks to anonymous. * Added get_multi to match Perl/etc APIs = 1.1.0 * Added some tests * Sped up non-multithreaded and multithreaded operation * More Ruby-memcache compatibility * More RDoc * Switched to Hoe = 1.0.0 Birthday! 
memcache-client-1.8.5/lib/0000755000004100000410000000000011724022457015345 5ustar www-datawww-datamemcache-client-1.8.5/lib/memcache.rb0000644000004100000410000010430611724022457017440 0ustar www-datawww-data# encoding: utf-8 $TESTING = defined?($TESTING) && $TESTING require 'socket' require 'thread' require 'zlib' require 'digest/sha1' require 'net/protocol' require 'memcache/version' begin # Try to use the SystemTimer gem instead of Ruby's timeout library # when running on Ruby 1.8.x. See: # http://ph7spot.com/articles/system_timer # We don't want to bother trying to load SystemTimer on jruby, # ruby 1.9+ and rbx. if !defined?(RUBY_ENGINE) || (RUBY_ENGINE == 'ruby' && RUBY_VERSION < '1.9.0') require 'system_timer' MemCacheTimer = SystemTimer else require 'timeout' MemCacheTimer = Timeout end rescue LoadError => e require 'timeout' MemCacheTimer = Timeout end ## # A Ruby client library for memcached. # class MemCache ## # Default options for the cache object. DEFAULT_OPTIONS = { :namespace => nil, :readonly => false, :multithread => true, :failover => true, :timeout => 0.5, :logger => nil, :no_reply => false, :check_size => true, :autofix_keys => false, :namespace_separator => ':', } ## # Default memcached port. DEFAULT_PORT = 11211 ## # Default memcached server weight. DEFAULT_WEIGHT = 1 ## # The namespace for this instance attr_reader :namespace ## # The multithread setting for this instance attr_reader :multithread ## # Whether to try to fix keys that are too long and will be truncated by # using their SHA1 hash instead. # The hash is only used on keys longer than 250 characters, or containing spaces, # to avoid impacting performance unnecesarily. # # In theory, your code should generate correct keys when calling memcache, # so it's your responsibility and you should try to fix this problem at its source. # # But if that's not possible, enable this option and memcache-client will give you a hand. attr_reader :autofix_keys ## # The servers this client talks to. 
Play at your own peril. attr_reader :servers ## # Socket timeout limit with this client, defaults to 0.5 sec. # Set to nil to disable timeouts. attr_reader :timeout ## # Should the client try to failover to another server if the # first server is down? Defaults to true. attr_reader :failover ## # Log debug/info/warn/error to the given Logger, defaults to nil. attr_reader :logger ## # Don't send or look for a reply from the memcached server for write operations. # Please note this feature only works in memcached 1.2.5 and later. Earlier # versions will reply with "ERROR". attr_reader :no_reply ## # Accepts a list of +servers+ and a list of +opts+. +servers+ may be # omitted. See +servers=+ for acceptable server list arguments. # # Valid options for +opts+ are: # # [:namespace] Prepends this value to all keys added or retrieved. # [:readonly] Raises an exception on cache writes when true. # [:multithread] Wraps cache access in a Mutex for thread safety. Defaults to true. # [:failover] Should the client try to failover to another server if the # first server is down? Defaults to true. # [:timeout] Time to use as the socket read timeout. Defaults to 0.5 sec, # set to nil to disable timeouts. # [:logger] Logger to use for info/debug output, defaults to nil # [:no_reply] Don't bother looking for a reply for write operations (i.e. they # become 'fire and forget'), memcached 1.2.5 and later only, speeds up # set/add/delete/incr/decr significantly. # [:check_size] Raises a MemCacheError if the value to be set is greater than 1 MB, which # is the maximum key size for the standard memcached server. Defaults to true. # [:autofix_keys] If a key is longer than 250 characters or contains spaces, # use an SHA1 hash instead, to prevent collisions on truncated keys. # Other options are ignored. 
def initialize(*args)
  servers = []
  opts = {}

  case args.length
  when 0 then # NOP
  when 1 then
    arg = args.shift
    case arg
    when Hash   then opts = arg
    when Array  then servers = arg
    when String then servers = [arg]
    else raise ArgumentError, 'first argument must be Array, Hash or String'
    end
  when 2 then
    servers, opts = args
  else
    raise ArgumentError, "wrong number of arguments (#{args.length} for 2)"
  end

  # Running inside an EventMachine reactor changes connection handling and
  # disables the Mutex (fibers, not threads, provide concurrency there).
  @evented = defined?(EM) && EM.reactor_running?
  opts = DEFAULT_OPTIONS.merge opts
  @namespace   = opts[:namespace]
  @readonly    = opts[:readonly]
  @multithread = opts[:multithread] && !@evented
  @autofix_keys = opts[:autofix_keys]
  @timeout     = opts[:timeout]
  @failover    = opts[:failover]
  @logger      = opts[:logger]
  @no_reply    = opts[:no_reply]
  @check_size  = opts[:check_size]
  @namespace_separator = opts[:namespace_separator]
  @mutex       = Mutex.new if @multithread

  logger.info { "memcache-client #{VERSION} #{Array(servers).inspect}" } if logger

  # Remember the owning thread so check_multithread_status! can detect
  # cross-thread use when multithread support is off.
  Thread.current[:memcache_client] = self.object_id if !@multithread

  self.servers = servers
end

##
# Returns a string representation of the cache object.
# NOTE(review): the format string was garbled to "" in the extracted
# source (angle brackets stripped); restored to the upstream literal.

def inspect
  "<MemCache: %d servers, ns: %p, ro: %p>" %
    [@servers.length, @namespace, @readonly]
end

##
# Returns whether there is at least one active server for the object.

def active?
  not @servers.empty?
end

##
# Returns whether or not the cache object was created read only.

def readonly?
  @readonly
end

##
# Set the servers that the requests will be distributed between.  Entries
# can be either strings of the form "hostname:port" or
# "hostname:port:weight" or MemCache::Server objects.

def servers=(servers)
  # Create the server objects.
  @servers = Array(servers).collect do |server|
    case server
    when String
      host, port, weight = server.split ':', 3
      port ||= DEFAULT_PORT
      weight ||= DEFAULT_WEIGHT
      Server.new self, host, port, weight
    else
      server
    end
  end

  logger.debug { "Servers now: #{@servers.inspect}" } if logger

  # There's no point in doing this if there's only one server
  @continuum = create_continuum_for(@servers) if @servers.size > 1

  @servers
end

##
# Decrements the value for +key+ by +amount+ and returns the new value.
# +key+ must already exist.  If +key+ is not an integer, it is assumed to be
# 0.  +key+ can not be decremented below 0.

def decr(key, amount = 1)
  raise MemCacheError, "Update of readonly cache" if @readonly
  with_server(key) do |server, cache_key|
    cache_decr server, cache_key, amount
  end
rescue TypeError => err
  handle_error nil, err
end

##
# Retrieves +key+ from memcache.  If +raw+ is false, the value will be
# unmarshalled.

def get(key, raw = false)
  with_server(key) do |server, cache_key|
    logger.debug { "get #{key} from #{server.inspect}" } if logger
    value = cache_get server, cache_key
    return nil if value.nil?
    value = Marshal.load value unless raw
    return value
  end
rescue TypeError => err
  handle_error nil, err
end

##
# Performs a +get+ with the given +key+.  If
# the value does not exist and a block was given,
# the block will be called and the result saved via +add+.
#
# If you do not provide a block, using this
# method is the same as using +get+.

def fetch(key, expiry = 0, raw = false)
  value = get(key, raw)

  if value.nil? && block_given?
    value = yield
    add(key, value, expiry, raw)
  end

  value
end

##
# Retrieves multiple values from memcached in parallel, if possible.
#
# The memcached protocol supports the ability to retrieve multiple
# keys in a single request.  Pass in an array of keys to this method
# and it will:
#
# 1. map the key to the appropriate memcached server
# 2. send a single request to each server that has one or more key values
#
# Returns a hash of values.
#
#   cache["a"] = 1
#   cache["b"] = 2
#   cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 }
#
# Note that get_multi assumes the values are marshalled.  You can pass
# in :raw => true to bypass value marshalling.
#
#   cache.get_multi('a', 'b', ..., :raw => true)

def get_multi(*keys)
  raise MemCacheError, 'No active servers' unless active?

  opts = keys.last.is_a?(Hash) ? keys.pop : {}

  keys.flatten!
  key_count = keys.length
  cache_keys = {}
  server_keys = Hash.new { |h,k| h[k] = [] }

  # map keys to servers
  keys.each do |key|
    server, cache_key = request_setup key
    cache_keys[cache_key] = key
    server_keys[server] << cache_key
  end

  results = {}
  raw = opts[:raw] || false
  server_keys.each do |server, keys_for_server|
    keys_for_server_str = keys_for_server.join ' '
    begin
      values = cache_get_multi server, keys_for_server_str
      values.each do |key, value|
        results[cache_keys[key]] = raw ? value : Marshal.load(value)
      end
    rescue IndexError => e
      # Ignore this server and try the others
      logger.warn { "Unable to retrieve #{keys_for_server.size} elements from #{server.inspect}: #{e.message}"} if logger
    end
  end

  return results
rescue TypeError => err
  handle_error nil, err
end

##
# Increments the value for +key+ by +amount+ and returns the new value.
# +key+ must already exist.  If +key+ is not an integer, it is assumed to be
# 0.

def incr(key, amount = 1)
  raise MemCacheError, "Update of readonly cache" if @readonly
  with_server(key) do |server, cache_key|
    cache_incr server, cache_key, amount
  end
rescue TypeError => err
  handle_error nil, err
end

##
# Add +key+ to the cache with value +value+ that expires in +expiry+
# seconds.  If +raw+ is true, +value+ will not be Marshalled.
#
# Warning: Readers should not call this method in the event of a cache miss;
# see MemCache#add.

ONE_MB = 1024 * 1024

def set(key, value, expiry = 0, raw = false)
  raise MemCacheError, "Update of readonly cache" if @readonly
  value = Marshal.dump value unless raw
  with_server(key) do |server, cache_key|
    logger.debug { "set #{key} to #{server.inspect}: #{value.to_s.size}" } if logger

    # Standard memcached rejects values over 1MB; fail fast client-side.
    if @check_size && value.to_s.size > ONE_MB
      raise MemCacheError, "Value too large, memcached can only store 1MB of data per key"
    end

    command = "set #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"

    with_socket_management(server) do |socket|
      socket.write command
      break nil if @no_reply
      result = socket.gets
      raise_on_error_response! result

      if result.nil?
        server.close
        raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
      end

      result
    end
  end
end

##
# "cas" is a check and set operation which means "store this data but
# only if no one else has updated since I last fetched it."  This can
# be used as a form of optimistic locking.
#
# Works in block form like so:
#   cache.cas('some-key') do |value|
#     value + 1
#   end
#
# Returns:
# +nil+ if the value was not found on the memcached server.
# +STORED+ if the value was updated successfully
# +EXISTS+ if the value was updated by someone else since last fetch

def cas(key, expiry=0, raw=false)
  raise MemCacheError, "Update of readonly cache" if @readonly
  raise MemCacheError, "A block is required" unless block_given?

  (value, token) = gets(key, raw)
  return nil unless value
  updated = yield value
  value = raw ? updated : Marshal.dump(updated)

  with_server(key) do |server, cache_key|
    logger.debug { "cas #{key} to #{server.inspect}: #{value.to_s.size}" } if logger
    command = "cas #{cache_key} 0 #{expiry} #{value.to_s.size} #{token}#{noreply}\r\n#{value}\r\n"

    with_socket_management(server) do |socket|
      socket.write command
      break nil if @no_reply
      result = socket.gets
      raise_on_error_response! result

      if result.nil?
        server.close
        raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
      end

      result
    end
  end
end

##
# Add +key+ to the cache with value +value+ that expires in +expiry+
# seconds, but only if +key+ does not already exist in the cache.
# If +raw+ is true, +value+ will not be Marshalled.
#
# Readers should call this method in the event of a cache miss, not
# MemCache#set.

def add(key, value, expiry = 0, raw = false)
  raise MemCacheError, "Update of readonly cache" if @readonly
  value = Marshal.dump value unless raw
  with_server(key) do |server, cache_key|
    logger.debug { "add #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
    command = "add #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"

    with_socket_management(server) do |socket|
      socket.write command
      break nil if @no_reply
      result = socket.gets
      raise_on_error_response! result
      result
    end
  end
end

##
# Add +key+ to the cache with value +value+ that expires in +expiry+
# seconds, but only if +key+ already exists in the cache.
# If +raw+ is true, +value+ will not be Marshalled.

def replace(key, value, expiry = 0, raw = false)
  raise MemCacheError, "Update of readonly cache" if @readonly
  value = Marshal.dump value unless raw
  with_server(key) do |server, cache_key|
    logger.debug { "replace #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
    command = "replace #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"

    with_socket_management(server) do |socket|
      socket.write command
      break nil if @no_reply
      result = socket.gets
      raise_on_error_response! result
      result
    end
  end
end

##
# Append - 'add this data to an existing key after existing data'
# Please note the value is always passed to memcached as raw since it
# doesn't make a lot of sense to concatenate marshalled data together.
def append(key, value)
  raise MemCacheError, "Update of readonly cache" if @readonly
  with_server(key) do |server, cache_key|
    logger.debug { "append #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
    command = "append #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n"

    with_socket_management(server) do |socket|
      socket.write command
      # In no_reply ('fire and forget') mode memcached sends nothing back,
      # so bail out before blocking on a read.
      break nil if @no_reply
      result = socket.gets
      raise_on_error_response! result
      result # raw server response line (e.g. "STORED\r\n"), or nil in no_reply mode
    end
  end
end

##
# Prepend - 'add this data to an existing key before existing data'
# Please note the value is always passed to memcached as raw since it
# doesn't make a lot of sense to concatenate marshalled data together.

def prepend(key, value)
  raise MemCacheError, "Update of readonly cache" if @readonly
  with_server(key) do |server, cache_key|
    logger.debug { "prepend #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
    command = "prepend #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n"

    with_socket_management(server) do |socket|
      socket.write command
      # No reply is sent by the server in no_reply mode.
      break nil if @no_reply
      result = socket.gets
      raise_on_error_response! result
      result # raw server response line, or nil in no_reply mode
    end
  end
end

##
# Removes +key+ from the cache.
# +expiry+ is ignored as it has been removed from the latest memcached version.

def delete(key, expiry = 0)
  raise MemCacheError, "Update of readonly cache" if @readonly
  with_server(key) do |server, cache_key|
    with_socket_management(server) do |socket|
      logger.debug { "delete #{cache_key} on #{server}" } if logger
      socket.write "delete #{cache_key}#{noreply}\r\n"
      # No reply is sent by the server in no_reply mode.
      break nil if @no_reply
      result = socket.gets
      raise_on_error_response! result
      result # raw server response line (e.g. "DELETED\r\n"), or nil in no_reply mode
    end
  end
end

##
# Flush the cache from all memcache servers.
# A non-zero value for +delay+ will ensure that the flush
# is propagated slowly through your memcached server farm.
# The Nth server will be flushed N*delay seconds from now,
# asynchronously so this method returns quickly.
# This prevents a huge database spike due to a total
# flush all at once.
def flush_all(delay=0)
  raise MemCacheError, 'No active servers' unless active?
  raise MemCacheError, "Update of readonly cache" if @readonly

  begin
    delay_time = 0
    @servers.each do |server|
      with_socket_management(server) do |socket|
        logger.debug { "flush_all #{delay_time} on #{server}" } if logger
        if delay == 0 # older versions of memcached will fail silently otherwise
          socket.write "flush_all#{noreply}\r\n"
        else
          socket.write "flush_all #{delay_time}#{noreply}\r\n"
        end
        break nil if @no_reply
        result = socket.gets
        raise_on_error_response! result
        result
      end
      # Stagger each server's flush by +delay+ seconds.
      delay_time += delay
    end
  rescue IndexError => err
    handle_error nil, err
  end
end

##
# Reset the connection to all memcache servers.  This should be called if
# there is a problem with a cache lookup that might have left the connection
# in a corrupted state.

def reset
  @servers.each { |server| server.close }
end

##
# Returns statistics for each memcached server.  An explanation of the
# statistics can be found in the memcached docs:
#
# http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
#
# Example:
#
#   >> pp CACHE.stats
#   {"localhost:11211"=>
#     {"bytes"=>4718,
#      "pid"=>20188,
#      "cmd_get"=>14532,
#      "version"=>"1.2.0",
#      "rusage_system"=>0.313952,
#      ...}}

def stats
  raise MemCacheError, "No active servers" unless active?
  server_stats = {}

  @servers.each do |server|
    next unless server.alive?

    with_socket_management(server) do |socket|
      value = nil
      socket.write "stats\r\n"
      stats = {}
      while line = socket.gets do
        raise_on_error_response! line
        break if line == "END\r\n"
        if line =~ /\ASTAT ([\S]+) ([\w\.\:]+)/ then
          name, value = $1, $2
          stats[name] = case name
                        when 'version'
                          # Version is a dotted string; keep it verbatim.
                          value
                        when 'rusage_user', 'rusage_system' then
                          # rusage comes back as "seconds:microseconds".
                          seconds, microseconds = value.split(/:/, 2)
                          microseconds ||= 0
                          Float(seconds) + (Float(microseconds) / 1_000_000)
                        else
                          # Coerce plain integers; leave anything else a string.
                          if value =~ /\A\d+\Z/ then
                            value.to_i
                          else
                            value
                          end
                        end
        end
      end
      server_stats["#{server.host}:#{server.port}"] = stats
    end
  end

  raise MemCacheError, "No active servers" if server_stats.empty?
  server_stats
end

##
# Shortcut to get a value from the cache.

alias [] get

##
# Shortcut to save a value in the cache.  This method does not set an
# expiration on the entry.  Use set to specify an explicit expiry.

def []=(key, value)
  set key, value
end

protected unless $TESTING

##
# Create a key for the cache, incorporating the namespace qualifier if
# requested.

def make_cache_key(key)
  # Overlong or space-containing keys would be rejected/truncated by the
  # server; optionally replace them with a SHA1-derived surrogate.
  if @autofix_keys && (key =~ /\s/ || key_length(key) > 250)
    key = "#{Digest::SHA1.hexdigest(key)}-autofixed"
  end

  if namespace.nil?
    key
  else
    "#{@namespace}#{@namespace_separator}#{key}"
  end
end

##
# Calculate length of the key, including the namespace and namespace-separator.

def key_length(key)
  key.length + (namespace.nil? ? 0 : ( namespace.length + (@namespace_separator.nil? ? 0 : @namespace_separator.length) ) )
end

##
# Returns an interoperable hash value for +key+.  (I think, docs are
# sketchy for down servers).

def hash_for(key)
  Zlib.crc32(key)
end

##
# Pick a server to handle the request based on a hash of the key.

def get_server_for_key(key, options = {})
  raise ArgumentError, "illegal character in key #{key.inspect}" if
    key =~ /\s/
  raise ArgumentError, "key cannot be blank" if
    key.nil? || key.strip.size == 0
  raise ArgumentError, "key too long #{key.inspect}" if key.length > 250
  raise MemCacheError, "No servers available" if @servers.empty?
  return @servers.first if @servers.length == 1

  hkey = hash_for(key)

  # Walk the continuum; on a dead server, rehash with a retry prefix to
  # land elsewhere (bounded to 20 attempts).
  20.times do |try|
    entryidx = Continuum.binary_search(@continuum, hkey)
    server = @continuum[entryidx].server
    return server if server.alive?
    break unless failover
    hkey = hash_for "#{try}#{key}"
  end

  raise MemCacheError, "No servers available"
end

##
# Performs a raw decr for +cache_key+ from +server+.  Returns nil if not
# found.

def cache_decr(server, cache_key, amount)
  with_socket_management(server) do |socket|
    socket.write "decr #{cache_key} #{amount}#{noreply}\r\n"
    break nil if @no_reply
    text = socket.gets
    raise_on_error_response! text
    return nil if text == "NOT_FOUND\r\n"
    return text.to_i
  end
end

##
# Fetches the raw data for +cache_key+ from +server+.  Returns nil on cache
# miss.

def cache_get(server, cache_key)
  with_socket_management(server) do |socket|
    socket.write "get #{cache_key}\r\n"
    keyline = socket.gets # response header line, ends with the byte count

    if keyline.nil? then
      server.close
      raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
    end

    raise_on_error_response! keyline
    return nil if keyline == "END\r\n"

    # Extract the data length (last number on the VALUE line).
    unless keyline =~ /(\d+)\r/ then
      server.close
      raise MemCacheError, "unexpected response #{keyline.inspect}"
    end
    value = socket.read $1.to_i
    socket.read 2 # "\r\n"
    socket.gets   # "END\r\n"
    return value
  end
end

##
# Performs a raw gets (get-with-cas-token) for +key+.  Returns a
# [value, cas_token] pair, with the value unmarshalled unless +raw+.

def gets(key, raw = false)
  with_server(key) do |server, cache_key|
    logger.debug { "gets #{key} from #{server.inspect}" } if logger
    result = with_socket_management(server) do |socket|
      socket.write "gets #{cache_key}\r\n"
      keyline = socket.gets # response header line with byte count and cas token

      if keyline.nil? then
        server.close
        raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
      end

      raise_on_error_response! keyline
      return nil if keyline == "END\r\n"

      # $1 is the data length, $2 the cas unique token.
      unless keyline =~ /(\d+) (\w+)\r/ then
        server.close
        raise MemCacheError, "unexpected response #{keyline.inspect}"
      end
      value = socket.read $1.to_i
      socket.read 2 # "\r\n"
      socket.gets   # "END\r\n"
      [value, $2]
    end
    result[0] = Marshal.load result[0] unless raw
    result
  end
rescue TypeError => err
  handle_error nil, err
end

##
# Fetches +cache_keys+ from +server+ using a multi-get.

def cache_get_multi(server, cache_keys)
  with_socket_management(server) do |socket|
    values = {}
    socket.write "get #{cache_keys}\r\n"

    while keyline = socket.gets do
      return values if keyline == "END\r\n"
      raise_on_error_response! keyline

      unless keyline =~ /\AVALUE (.+) (.+) (.+)/ then
        server.close
        raise MemCacheError, "unexpected response #{keyline.inspect}"
      end

      key, data_length = $1, $3
      values[$1] = socket.read data_length.to_i
      socket.read(2) # "\r\n"
    end

    server.close
    raise MemCacheError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too
  end
end

##
# Performs a raw incr for +cache_key+ from +server+.  Returns nil if not
# found.

def cache_incr(server, cache_key, amount)
  with_socket_management(server) do |socket|
    socket.write "incr #{cache_key} #{amount}#{noreply}\r\n"
    break nil if @no_reply
    text = socket.gets
    raise_on_error_response! text
    return nil if text == "NOT_FOUND\r\n"
    return text.to_i
  end
end

##
# Gets or creates a socket connected to the given server, and yields it
# to the block, wrapped in a mutex synchronization if @multithread is true.
#
# If a socket error (SocketError, SystemCallError, IOError) or protocol error
# (MemCacheError) is raised by the block, closes the socket, attempts to
# connect again, and retries the block (once).  If an error is again raised,
# reraises it as MemCacheError.
#
# If unable to connect to the server (or if in the reconnect wait period),
# raises MemCacheError.  Note that the socket connect code marks a server
# dead for a timeout period, so retrying does not apply to connection attempt
# failures (but does still apply to unexpectedly lost connections etc.).

def with_socket_management(server, &block)
  check_multithread_status!

  @mutex.lock if @multithread
  retried = false

  begin
    socket = server.socket

    # Raise an IndexError to show this server is out of whack. If we're inside
    # a with_server block, we'll catch it and attempt to restart the operation.
    raise IndexError, "No connection to server (#{server.status})" if socket.nil?

    block.call(socket)

  rescue SocketError, Errno::EAGAIN, Timeout::Error => err
    logger.warn { "Socket failure: #{err.message}" } if logger
    # Hard network failure: mark the server dead (RETRY_DELAY cooldown).
    server.mark_dead(err)
    handle_error(server, err)

  rescue MemCacheError, SystemCallError, IOError => err
    logger.warn { "Generic failure: #{err.class.name}: #{err.message}" } if logger
    # Give the block one retry on a fresh socket before giving up.
    handle_error(server, err) if retried || socket.nil?
    retried = true
    retry
  ensure
    @mutex.unlock if @multithread
  end
end

##
# Resolves +key+ to a server and yields; on IndexError (server out of
# commission) retries once against the remaining servers before failing.

def with_server(key)
  retried = false
  begin
    server, cache_key = request_setup(key)
    yield server, cache_key
  rescue IndexError => e
    logger.warn { "Server failed: #{e.class.name}: #{e.message}" } if logger
    if !retried && @servers.size > 1
      logger.info { "Connection to server #{server.inspect} DIED! Retrying operation..." } if logger
      retried = true
      retry
    end
    handle_error(nil, e)
  end
end

##
# Handles +error+ from +server+.  Closes the server connection (if still
# marked CONNECTED) and re-raises the error wrapped as a MemCacheError,
# preserving the original backtrace.

def handle_error(server, error)
  raise error if error.is_a?(MemCacheError)
  server.close if server && server.status == "CONNECTED"
  new_error = MemCacheError.new error.message
  new_error.set_backtrace error.backtrace
  raise new_error
end

# Protocol suffix for write commands when no_reply mode is enabled.
def noreply
  @no_reply ? ' noreply' : ''
end

##
# Performs setup for making a request with +key+ from memcached.  Returns
# the server to fetch the key from and the complete key to use.

def request_setup(key)
  raise MemCacheError, 'No active servers' unless active?
  cache_key = make_cache_key key
  server = get_server_for_key cache_key
  return server, cache_key
end

##
# Raises a MemCacheError if +response+ is a protocol-level error line
# (ERROR / CLIENT_ERROR / SERVER_ERROR).

def raise_on_error_response!(response)
  if response =~ /\A(?:CLIENT_|SERVER_)?ERROR(.*)/
    raise MemCacheError, $1.strip
  end
end

##
# Builds the sorted consistent-hashing continuum for +servers+, giving
# each server a share of points proportional to its weight.

def create_continuum_for(servers)
  total_weight = servers.inject(0) { |memo, srv| memo + srv.weight }
  continuum = []

  servers.each do |server|
    entry_count_for(server, servers.size, total_weight).times do |idx|
      # The top 32 bits of the SHA1 digest position this point on the ring.
      hash = Digest::SHA1.hexdigest("#{server.host}:#{server.port}:#{idx}")
      value = Integer("0x#{hash[0..7]}")
      continuum << Continuum::Entry.new(value, server)
    end
  end

  continuum.sort { |a, b| a.value <=> b.value }
end

##
# Number of continuum points for +server+, scaled by its relative weight.

def entry_count_for(server, total_servers, total_weight)
  ((total_servers * Continuum::POINTS_PER_SERVER * server.weight) / Float(total_weight)).floor
end

##
# Raises if this non-multithreaded instance is touched from a thread other
# than the one that created it (skipped under EventMachine).

def check_multithread_status!
  return if @multithread
  return if @evented

  if Thread.current[:memcache_client] != self.object_id
    raise MemCacheError, <<-EOM
      You are accessing this memcache-client instance from multiple threads but have not enabled multithread support.
      Normally:  MemCache.new(['localhost:11211'], :multithread => true)
      In Rails:  config.cache_store = [:mem_cache_store, 'localhost:11211', { :multithread => true }]
    EOM
  end
end

##
# This class represents a memcached server instance.

class Server

  ##
  # The amount of time to wait before attempting to re-establish a
  # connection with a server that is marked dead.

  RETRY_DELAY = 30.0

  ##
  # The host the memcached server is running on.

  attr_reader :host

  ##
  # The port the memcached server is listening on.

  attr_reader :port

  ##
  # The weight given to the server.

  attr_reader :weight

  ##
  # The time of next retry if the connection is dead.

  attr_reader :retry

  ##
  # A text status string describing the state of the server.

  attr_reader :status

  attr_reader :logger

  ##
  # Create a new MemCache::Server object for the memcached instance
  # listening on the given host and port, weighted by the given weight.
def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT) raise ArgumentError, "No host specified" if host.nil? or host.empty? raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero? @host = host @port = port.to_i @weight = weight.to_i @sock = nil @retry = nil @status = 'NOT CONNECTED' @timeout = memcache.timeout @logger = memcache.logger if defined?(EM) and EM.reactor_running? and defined?(MemCache::EventedServer) self.extend(MemCache::EventedServer) end end ## # Return a string representation of the server object. def inspect "" % [@host, @port, @weight, @status] end ## # Check whether the server connection is alive. This will cause the # socket to attempt to connect if it isn't already connected and or if # the server was previously marked as down and the retry time has # been exceeded. def alive? !!socket end ## # Try to connect to the memcached server targeted by this object. # Returns the connected socket object on success or nil on failure. def socket return @sock if @sock and not @sock.closed? @sock = nil # If the host was dead, don't retry for a while. return if @retry and @retry > Time.now # Attempt to connect if not already connected. begin @sock = connect_to(@host, @port, @timeout) @sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1 @retry = nil @status = 'CONNECTED' rescue SocketError, SystemCallError, IOError, Timeout::Error => err logger.warn { "Unable to open socket: #{err.class.name}, #{err.message}" } if logger mark_dead err end return @sock end def connect_to(host, port, timeout=nil) sock = nil if timeout MemCacheTimer.timeout(timeout) do sock = TCPSocket.new(host, port) end else sock = TCPSocket.new(host, port) end io = MemCache::BufferedIO.new(sock) io.read_timeout = timeout # Getting reports from several customers, including 37signals, # that the non-blocking timeouts in 1.7.5 don't seem to be reliable. # It can't hurt to set the underlying socket timeout also, if possible. 
if timeout secs = Integer(timeout) usecs = Integer((timeout - secs) * 1_000_000) optval = [secs, usecs].pack("l_2") begin io.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval io.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval rescue Exception => ex # Solaris, for one, does not like/support socket timeouts. @logger.info "[memcache-client] Unable to use raw socket timeouts: #{ex.class.name}: #{ex.message}" if @logger end end io end ## # Close the connection to the memcached server targeted by this # object. The server is not considered dead. def close @sock.close if @sock && !@sock.closed? @sock = nil @retry = nil @status = "NOT CONNECTED" end ## # Mark the server as dead and close its socket. def mark_dead(error) close @retry = Time.now + RETRY_DELAY reason = "#{error.class.name}: #{error.message}" @status = sprintf "%s:%s DEAD (%s), will retry at %s", @host, @port, reason, @retry @logger.info { @status } if @logger end end ## # Base MemCache exception class. class MemCacheError < RuntimeError; end class BufferedIO < Net::BufferedIO # :nodoc: BUFSIZE = 1024 * 16 if RUBY_VERSION < '1.9.1' def rbuf_fill begin @rbuf << @io.read_nonblock(BUFSIZE) rescue Errno::EWOULDBLOCK retry unless @read_timeout if IO.select([@io], nil, nil, @read_timeout) retry else raise Timeout::Error, 'IO timeout' end end end end def setsockopt(*args) @io.setsockopt(*args) end def gets readuntil("\n") end end end module Continuum POINTS_PER_SERVER = 160 # this is the default in libmemcached # Find the closest index in Continuum with value <= the given value def self.binary_search(ary, value, &block) upper = ary.size - 1 lower = 0 idx = 0 while(lower <= upper) do idx = (lower + upper) / 2 comp = ary[idx].value <=> value if comp == 0 return idx elsif comp > 0 upper = idx - 1 else lower = idx + 1 end end return upper end class Entry attr_reader :value attr_reader :server def initialize(val, srv) @value = val @server = srv end def inspect "<#{value}, 
#{server.host}:#{server.port}>" end end end require 'continuum_native' memcache-client-1.8.5/lib/memcache/0000755000004100000410000000000011724022457017107 5ustar www-datawww-datamemcache-client-1.8.5/lib/memcache/version.rb0000644000004100000410000000012711724022457021121 0ustar www-datawww-dataclass MemCache ## # The version of MemCache you are using. VERSION = "1.8.5" endmemcache-client-1.8.5/lib/memcache/event_machine.rb0000644000004100000410000000671711724022457022254 0ustar www-datawww-data# Extensions for using memcache-client with EventMachine raise "memcache/event_machine requires Ruby 1.9" if RUBY_VERSION < '1.9' require 'memcache' require 'eventmachine' require 'fiber' class MemCache # Since we are working in a single Thread, multiple Fiber environment, # disable the multithread Mutex as it will not work. # DEFAULT_OPTIONS[:multithread] = false module EventedServer def fiber_key @fiber_key ||= "memcached-#{@host}-#{@port}" end def socket sock = Thread.current[fiber_key] return sock if sock and not sock.closed? Thread.current[fiber_key] = nil # If the host was dead, don't retry for a while. return if @retry and @retry > Time.now Thread.current[fiber_key] ||= begin sock = EM::SocketConnection.connect(@host, @port, @timeout) yielding = true fiber = Fiber.current sock.callback do @status = 'CONNECTED' @retry = nil yielding = false fiber.resume if Fiber.current != fiber end sock.errback do sock = nil yielding = false fiber.resume if Fiber.current != fiber end Fiber.yield if yielding sock end end def close sock = Thread.current[fiber_key] if sock sock.close if !sock.closed? Thread.current[fiber_key] = nil end @retry = nil @status = "NOT CONNECTED" end end end module EM module SocketConnection include EM::Deferrable def self.connect(host, port, timeout) EM.connect(host, port, self) do |conn| conn.pending_connect_timeout = timeout end end def initialize @connected = false @index = 0 @buf = '' end def closed? 
!@connected end def close @connected = false close_connection(true) end def write(buf) send_data(buf) end def read(size) if can_read?(size) yank(size) else fiber = Fiber.current @size = size @callback = proc { |data| fiber.resume(data) } # TODO Can leak fiber if the connection dies while # this fiber is yielded, waiting for data Fiber.yield end end SEP = "\r\n" def gets while true # Read to ensure we have some data in the buffer line = read(2) # Reset the buffer index to zero @buf = @buf.slice(@index..-1) @index = 0 if eol = @buf.index(SEP) line << yank(eol + SEP.size) break else # EOL not in the current buffer line << yank(@buf.size) end end line end def can_read?(size) @buf.size >= @index + size end # EM callbacks def receive_data(data) @buf << data if @callback and can_read?(@size) callback = @callback data = yank(@size) @callback = @size = nil callback.call(data) end end def post_init @connected = true succeed end def unbind if @connected @connected = false else fail end end private BUFFER_SIZE = 4096 def yank(len) data = @buf.slice(@index, len) @index += len @index = @buf.size if @index > @buf.size if @index >= BUFFER_SIZE @buf = @buf.slice(@index..-1) @index = 0 end data end end endmemcache-client-1.8.5/lib/memcache_util.rb0000644000004100000410000000554411724022457020501 0ustar www-datawww-data## # A utility wrapper around the MemCache client to simplify cache access. All # methods silently ignore MemCache errors. # # This API is deprecated, please use the Rails.cache API or your own wrapper API # around MemCache. module Cache ## # Try to return a logger object that does not rely # on ActiveRecord for logging. def self.logger @logger ||= if defined? Rails.logger # Rails 2.1 + Rails.logger elsif defined? RAILS_DEFAULT_LOGGER # Rails 1.2.2 + RAILS_DEFAULT_LOGGER else ActiveRecord::Base.logger # ... very old Rails. 
end end ## # Returns the object at +key+ from the cache if successful, or nil if either # the object is not in the cache or if there was an error attermpting to # access the cache. # # If there is a cache miss and a block is given the result of the block will # be stored in the cache with optional +expiry+, using the +add+ method rather # than +set+. def self.get(key, expiry = 0) start_time = Time.now value = CACHE.get key elapsed = Time.now - start_time logger.debug('MemCache Get (%0.6f) %s' % [elapsed, key]) if value.nil? and block_given? then value = yield add key, value, expiry end value rescue MemCache::MemCacheError => err logger.debug "MemCache Error: #{err.message}" if block_given? then value = yield put key, value, expiry end value end ## # Sets +value+ in the cache at +key+, with an optional +expiry+ time in # seconds. def self.put(key, value, expiry = 0) start_time = Time.now CACHE.set key, value, expiry elapsed = Time.now - start_time logger.debug('MemCache Set (%0.6f) %s' % [elapsed, key]) value rescue MemCache::MemCacheError => err ActiveRecord::Base.logger.debug "MemCache Error: #{err.message}" nil end ## # Sets +value+ in the cache at +key+, with an optional +expiry+ time in # seconds. If +key+ already exists in cache, returns nil. def self.add(key, value, expiry = 0) start_time = Time.now response = CACHE.add key, value, expiry elapsed = Time.now - start_time logger.debug('MemCache Add (%0.6f) %s' % [elapsed, key]) (response == "STORED\r\n") ? value : nil rescue MemCache::MemCacheError => err ActiveRecord::Base.logger.debug "MemCache Error: #{err.message}" nil end ## # Deletes +key+ from the cache in +delay+ seconds. def self.delete(key, delay = nil) start_time = Time.now CACHE.delete key, delay elapsed = Time.now - start_time logger.debug('MemCache Delete (%0.6f) %s' % [elapsed, key]) nil rescue MemCache::MemCacheError => err logger.debug "MemCache Error: #{err.message}" nil end ## # Resets all connections to MemCache servers. 
def self.reset CACHE.reset logger.debug 'MemCache Connections Reset' nil end end memcache-client-1.8.5/lib/continuum_native.rb0000644000004100000410000000217511724022457021266 0ustar www-datawww-datamodule Continuum class << self # Native extension to perform the binary search within the continuum # space. There's a pure ruby version in memcache.rb so this is purely # optional for performance and only necessary if you are using multiple # memcached servers. begin require 'inline' inline do |builder| builder.c <<-EOM int binary_search(VALUE ary, unsigned int r) { int upper = RARRAY_LEN(ary) - 1; int lower = 0; int idx = 0; ID value = rb_intern("value"); while (lower <= upper) { idx = (lower + upper) / 2; VALUE continuumValue = rb_funcall(RARRAY_PTR(ary)[idx], value, 0); unsigned int l = NUM2UINT(continuumValue); if (l == r) { return idx; } else if (l > r) { upper = idx - 1; } else { lower = idx + 1; } } return upper; } EOM end rescue Exception => e end end end