em-mongo-0.5.1/0000755000004100000410000000000012313272263013264 5ustar www-datawww-dataem-mongo-0.5.1/Rakefile0000644000004100000410000000631412313272263014735 0ustar www-datawww-data require 'fileutils' require 'tmpdir' def em_mongo_version File.read("VERSION").strip end def root_dir File.dirname(__FILE__) end task :default => 'spec:integration:default' class MongoRunner def self.run(options={}) auth = "--auth" if options[:auth] dir = Dir.tmpdir + "/em-mongo-tests-#$$" FileUtils.mkdir dir pidf = "#{dir}/mongod.pid" logf = "#{dir}/mongo.log" begin system "mongod run #{auth} --fork -vvvvvvv --dbpath #{dir} --pidfilepath #{pidf} --logpath #{logf} >> /dev/null " yield if block_given? ensure if File.exists?(pidf) and File.read(pidf).to_i != 0 Process.kill("KILL", File.read(pidf).to_i) FileUtils.rm_r dir unless options[:noclean] end end end end spec = eval(File.read('em-mongo.gemspec')) namespace :bundle do task :install do if `bundle check` =~ /bundle install/ system("bundle install --path vendor/gems") end end end namespace :gem do desc "build gem" task :build do puts "Building em-mongo #{em_mongo_version}" system "gem build em-mongo.gemspec -q" end desc "release gem" task :release do system "gem push em-mongo-#{em_mongo_version}.gem" end end namespace :spec do namespace :gem do desc "bundler tests" task :bundler do MongoRunner.run do print "Testing Bundler integration ... " if system "cd spec/gem && bundle install --quiet && ./bundler.rb" puts "SUCCESS." else puts "FAILURE." end end end desc "rubygems tests" task :rubygems do MongoRunner.run do print "Testing Rubygems integration ... " steps =[] steps << "cd spec/gem" if `gem list -i em-mongo` == 'true' steps << "gem uninstall --force -a em-mongo >/dev/null" end steps << "gem install #{root_dir}/em-mongo-#{em_mongo_version}.gem >/dev/null " steps << "./rubygems.rb" if system steps.join(" && ") puts "SUCCESS." else puts "FAILURE." 
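          # Note: this task installs the gem file built into the project root
          # (see `rake gem:build`); the top-level `release` task chains
          # gem:build, spec:gem:bundler and spec:gem:rubygems together.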
end end end desc "all gem tests" task :all => [:bundler, :rubygems] end namespace :integration do desc "default tests" task :default => ['bundle:install'] do MongoRunner.run do files = spec.test_files.select{|x| x =~ /integration/}.join(' ') system "bundle exec spec #{files} -t -b -fs -color" end end desc "exhaustive tests" task :exhaustive => ['bundle:install'] do MongoRunner.run({:noclean => true}) do files = spec.test_files.select{|x| x =~ /integration/}.join(' ') system "bundle exec spec #{files} -t -b -fs -color" end MongoRunner.run({:auth => true}) do files = spec.test_files.select{|x| x =~ /integration/}.join(' ') system "bundle exec spec #{files} -t -b -fs -color" end end desc "default tests, but don't start mongodb for me" task :no_mongo => ['bundle:install'] do files = spec.test_files.select{|x| x =~ /integration/}.join(' ') system "bundle exec spec #{files} -t -b -fs -color" end end desc "release testing" task :release => ['spec:integration:default','gem:build','spec:gem:bundler','spec:gem:rubygems'] end em-mongo-0.5.1/CHANGELOG0000644000004100000410000000556312313272263014507 0ustar www-datawww-data- 0.5.1 * fixed bson dependency (do not use 2.x) * fixed test running * removed bash only expression from gemspec - 0.5.0 * debian packaging changes (https://github.com/bcg/em-mongo/pull/53) * added new 2d indexes EM::Mongo::FLAT2D, EM::Mongo::SPHERE2D (https://github.com/bcg/em-mongo/pull/49) - 0.4.3 * ensure new buffers aren't created when not needed (FlockOfBirds) #40 - 0.4.2 * defer_as_a instead to to_a * bugger optimizations * Jruby fixes - 0.4.1 * fix collection response value on success (pawelpacana) * whitepsace (pawelpacana) - 0.4.0 * added a Cursor implementation * replaced raw callbacks in the API with deferrables * removed CRUD methods from the Connection * added a variety of server side aggregation commands * added support for indexes * added safe update methods - 0.3.6 * dj2: fixes reconnects. (see: https://github.com/bcg/em-mongo/pull/23) - 0.3.5 * gvarela: added orderby functionality based on the mongo-ruby-driver. (https://github.com/bcg/em-mongo/pull/19) - 0.3.4 * Silly regression issue. Added em requires to tests and so I missed it when I yanked it from lib/em-mongo.rb * dynamix found *another* regression in the new database code. (see: https://github.com/bcg/em-mongo/pull/16) - 0.3.3 * Yank Bundler out lib/em-mongo.rb and push it into spec/spec_helper.rb * Numerous Rakefile changes - 0.3.2 * Added spec/gem to help with release testing * Added some Rakefile helpers - 0.3.1 * Added VERSION to the gem. It was breaking the new EM::Mongo::Version module. - 0.3 * Refactored test suite * Followed ruby-mongo-driver's _id convention. This will BREAK your apps. See: http://twitter.com/#!/brendengrace/status/9445253316608000 * Split out database.rb and pulled in support.rb and conversons.rb from ruby-mongo-driver * Added initial authentication support to to EM::Mongo::Database - 0.2.14 * .close was reconnecting automatically so now its configureable - 0.2.12 * Use BSON::ObjectID for _id instead of String (gaffneyc). = 0.2.11 * Auto Reconnect on lost connection (dj2) * Options handling on Em.connect (dj2) * Bundler support (pkieltyka) = 0.2.10 * Ruby 1.8 was broken in the last set of patches pulled in. = 0.2.9 * Performance enhacements by merrells * Cleanups by jarib = 0.2.7 * Fixes modifier issues in update. BSON would not allow '$' with key checks. (http://github.com/bcg/em-mongo/issues/3) = 0.2.6 * 'limit' was broken in find because of poor protocol parsing. 
(http://github.com/bcg/em-mongo/issues/1) = 0.2.5 * collection.update = 0.2.4 * Remove support for symbols, needs to be enforced still * Remove lib/buffer.rb and all custom BSON parsing * Remove UUID dependency (replaced by BSON::ObjectID.new) * Fixes a BufferOverflow because of misunderstood types = 0.1.1 - Initial Relase * Rspec tests * Fixes a BufferOverflow with large Hashes * Fork of RMongo em-mongo-0.5.1/em-mongo.gemspec0000755000004100000410000000134712313272263016357 0ustar www-datawww-dataversion = File.read("VERSION").strip Gem::Specification.new do |s| s.name = 'em-mongo' s.version = version s.authors = ['bcg', 'PlasticLizard'] s.email = 'brenden.grace@gmail.com' s.date = "2010-12-01" s.description = 'EventMachine driver for MongoDB.' s.homepage = 'https://github.com/bcg/em-mongo' s.rubyforge_project = 'em-mongo' s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- spec/*`.split("\n") s.extra_rdoc_files = ["README.rdoc"] s.rdoc_options = ["--charset=UTF-8"] s.require_paths = ["lib"] s.rubygems_version = '1.3.6' s.summary = 'An EventMachine driver for MongoDB.' s.add_dependency 'eventmachine', ['>= 0.12.10'] s.add_dependency "bson", ["~> 1.9.2"] end em-mongo-0.5.1/Gemfile0000644000004100000410000000026012313272263014555 0ustar www-datawww-datasource 'https://rubygems.org' gemspec group :development do gem "rspec", "< 1.3" # gem "em-spec", :git => "https://github.com/mloughran/em-spec.git" gem "em-spec" end em-mongo-0.5.1/examples/0000755000004100000410000000000012313272263015102 5ustar www-datawww-dataem-mongo-0.5.1/examples/readme.rb0000644000004100000410000000324212313272263016665 0ustar www-datawww-data#bundle exec ruby examples/readme.rb require 'em-mongo' require 'eventmachine' EM.run do db = EM::Mongo::Connection.new('localhost').db('my_database') collection = db.collection('my_collection') EM.next_tick do (1..10).each do |i| collection.insert( { :revolution => i } ) end #find returns an EM::Mongo::Cursor cursor = collection.find #most cursor methods return an EM::Mongo::RequestResponse, #which is an EventMachine::Deferrable resp = cursor.to_a #when em-mongo IO methods succeed, they #will always call back with the return #value you would have expected from the #synchronous version of the method from #the mongo-ruby-driver resp.callback do |documents| puts "I just got #{documents.length} documents! I'm really cool!" end #when em-mongo IO methods fail, they #errback with an array in the form #[ErrorClass, "error message"] resp.errback do |err| raise *err end #iterate though each result in a query collection.find( :revolution => { "$gt" => 5 } ).limit(1).skip(1).each do |doc| #unlike the mongo-ruby-driver, each returns null at the end of the cursor if doc puts "Revolution ##{doc['revolution']}" end end #add an index collection.create_index [[:revolution, -1]] #insert a document and ensure it gets written save_resp = collection.safe_save( { :hi => "there" }, :last_error_params => {:fsync=>true} ) save_resp.callback { puts "Hi is there, let us give thanks" } save_resp.errback { |err| puts "AAAAAAAAAAAAAAAARGH! Oh why! WHY!?!?!" 
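          # err is the standard em-mongo failure payload described above:
          # a two element array of [ErrorClass, "error message"]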
} collection.drop EM.add_periodic_timer(1) { EM.stop } end endem-mongo-0.5.1/examples/legacy.rb0000644000004100000410000000167712313272263016706 0ustar www-datawww-data#bundle exec ruby examples/legacy.rb require 'em-mongo' require 'em-mongo/prev.rb' require 'eventmachine' EM.run do conn = EM::Mongo::Connection.new('localhost') db = conn.db('my_database') collection = db.collection('my_collection') EM.next_tick do (1..10).each do |i| conn.insert('my_database.my_collection', { :revolution => i } ) end conn.update('my_database.my_collection', {:revolution => 9}, {:revolution => 8.5}) conn.delete('my_database.my_collection', {:revolution => 1}) collection.find do |documents| puts "I just got #{documents.length} documents! I'm really cool!" end #iterate though each result in a query collection.find( {:revolution => { "$gt" => 5 }}, :limit =>1, :skip => 1, :order => [:revolution, -1]) do |docs| docs.each do |doc| puts "Revolution ##{doc['revolution']}" end end collection.drop EM.add_periodic_timer(1) { EM.stop } end endem-mongo-0.5.1/spec/0000755000004100000410000000000012313272263014216 5ustar www-datawww-dataem-mongo-0.5.1/spec/spec_helper.rb0000644000004100000410000000127212313272263017036 0ustar www-datawww-datarequire "rubygems" require "bundler" Bundler.setup(:default, :development) require "eventmachine" begin require "bson_ext" rescue LoadError require "bson" end require File.expand_path('../lib/em-mongo', File.dirname(__FILE__)) require "em-spec/rspec" def connection_and_collection(collection_name=EM::Mongo::DEFAULT_NS) conn = EMMongo::Connection.new return conn, collection(conn, collection_name) end def collection(conn, name) conn.db.collection(name).remove conn.db.collection(name) end def number_hash @numbers = { 1 => 'one', 2 => 'two', 3 => 'three', 4 => 'four', 5 => 'five', 6 => 'six', 7 => 'seven', 8 => 'eight', 9 => 'nine' } end em-mongo-0.5.1/spec/integration/0000755000004100000410000000000012313272263016541 5ustar www-datawww-dataem-mongo-0.5.1/spec/integration/cursor_spec.rb0000644000004100000410000002235412313272263021423 0ustar www-datawww-datarequire File.expand_path('spec_helper', File.dirname(__FILE__) + '/../') describe EMMongo::Cursor do include EM::Spec it 'should describe itself via inspect' do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new( @coll, :selector => {'a' => 1} ) cursor.inspect.should == "" done end it 'should explain itself' do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll, :selector => {'a' => 1} ) cursor.explain.callback do |explanation| explanation['cursor'].should_not be_nil explanation['n'].should be_kind_of Numeric explanation['millis'].should be_kind_of Numeric explanation['nscanned'].should be_kind_of Numeric done end end it "should allow limit and skip to be chained" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll) all = [] 10.times do |i| all << {"x" => i} @coll.save(all[-1]) end cursor.limit(5).skip(3).sort("x",1).defer_as_a.callback do |results| all.slice(3...8).each_with_index do |item,idx| results[idx]["x"].should == item["x"] end done end end it "should allow a limit larger than the batch size" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll, :selector => {}) all = [] 1501.times do |i| @coll.insert(i.to_s => i.to_s) end cursor.limit(1500).defer_as_a.callback do |docs| docs.length.should == 1500 done end end it "should say if it has next" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll) 
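    # Seed exactly one document so has_next? is true before the first
    # next_document and false once the cursor is exhausted.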
1.times do |i| @coll.save("x" => 1) end cursor.has_next?.callback do |result| result.should be_true cursor.next_document.callback do |doc| cursor.has_next?.callback do |result| result.should be_false done end end end end it "should rewind" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll) 100.times do |i| @coll.save("x" => 1) end cursor.defer_as_a.callback do |r1| r1.length.should == 100 cursor.defer_as_a.callback do |r2| r2.length.should == 0 cursor.rewind! cursor.defer_as_a.callback do |r3| r3.length.should == 100 done end end end end describe "Get More" do it "should refill via get more" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll) 1000.times do |i| @coll.save("x" => 1) end cursor.defer_as_a.callback do |results| results.length.should == 1000 done end end end describe "Count" do it 'should count 0 records in a empty collection' do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll) cursor.count.callback do |c| c.should == 0 done end end it "should count records in a collection" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll) 10.times do |i| @coll.save("x" => 1) end cursor.count.callback do |c| c.should == 10 done end end it "should ignore skip and limit by default" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll).skip(5).limit(5) 10.times do |i| @coll.save("x" => i) end cursor.count.callback do |c| c.should == 10 done end end it "should account for skip when requested" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll).limit(5) 10.times do |i| @coll.save("x" => i) end cursor.count(true).callback do |c| c.should == 5 done end end it "should account for skip when requested" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll).skip(5) 10.times do |i| @coll.save("x" => i) end cursor.count(true).callback do |c| c.should == 5 done end end it "should count based on a simple selector" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll, :selector => {"x"=>1}) 10.times do |i| @coll.save("x" => i) end cursor.count(true).callback do |c| c.should == 1 done end end it "should count based on a selector with an operator" do @conn, @coll = connection_and_collection cursor = EM::Mongo::Cursor.new(@coll, :selector => {"x"=>{"$lt"=>5}}) 10.times do |i| @coll.save("x" => i) end cursor.count(true).callback do |c| c.should == 5 done end end it "should count a non-existing collection as 0 without vomiting blood" do @conn, @coll = connection_and_collection @coll = @conn.db.collection('imnotreallyheredontlookatme') cursor = EM::Mongo::Cursor.new(@coll) cursor.count(true).callback do |c| c.should == 0 done end end end describe "Sort" do it "should sort ascending" do @conn, @coll = connection_and_collection 5.times do |i| @coll.save("x" => i) end cursor = EM::Mongo::Cursor.new(@coll).sort(:x, 1) cursor.next_document.callback do |first| first["x"].should == 0 done end end it "should sort descending" do @conn, @coll = connection_and_collection 5.times do |i| @coll.save("x" => i) end cursor = EM::Mongo::Cursor.new(@coll).sort(:x, -1) cursor.next_document.callback do |first| first["x"].should == 4 done end end it "should sort descending using a symbol sort dir" do @conn, @coll = connection_and_collection 5.times do |i| @coll.save("x" => i) end cursor = EM::Mongo::Cursor.new(@coll).sort(["x", :desc]) cursor.next_document.callback do |first| first["x"].should == 4 done 
end end it "should not allow sort to be called on an executed cursor" do @conn, @coll = connection_and_collection 5.times do |i| @coll.save("x" => i) end cursor = EM::Mongo::Cursor.new(@coll).sort(["x", :desc]) cursor.next_document.callback do |first| lambda { cursor.sort("x",1) }.should raise_error EM::Mongo::InvalidOperation done end end it "should sort by dates" do @conn, @coll = connection_and_collection 5.times do |i| @coll.insert("x" => Time.utc(2000 + i)) end cursor = EM::Mongo::Cursor.new(@coll).sort(["x", :desc]) cursor.next_document.callback do |first| first["x"].year.should == 2004 done end end describe "Each" do it "should iterate through each doc, returning null when done" do @conn, @coll = connection_and_collection 5.times do |i| @coll.insert("x" => i) end cursor = EM::Mongo::Cursor.new(@coll) counter = 0 cursor.each do |doc| if doc counter+=1 else counter.should == 5 done end end end end describe "defer_as_a" do it "should return an array of all documents in a query" do @conn, @coll = connection_and_collection 5.times do |i| @coll.insert("x" => i) end cursor = EM::Mongo::Cursor.new(@coll).sort("x",1) cursor.defer_as_a.callback do |docs| docs.length.should == 5 5.times do |i| docs[i]["x"].should == i end done end end end describe "Transformer (a robot in disguise)" do it "should set the transformer when passed in the constructor" do @conn, @coll = connection_and_collection transformer = Proc.new {|doc|doc} cursor = EM::Mongo::Cursor.new(@coll, :transformer => transformer) cursor.transformer.should == transformer done end it "should transform docs with next" do @conn, @coll = connection_and_collection @coll.insert({:a=>1}) klass = Struct.new(:id,:a) transformer = Proc.new {|doc|klass.new(doc['_id'],doc['a'])} cursor = EM::Mongo::Cursor.new(@coll, :transformer => transformer) cursor.next.callback do |doc| doc.should be_kind_of klass doc.id.should be_kind_of BSON::ObjectId doc.a.should == 1 done end end it "should transform docs with each" do @conn, @coll = connection_and_collection @coll.insert({:a=>1}) klass = Struct.new(:id, :a) transformer = Proc.new { |doc| klass.new(doc['_id'], doc['a']) } cursor = EM::Mongo::Cursor.new(@coll, :transformer => transformer) cursor.each do |doc| if doc doc.should be_kind_of klass doc.id.should be_kind_of BSON::ObjectId doc.a.should == 1 end done end end end end end em-mongo-0.5.1/spec/integration/connection_spec.rb0000644000004100000410000000206412313272263022241 0ustar www-datawww-datarequire File.expand_path('spec_helper', File.dirname(__FILE__) + '/../') describe EMMongo::Connection do include EM::Spec it 'should connect' do @conn = EMMongo::Connection.new EM.add_timer(0.1) do @conn.should be_connected done end end it 'should close' do @conn = EMMongo::Connection.new EM.add_timer(0.1) do @conn.should be_connected @conn.close end EM.add_timer(0.2) do EM.next_tick do @conn.should_not be_connected done end end end it 'should reconnect' do @conn = EMMongo::Connection.new(EM::Mongo::DEFAULT_IP, EM::Mongo::DEFAULT_PORT, nil, {:reconnect_in => 0.5}) EM.add_timer(0.1) do @conn.close end EM.add_timer(0.9) do @conn.should be_connected done end end it 'should instantiate a Database' do @conn = EMMongo::Connection.new db1 = @conn.db db1.should be_kind_of(EM::Mongo::Database) db2 = @conn.db('db2') db2.should be_kind_of(EM::Mongo::Database) db2.should_not == db1 done end end em-mongo-0.5.1/spec/integration/request_response_spec.rb0000644000004100000410000000321412313272263023506 0ustar www-datawww-datarequire File.expand_path('spec_helper', 
File.dirname(__FILE__) + '/../') describe EM::Mongo::RequestResponse do before :each do @response = EM::Mongo::RequestResponse.new end context "when first initialized" do it "should not be complete" do @response.completed?.should be_false end it "should not have succeeded" do @response.succeeded?.should be_false end it "should not have failed" do @response.failed?.should be_false end it "should not have any data" do @response.data.should be_nil end it "should not have any error" do @response.error.should be_nil end end context "when succeeded" do before(:each) { @response.succeed [:some,:data] } it "should have completed" do @response.completed?.should be_true end it "should have succeeded" do @response.succeeded?.should be_true end it "should not have failed" do @response.failed?.should be_false end it "should have data" do @response.data.should == [:some, :data] end it "should not have an error" do @response.error.should be_nil end end context "when failed" do before(:each) { @response.fail [RuntimeError, "crap!"]} it "should have completed" do @response.completed?.should be_true end it "should not have succeeded" do @response.succeeded?.should be_false end it "should have failed" do @response.failed?.should be_true end it "should not have data" do @response.data.should be_nil end it "should have an error" do @response.error.should == [RuntimeError, "crap!"] end end endem-mongo-0.5.1/spec/integration/collection_spec.rb0000644000004100000410000005644412313272263022250 0ustar www-datawww-datarequire File.expand_path('spec_helper', File.dirname(__FILE__) + '/../') describe EMMongo::Collection do include EM::Spec it 'should return a sub collection via the indexer method' do @conn, @coll = connection_and_collection @coll["child"].name.should == "#{@coll.name}.child" done end it "should drop the collection" do @conn, @coll = connection_and_collection @coll.insert({:x => "x"}) @coll.drop.callback do @coll.db.collection_names.callback do |names| names.should_not include @ns done end end end describe "find" do it 'should return a cursor' do @conn, @coll = connection_and_collection cursor = @coll.find(:hi=>"there") cursor.should be_a_kind_of(EM::Mongo::Cursor) done end it 'should find an object by attribute' do @conn, @coll = connection_and_collection @coll.insert("hello" => 'world') @coll.find({"hello" => "world"},{}).defer_as_a.callback do |res| res.size.should >= 1 res[0]["hello"].should == "world" done end end it 'should take strings or symbols for hashes' do @conn, @coll = connection_and_collection obj = @coll.insert({:_id => 1234, 'foo' => 'bar', :hello => 'world'}) @coll.first({:_id => 1234},{}).callback do |res| res['hello'].should == 'world' res['foo'].should == 'bar' done end end it 'should find an object by symbol' do @conn, @coll = connection_and_collection @coll.insert('hello' => 'world') @coll.find({:hello => "world"},{}).defer_as_a.callback do |res| res.size.should >= 1 res[0]["hello"].should == "world" done end end it 'should find an object by id' do @conn, @coll = connection_and_collection id = @coll.insert('hello' => 'world') @coll.find({:_id => id},{}).defer_as_a.callback do |res| res.size.should >= 1 res[0]['hello'].should == "world" done end end it 'should find all objects' do @conn, @coll = connection_and_collection @coll.insert('one' => 'one') @coll.insert('two' => 'two') @coll.find.defer_as_a.callback do |res| res.size.should >= 2 done end end it 'should find objects and sort by the order field' do @conn, @coll = connection_and_collection @coll.insert(:name => 'one', 
:position => 0) @coll.insert(:name => 'three', :position => 2) @coll.insert(:name => 'two', :position => 1) @coll.find({}, {:order => 'position'}).defer_as_a.callback do |res| res[0]["name"].should == 'one' res[1]["name"].should == 'two' res[2]["name"].should == 'three' done end @coll.find({}, {:order => [:position, :desc]}).defer_as_a.callback do |res| res[0]["name"].should == 'three' res[1]["name"].should == 'two' res[2]["name"].should == 'one' done end end it "should find a single document with find_one/first" do @conn, @coll = connection_and_collection @coll.insert(:name => 'one', :position => 0) @coll.insert(:name => 'three', :position => 2) @coll.insert(:name => 'two', :position => 1) @coll.find_one({},:sort => [:position,-1]).callback do |first| first["position"].should == 2 done end end it 'should find an object using nested properties' do @conn, @coll = connection_and_collection @coll.insert({ 'name' => 'Google', 'address' => { 'cxity' => 'Mountain View', 'state' => 'California'} }) @coll.first('address.cxity' => 'Mountain View').callback do |res| res['name'].should == 'Google' done end end it 'should find objects wxith specific values' do @conn, @coll = connection_and_collection number_hash.each do |num, word| @coll.insert({'num' => num, 'word' => word}) end @coll.find({'num' => {'$in' => [1,3,5]}}).defer_as_a.callback do |res| res.size.should == 3 res.map{|r| r['num'] }.sort.should == [1,3,5] done end end it 'should find objects greater than something' do @conn, @coll = connection_and_collection number_hash.each do |num, word| @coll.insert('num' => num, 'word' => word) end @coll.find({'num' => {'$gt' => 3}}).defer_as_a.callback do |res| res.size.should == 6 res.map{|r| r['num'] }.sort.should == [4,5,6,7,8,9] done end end end describe "insert" do it 'should insert an object' do @conn, @coll = connection_and_collection doc = {'hello' => 'world'} id = @coll.insert(doc) id.should be_a_kind_of(BSON::ObjectId) doc[:_id].should be_a_kind_of(BSON::ObjectId) done end it "should insert multiple documents" do @conn, @coll = connection_and_collection docs = [{'hello' => 'world'}, {'goodbye' => 'womb'}] ids = @coll.insert(docs) ids.should be_a_kind_of(Array) ids[0].should == docs[0][:_id] ids[1].should == docs[1][:_id] done end it 'should insert an object with a custom _id' do @conn, @coll = connection_and_collection id = @coll.insert(:_id => 1234, 'hello' => 'world') id.should == 1234 @coll.first({'hello' => 'world'}).callback do |res| res['_id'].should == 1234 done end end it 'should insert a Time' do @conn, @coll = connection_and_collection t = Time.now.utc.freeze @coll.insert('date' => t) @coll.find.defer_as_a.callback do |res| res[0]['date'].to_s.should == t.to_s done end end it 'should insert a complex object' do @conn, @coll = connection_and_collection obj = { 'array' => [1,2,3], 'float' => 123.456, 'hash' => {'boolean' => true}, 'nil' => nil, 'symbol' => :name, 'string' => 'hello world', 'time' => Time.now.to_f, 'regex' => /abc$/ix } retobj = @coll.insert(obj) @coll.find({:_id => obj[:_id]}).defer_as_a.callback do |ret| ret.size.should == 1 ret[0].each_key do |key| next if key == '_id' ret[0][key].should == obj[key] end done end end context "safe_insert" do it "should succesfully save a document with no errors" do @conn, @coll = connection_and_collection('safe.test') @coll.safe_insert({"hello" => "world"}).callback do |ok| ok.should be_a_kind_of BSON::ObjectId done end end it "should respond with an error when an invalid document is saved" do @conn, @coll = 
connection_and_collection('safe.test') @coll.create_index("hello", :unique => true) a = {"hello" => "world"} @coll.insert(a) resp = @coll.safe_insert(a).errback do |err| err[0].should == EM::Mongo::OperationFailure done end end end end describe "update" do it 'should update an object' do @conn, @coll = connection_and_collection id = @coll.insert('hello' => 'world') @coll.update({'hello' => 'world'}, {'hello' => 'newworld'}) @coll.find({:_id => id},{}).defer_as_a.callback do |res| res[0]['hello'].should == 'newworld' done end end it 'should update an object wxith $inc' do @conn, @coll = connection_and_collection id = @coll.insert('hello' => 'world') @coll.update({'hello' => 'world'}, {'$inc' => {'count' => 1}}) @coll.find({:_id => id},{}).defer_as_a.callback do |res| res.first['hello'].should == 'world' res.first['count'].should == 1 done end end context "safe_update" do it "should respond with an error when an invalid document is updated" do @conn, @coll = connection_and_collection('safe.update.test') @coll.create_index("x", :unique => true) @coll.insert({"x" => 5}) @coll.insert({"x" => 10}) @coll.safe_update({},{"x" => 10}).errback do |err| err[0].should == EM::Mongo::OperationFailure done end end end end describe "save" do it "should insert a record when no id is present" do @conn, @coll = connection_and_collection id = @coll.save("x" => 1) @coll.find("x" => 1).defer_as_a.callback do |result| result[0]["_id"].should == id done end end it "should update a record when id is present" do @conn, @coll = connection_and_collection doc = {"x" => 1} id = @coll.save(doc) doc["x"] = 2 @coll.save(doc).should be_true @coll.find().defer_as_a.callback do |result| result.count.should == 1 result[0]["x"].should == 2 done end end context "safe_save" do it "should respond with an error when an invalid document is updated" do @conn, @coll = connection_and_collection('safe.save.test') @coll.create_index("x", :unique => true) @coll.save({"x" => 5}) @coll.save({"x" => 5}) @coll.safe_save({"x" => 5}).errback do |err| err[0].should == EM::Mongo::OperationFailure done end end end end describe "remove" do it 'should remove an object' do @conn, @coll = connection_and_collection id = @coll.insert('hello' => 'world') @coll.remove(:_id => id) @coll.find({'hello' => "world"}).defer_as_a.callback do |res| res.size.should == 0 done end end it 'should remove all objects' do @conn, @coll = connection_and_collection @coll.insert('one' => 'one') @coll.insert('two' => 'two') @coll.remove @coll.find.defer_as_a.callback do |res| res.size.should == 0 done end end end describe "find_and_modify" do it "should find and modify a document" do @conn, @coll = connection_and_collection @coll << { :a => 1, :processed => false } @coll << { :a => 2, :processed => false } @coll << { :a => 3, :processed => false } resp = @coll.find_and_modify(:query => {}, :sort => [['a', -1]], :update => {"$set" => {:processed => true}}) resp.callback do |doc| doc['processed'].should_not be_true @coll.find_one({:a=>3}).callback do |updated| updated['processed'].should be_true done end end end it "should fail with invalid options" do @conn, @coll = connection_and_collection @coll << { :a => 1, :processed => false } @coll << { :a => 2, :processed => false } @coll << { :a => 3, :processed => false } resp = @coll.find_and_modify(:blimey => {}) resp.errback do |err| err[0].should == EM::Mongo::OperationFailure done end end end describe "mapreduce" do it "should map, and then reduce" do @conn, @coll = connection_and_collection @coll << { "user_id" => 1 } @coll << 
{ "user_id" => 2 } m = "function() { emit(this.user_id, 1); }" r = "function(k,vals) { return 1; }" res = @coll.map_reduce(m, r, :out => 'foo') res.callback do |collection| collection.find_one({"_id" => 1}).callback do |doc| doc.should_not be_nil collection.find_one({"_id" => 2}).callback do |doc2| doc2.should_not be_nil done end end end end it "should work with code objects" do @conn, @coll = connection_and_collection @coll << { "user_id" => 1 } @coll << { "user_id" => 2 } m = BSON::Code.new "function() { emit(this.user_id, 1); }" r = BSON::Code.new "function(k,vals) { return 1; }" res = @coll.map_reduce(m, r, :out => 'foo') res.callback do |collection| collection.find_one({"_id" => 1}).callback do |doc| doc.should_not be_nil collection.find_one({"_id" => 2}).callback do |doc2| doc2.should_not be_nil done end end end end it "should respect a query" do @conn, @coll = connection_and_collection @coll << { "user_id" => 1 } @coll << { "user_id" => 2 } @coll << { "user_id" => 3 } m = BSON::Code.new "function() { emit(this.user_id, 1); }" r = BSON::Code.new "function(k,vals) { return 1; }" res = @coll.map_reduce(m, r, :query => {"user_id" => {"$gt" => 1}}, :out => 'foo') res.callback do |collection| collection.count .callback do |c| c.should == 2 collection.find_one({"_id" => 2}).callback do |doc| doc.should_not be_nil collection.find_one({"_id" => 3}).callback do |doc2| doc2.should_not be_nil done end end end end end it "should return a raw response if requested" do @conn, @coll = connection_and_collection m = BSON::Code.new("function() { emit(this.user_id, 1); }") r = BSON::Code.new("function(k,vals) { return 1; }") res = @coll.map_reduce(m, r, :raw => true, :out => 'foo') res.callback do |res| res["result"].should_not be_nil res["counts"].should_not be_nil res["timeMillis"].should_not be_nil done end end it "should use an output collection if specified" do @conn, @coll = connection_and_collection output_collection = "test-map-coll" m = BSON::Code.new("function() { emit(this.user_id, 1); }") r = BSON::Code.new("function(k,vals) { return 1; }") res = @coll.map_reduce(m, r, :raw => true, :out => output_collection) res.callback do |res| res["result"].should == output_collection res["counts"].should_not be_nil res["timeMillis"].should_not be_nil done end end end describe "distinct" do it "shoud perform a distinct query" do @conn, @coll = connection_and_collection @coll.insert([{:a => 0, :b => {:c => "a"}}, {:a => 1, :b => {:c => "b"}}, {:a => 1, :b => {:c => "c"}}, {:a => 2, :b => {:c => "a"}}, {:a => 3}, {:a => 3}]) @coll.distinct(:a).callback do |vals| vals.sort.should == [0,1,2,3] @coll.distinct("b.c").callback do |vals2| vals2.sort.should == ["a","b","c"] done end end end it "should respect a query" do @conn, @coll = connection_and_collection @coll.insert([{:a => 0, :b => {:c => "a"}}, {:a => 1, :b => {:c => "b"}}, {:a => 1, :b => {:c => "c"}}, {:a => 2, :b => {:c => "a"}}, {:a => 3}, {:a => 3}]) @coll.distinct(:a, {:a => {"$gt" => 1}}).callback do |vals| vals.sort.should == [2,3] done end end it "should respect a query and nested objects" do @conn, @coll = connection_and_collection @coll.insert([{:a => 0, :b => {:c => "a"}}, {:a => 1, :b => {:c => "b"}}, {:a => 1, :b => {:c => "c"}}, {:a => 2, :b => {:c => "a"}}, {:a => 3}, {:a => 3}]) @coll.distinct("b.c", {"b.c" => {"$ne" => "c"}}).callback do |vals| vals.sort.should == ["a","b"] done end end end describe "group" do it "should fail if missing required options" do @conn, @coll = connection_and_collection lambda { @coll.group(:initial => {}) 
}.should raise_error EM::Mongo::MongoArgumentError lambda { @coll.group(:reduce => "foo") }.should raise_error EM::Mongo::MongoArgumentError done end it "should group results using eval form" do @conn, @coll = connection_and_collection @coll.save("a" => 1) @coll.save("b" => 1) @initial = {"count" => 0} @reduce_function = "function (obj, prev) { prev.count += inc_value; }" @coll.group(:initial => @initial, :reduce => BSON::Code.new(@reduce_function, {"inc_value" => 0.5})).callback do |result| result[0]["count"].should == 1 done end @coll.group(:initial => @initial, :reduce => BSON::Code.new(@reduce_function, {"inc_value" => 1})).callback do |result| result[0]["count"].should == 2 done end @coll.group(:initial => @initial, :reduce => BSON::Code.new(@reduce_function, {"inc_value" => 2})).callback do |result| result[0]["count"].should == 4 done end end it "should finalize grouped results" do @conn, @coll = connection_and_collection @coll.save("a" => 1) @coll.save("b" => 1) @initial = {"count" => 0} @reduce_function = "function (obj, prev) { prev.count += inc_value; }" @finalize = "function(doc) {doc.f = doc.count + 200; }" @coll.group(:initial => @initial, :reduce => BSON::Code.new(@reduce_function, {"inc_value" => 1}), :finalize => BSON::Code.new(@finalize)).callback do |results| results[0]["f"].should == 202 done end end end describe "grouping with a key" do it "should group" do @conn, @coll = connection_and_collection @coll.save("a" => 1, "pop" => 100) @coll.save("a" => 1, "pop" => 100) @coll.save("a" => 2, "pop" => 100) @coll.save("a" => 2, "pop" => 100) @initial = {"count" => 0, "foo" => 1} @reduce_function = "function (obj, prev) { prev.count += obj.pop; }" @coll.group(:key => :a, :initial => @initial, :reduce => @reduce_function).callback do |result| result.all? 
{|r| r['count'] = 200 }.should be_true done end end end describe "grouping with a function" do it "should group results" do @conn, @coll = connection_and_collection @coll.save("a" => 1) @coll.save("a" => 2) @coll.save("a" => 3) @coll.save("a" => 4) @coll.save("a" => 5) @initial = {"count" => 0} @keyf = "function (doc) { if(doc.a % 2 == 0) { return {even: true}; } else {return {odd: true}} };" @reduce = "function (obj, prev) { prev.count += 1; }" @coll.group(:keyf => @keyf, :initial => @initial, :reduce => @reduce).callback do |results| res = results.sort {|a,b| a['count'] <=> b['count']} (res[0]['even'] && res[0]['count']).should == 2.0 (res[1]['odd'] && res[1]['count']) == 3.0 done end end it "should group filtered results" do @conn, @coll = connection_and_collection @coll.save("a" => 1) @coll.save("a" => 2) @coll.save("a" => 3) @coll.save("a" => 4) @coll.save("a" => 5) @initial = {"count" => 0} @keyf = "function (doc) { if(doc.a % 2 == 0) { return {even: true}; } else {return {odd: true}} };" @reduce = "function (obj, prev) { prev.count += 1; }" @coll.group(:keyf => @keyf, :cond => {:a => {'$ne' => 2}}, :initial => @initial, :reduce => @reduce).callback do |results| res = results.sort {|a, b| a['count'] <=> b['count']} (res[0]['even'] && res[0]['count']).should == 1.0 (res[1]['odd'] && res[1]['count']) == 3.0 done end end end context "indexes" do it "should create an index using symbols" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index :foo, :name => :bar @collection.index_information.callback do |info| info['bar'].should_not be_nil done end end it "should create a flat 2d index" do @conn, @geo = connection_and_collection('2d') @geo.save({'loc' => [-100, 100]}) @geo.create_index([['loc', EM::Mongo::FLAT2D]]) @geo.index_information.callback do |info| info['loc_2d'].should_not be_nil done end end it "should create a sphere 2d index" do @conn, @geo = connection_and_collection('2dsphere') @geo.save({'loc' => [-100, 100]}) @geo.create_index([['loc', EM::Mongo::SPHERE2D]]) @geo.index_information.callback do |info| info['loc_2dsphere'].should_not be_nil done end end it "should create a unique index" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index([['a', EM::Mongo::ASCENDING]], :unique => true) @collection.index_information.callback do |info| info['a_1']['unique'].should == true done end end it "should create an index in the background" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index([['b', EM::Mongo::ASCENDING]], :background => true) @collection.index_information.callback do |info| info['b_1']['background'].should == true done end end it "should require an array of arrays" do @conn, @collection = connection_and_collection('test-collection') proc { @collection.create_index(['c', EM::Mongo::ASCENDING]) }.should raise_error done end it "should enforce proper index types" do @conn, @collection = connection_and_collection('test-collection') proc { @collection.create_index([['c', 'blah']]) }.should raise_error done end it "should allow an alernate name to be specified" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index :bar, :name => 'foo_index' @collection.index_information.callback do |info| info['foo_index'].should_not be_nil done end end it "should generate indexes in the proper order" do @conn, @collection = connection_and_collection('test-collection') @collection.should_receive(:insert_documents) do |sel, coll| 
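      # The index name is built from each key and direction in the order
      # given, so [['b', 1], ['a', 1]] produces 'b_1_a_1'.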
sel[0][:name].should == 'b_1_a_1' end @collection.create_index([['b',1],['a',1]]) done end it "should allow multiple calls to create_index" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index([['a',1]]).should be_true @collection.create_index([['a',1]]).should be_true done end it "should allow the creation of multiple indexes" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index([['a',1]]).should be_true @collection.create_index([['b',1]]).should be_true done end it "should return a properly ordered index info" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index([['b',1],['a',1]]) @collection.index_information.callback do |info| info['b_1_a_1'].should_not be_nil done end end it "should drop an index" do @conn, @collection = connection_and_collection('test-collection') @collection.create_index([['a',EM::Mongo::ASCENDING]]) @collection.index_information.callback do |info| info['a_1'].should_not be_nil @collection.drop_index([['a',EM::Mongo::ASCENDING]]).callback do @collection.index_information.callback do |info| info['a_1'].should be_nil done end end end end end it 'should handle multiple pending queries' do @conn, @coll = connection_and_collection id = @coll.insert("foo" => "bar") received = 0 10.times do |n| @coll.first("_id" => id).callback do |res| received += 1 done end end end end em-mongo-0.5.1/spec/integration/database_spec.rb0000644000004100000410000001216212313272263021646 0ustar www-datawww-datarequire File.expand_path('spec_helper', File.dirname(__FILE__) + '/../') describe EMMongo::Database do include EM::Spec it 'should add a user' do @conn = EM::Mongo::Connection.new @db = @conn.db @db.collection(EM::Mongo::Database::SYSTEM_USER_COLLECTION).remove({}) @db.add_user('test', 'test').callback do |res| res.should_not == nil res.should be_a_kind_of(BSON::ObjectId) done end end it 'should authenticate a user' do @conn = EM::Mongo::Connection.new @db = @conn.db @db.add_user('test', 'test') @db.authenticate('test', 'test').callback do |res| res.should == true done end end it "should create a collection" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.create_collection("a").callback do |col| col.should be_kind_of EM::Mongo::Collection col.name.should == "a" done end end it "should drop a collection" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.create_collection("a").callback do |col| @db.drop_collection("a").callback do @db.collection_names.callback do |names| names.should_not include "a" done end end end end it "should provide a list of collection names in the database" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.create_collection "a" @db.create_collection("b").callback do @db.collection_names.callback do |names| names.should include "a" names.should include "b" done end end end it "should provide a list of collections in the database" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.create_collection "a" @db.create_collection("b").callback do @db.collection_names.callback do |names| @db.collections do |collections| collections.length.should == names.length collections.each do |col| col.should be_kind_of EM::Mongo::Collection end end done end end end it 'should cache collections correctly' do @conn = EM::Mongo::Connection.new @db = @conn.db a = @db.collection('first_collection') b = @db.collection('second_collection') a.should_not == b @db.collection('first_collection').should == a @db.collection('second_collection').should 
== b done end describe "Errors" do describe "when there are no errors" do it "should return a nil 'err' from get_last_error" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.reset_error_history.callback do @db.get_last_error.callback do |doc| doc['err'].should be_nil done end end end it "should have a false error?" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.reset_error_history.callback do @db.error?.callback do |result| result.should == false done end end end end describe "when there are errors" do it "should return a value for 'err' from get_last_error" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.command({:forceerror=>1}, :check_response => false).callback do @db.get_last_error.callback do |doc| doc['err'].should_not be_nil done end end end it "should have a true error?" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.command({:forceerror=>1}, :check_response => false).callback do @db.error?.callback do |result| result.should == true done end end end end it "should be able to reset the error history" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.command({:forceerror=>1}, :check_response => false).callback do @db.reset_error_history.callback do @db.error?.callback do |result| result.should == false done end end end end end describe "Command" do it "should fail when the database returns an error" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.command({:non_command => 1}, :check_response => true).errback do done end end it "should not fail when checkresponse is false" do @conn = EM::Mongo::Connection.new @db = @conn.db @db.command({:non_command => 1}, :check_response => false).callback do done end end it "should succesfully execute a valid command" do @conn, @coll = connection_and_collection @db = @conn.db @coll.insert( {:col => {:easy => "andy" } } ) @db.command({:collstats => @coll.name}).callback do |doc| doc.should_not be_nil doc["count"].should == 1 done end end end describe "Indexes" do #Index functions are integration tested via the collection specs. Maybe the wrong order, #but the collection index functions all call down to the database index functions, and the #tests would simply duplicate eachother end end em-mongo-0.5.1/spec/unit/0000755000004100000410000000000012313272263015175 5ustar www-datawww-dataem-mongo-0.5.1/spec/unit/bson_spec.rb0000644000004100000410000000133112313272263017473 0ustar www-datawww-datarequire File.expand_path('spec_helper', File.dirname(__FILE__) + '/../') # This is to prove how BSON works so we can code around it where appropriate # BSON documents and Ruby Hashes are not the same thing afterall. # # http://www.mongodb.org/display/DOCS/BSON describe BSON do it 'should do what it does' do doc = {:_id => 12345, :foo => 'notbar', "foo" => "bar", :hello => :world } doc = BSON::BSON_CODER.deserialize(BSON::BSON_CODER.serialize(doc, false, true).to_s) # 1. An ID passed as Symbol is really a String doc['_id'].should == 12345 # 2. More to the point, all keys are Strings. doc['hello'].should == :world # 3. 
The last String/Symbol wins doc['foo'].should == 'bar' end end em-mongo-0.5.1/spec/gem/0000755000004100000410000000000012313272263014766 5ustar www-datawww-dataem-mongo-0.5.1/spec/gem/Gemfile0000644000004100000410000000012312313272263016255 0ustar www-datawww-datasource :gemcutter gem 'eventmachine' gem 'bson' gem 'em-mongo', :path => '../../' em-mongo-0.5.1/spec/gem/bundler.rb0000755000004100000410000000064412313272263016755 0ustar www-datawww-data#!/usr/bin/env bundle exec ruby require "rubygems" require "bundler" Bundler.setup(:default) require "eventmachine" require "em-mongo" $return = -1 EM.run do @conn = EM::Mongo::Connection.new EM.next_tick do id = @conn.db.collection('test').insert({:hello => "world"}) @conn.db.collection('test').first(:_id => id) do |document| $return = 0 if document EM.stop end end end exit($return) em-mongo-0.5.1/spec/gem/rubygems.rb0000755000004100000410000000055612313272263017161 0ustar www-datawww-data#!/usr/bin/env ruby require "rubygems" require "eventmachine" require "em-mongo" $return = -1 EM.run do @conn = EM::Mongo::Connection.new EM.next_tick do id = @conn.db.collection('test').insert({:hello => "world"}) @conn.db.collection('test').first(:_id => id) do |document| $return = 0 if document EM.stop end end end exit($return) em-mongo-0.5.1/README.rdoc0000644000004100000410000002251412313272263015076 0ustar www-datawww-data Em-mongo is no longer being maintained (hasn't been for some time). If you are interested in the commit bit or want to take over the full project, please let me know! = EM-Mongo An EventMachine client for MongoDB. Originally based on RMongo, this client aims to be as api compatible with mongo-ruby-driver as possible. For methods that do not retrieve data from the database the api of em-mongo should be identical (though a subset) to the mongo-ruby-driver. This includes the various update methods like insert, save and update (without the :safe flag, which is handled separately) as well as find, which returns a cursor. For operations that require IO, em-mongo always returns an EventMachine deferrable. == Some examples #this file can be found in the examples directory. # bundle exec examples/readme.rb #insert a few records, then read some back using Collection#find require 'em-mongo' require 'eventmachine' EM.run do db = EM::Mongo::Connection.new('localhost').db('my_database') collection = db.collection('my_collection') EM.next_tick do (1..10).each do |i| collection.insert( { :revolution => i } ) end #find returns an EM::Mongo::Cursor cursor = collection.find #most cursor methods return an EM::Mongo::RequestResponse, #which is an EventMachine::Deferrable resp = cursor.defer_as_a #when em-mongo IO methods succeed, they #will always call back with the return #value you would have expected from the #synchronous version of the same method from #the mongo-ruby-driver resp.callback do |documents| puts "I just got #{documents.length} documents! I'm really cool!" 
end #when em-mongo IO methods fail, they #errback with an array in the form #[ErrorClass, "error message"] resp.errback do |err| raise *err end #iterate though each result in a query collection.find( :revolution => { "$gt" => 5 } ).limit(1).skip(1).each do |doc| #unlike the mongo-ruby-driver, each returns null at the end of the cursor if doc puts "Revolution ##{doc['revolution']}" end end #add an index collection.create_index [[:revolution, -1]] #insert a document and ensure it gets written save_resp = collection.safe_save( { :hi => "there" }, :last_error_params => {:fsync=>true} ) save_resp.callback { puts "Hi is there, let us give thanks" } save_resp.errback { |err| puts "AAAAAAAAAAAAAAAARGH! Oh why! WHY!?!?!" } collection.drop EM.add_periodic_timer(1) { EM.stop } end end == Error handling em-mongo will present errors in two different ways. First, em-mongo will raise exceptions like any other synchronous library if an error is enountered in a method that does not need to perform IO or if an error is encountered prior to peforming IO. Errors returned by the database, or errors communicating with the database, will be delivered via standard EM::Deferrable errbacks. While it is tempting to subscribe just to a callback my_collection.find.defer_as_a.callback {|docs| ... } in the case of an error you will never receive a response. If you are waiting for a response before your program continues, you will be waiting a very long time. A better approach would be to store the deferrable into a variable and subscribe to its callback and errback resp = my_collection.find.defer_as_a resp.callback { |docs| ... } resp.errback { |err| raise *err } errback's blocks will always be called with a single argument which is a two element array containing the error class and the error message [EM::Mongo::OperationError, "aw snap"] == Safe Writes As you are probably aware the default behavior for the mongo-ruby-driver, and therefore em-mongo, is to send update messages to MongoDB in a fire-and-forget manner. This means that if a unique index is violated, or some other problem causes MongoDB to raise an exception and refuse to apply your changes, you'll never know about it until you go to look for that record later. For many applications such as logging this might be OK, but for many use cases like analytics you will want to know if your writes don't succeed. This is one place where em-mongo diverges substantially from the mongo-ruby-driver because an unsafe write will not receive a response from the server, whereas a safe write will receive a response from the server and requires a deferrable and a callback. #default, unsafe write my_collection.insert( {:a => "b" } ) #a safe write using em-mongo insert_resp = my_collection.safe_insert( {:a => "b" } ) insert_resp.callback { #all ok } insert_resp.errback { |err| puts '' } em-mongo has the following safe methods: safe_insert, safe_update, safe_save In addition to calling your errback if the write fails, you can provide the usual 'safety' options that you can to Database#get_last_error, such as :fsync => true or :w => 2, to control the degree of safety you want. Please the 10gen documentation on DB#get_last_error for specifics. safe_insert( {:a=>"v"}, :last_error_params => { :fsync => true, :w => 5 } ) == Documentation em-mongo now has some YARD docs. These are mostly ported directly from the mongo-ruby-driver. While they have been updated to reflect em-mongo's async API, there are probably a few errors left over in the translation. 
Please file an issue or submit a pull request if you notice any inaccuracies. http://rubydoc.info/github/bcg/em-mongo/master/frames == Upgrading **The API for em-mongo has changed since version 0.3.6.** em-mongo methods no longer directly accept callbacks and instead return EM::Mongo::RequestResponse objects, which are EM::Deferrable(s). This means you need to convert calls like this my_collection.first() { |doc| p doc } to this my_collection.first().callback { |doc| p doc } EM::Mongo::Collection#find now returns a cursor, not an array, to maintain compatibility with the mongo-ruby-driver. This provides a great deal more flexibility, but requires you to select a specific cursor method to actually fetch data from the server, such as #defer_as_a or #next my_collection.find() { |docs| ... } becomes my_collection.find.defer_as_a.callback { |docs| ... } If for some reason you aren't ready to upgrade your project but you want to be able to use the newer gem, you can require a compatibility file that will revert the new API to the API found in 0.3.6 require 'em-mongo' require 'em-mongo/prev.rb' This file will not remain in the project forever, though, so it is better to upgrade your projects sooner rather than later. == What's in the box? === Collection CRUD operations #find, #find_one, #save, #safe_save, #insert, #save_insert, #update, #safe_update, #remove, #find_and_modify Index management #create_index, #drop_index Collection management #drop, #stats, #count, #name Server-side aggregations #map_reduce, #group, #distinct === Database Collection management #collection, #collection_names, #collections, #collections_info, #create_collection, #drop_collection Index management #drop_index, #index_information Authentication #authenticate, #add_user Misc #get_last_error, #error?, #name, #command === Cursor Query options :selector, :order, :skip, :limit, :explain, :batch_size, :fields, :tailable, :transformer Enumerable-ish **EM::Mongo::Cursor does **not** use the Enumerable mixin for obvious reasons** #next_document, #rewind!, #has_next?, #count, #each, #to_a Misc #batch_size, #explain, #close, #closed? Query modifier methods #sort, #limit, #skip == Compatibility * em-mongo has been tested on Ruby 1.8.7 and 1.9.2 * em-mongo will not run under JRuby. We'd love some help figuring out why :) * Compatibility with other runtimes is unknown == Still Missing / TODO * Replica Sets * GRIDFS support * Connection pooling * PK factories * JRuby support == Contact * Twitter: @brendengrace * IRC: bcg * Email: brenden.grace@gmail.com == Credit Aman Gupta (tmm1) wrote the original RMongo which em-mongo is based on. == References * Rmongo: http://github.com/tmm1/rmongo * EM-Mongo: http://github.com/bcg/em-mongo * mongo-ruby-driver: http://github.com/mongodb/mongo-ruby-driver * Mongo Wire Protocol: http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol == License (The MIT License) Copyright © 2010 Brenden Grace Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the ‘Software’), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED ‘AS IS’, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. em-mongo-0.5.1/lib/0000755000004100000410000000000012313272263014032 5ustar www-datawww-dataem-mongo-0.5.1/lib/em-mongo/0000755000004100000410000000000012313272263015550 5ustar www-datawww-dataem-mongo-0.5.1/lib/em-mongo/core_ext.rb0000644000004100000410000000047212313272263017710 0ustar www-datawww-data#:nodoc: class String #:nodoc: def to_bson_code BSON::Code.new(self) end end #:nodoc: class Hash #:nodoc: def assert_valid_keys(*valid_keys) unknown_keys = keys - [valid_keys].flatten raise(ArgumentError, "Unknown key(s): #{unknown_keys.join(", ")}") unless unknown_keys.empty? end endem-mongo-0.5.1/lib/em-mongo/request_response.rb0000644000004100000410000000100412313272263021476 0ustar www-datawww-datamodule EM module Mongo class RequestResponse include EM::Deferrable def status @deferred_status end def completed? [:succeeded, :failed].include?(status) end def succeeded? status == :succeeded end def failed? status == :failed end def data @deferred_args[-1] if succeeded? && @deferred_args end def error @deferred_args[-1] if failed? && @deferred_args end end end end em-mongo-0.5.1/lib/em-mongo/connection.rb0000644000004100000410000001607112313272263020241 0ustar www-datawww-datamodule EM::Mongo DEFAULT_IP = "127.0.0.1" DEFAULT_PORT = 27017 DEFAULT_DB = "db" DEFAULT_NS = "ns" DEFAULT_QUERY_DOCS = 101 OP_REPLY = 1 OP_MSG = 1000 OP_UPDATE = 2001 OP_INSERT = 2002 OP_QUERY = 2004 OP_GET_MORE = 2005 OP_DELETE = 2006 OP_KILL_CURSORS = 2007 OP_QUERY_TAILABLE = 2 ** 1 OP_QUERY_SLAVE_OK = 2 ** 2 OP_QUERY_OPLOG_REPLAY = 2 ** 3 OP_QUERY_NO_CURSOR_TIMEOUT = 2 ** 4 OP_QUERY_AWAIT_DATA = 2 ** 5 OP_QUERY_EXHAUST = 2 ** 6 ASCENDING = 1 DESCENDING = -1 FLAT2D = '2d' SPHERE2D = '2dsphere' GEO2D = '2d' DEFAULT_MAX_BSON_SIZE = 4 * 1024 * 1024 class EMConnection < EM::Connection MAX_RETRIES = 5 class Error < Exception; class ConnectionNotBound end end include EM::Deferrable RESERVED = 0 STANDARD_HEADER_SIZE = 16 RESPONSE_HEADER_SIZE = 20 attr_reader :connection def responses_pending? @responses.size >= 1 end def connected? @is_connected end def new_request_id @request_id += 1 end def slave_ok? 
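      # Reflects the :slave_ok option passed when the connection was created
      # (see #initialize below).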
@slave_ok end # MongoDB Commands def prepare_message(op, message, options={}) req_id = new_request_id message.prepend!(message_headers(op, req_id, message)) req_id = prepare_safe_message(message,options) if options[:safe] [req_id, message.to_s] end def prepare_safe_message(message,options) db_name = options[:db_name] unless db_name raise( ArgumentError, "You must include the :db_name option when :safe => true" ) end last_error_params = options[:last_error_params] || false last_error_message = BSON::ByteBuffer.new build_last_error_message(last_error_message, db_name, last_error_params) last_error_id = new_request_id last_error_message.prepend!(message_headers(EM::Mongo::OP_QUERY, last_error_id, last_error_message)) message.append!(last_error_message) last_error_id end def message_headers(operation, request_id, message) headers = BSON::ByteBuffer.new headers.put_int(16 + message.size) headers.put_int(request_id) headers.put_int(0) headers.put_int(operation) headers end def send_command(op, message, options={}, &cb) request_id, buffer = prepare_message(op, message, options) callback do send_data buffer end @responses[request_id] = cb if cb request_id end # EM hooks def initialize(options={}) @request_id = 0 @retries = 0 @responses = {} @is_connected = false @host = options[:host] || DEFAULT_IP @port = options[:port] || DEFAULT_PORT @on_unbind = options[:unbind_cb] || proc {} @reconnect_in = options[:reconnect_in]|| false @slave_ok = options[:slave_ok] || false @on_close = proc { raise Error, "failure with mongodb server #{@host}:#{@port}" } timeout options[:timeout] if options[:timeout] errback { @on_close.call } end def self.connect(host = DEFAULT_IP, port = DEFAULT_PORT, timeout = nil, opts = nil) opt = {:host => host, :port => port, :timeout => timeout, :reconnect_in => false}.merge(opts) EM.connect(host, port, self, opt) end def connection_completed @buffer = BSON::ByteBuffer.new @is_connected = true @retries = 0 succeed end def message_received?(buffer) x= remaining_bytes(@buffer) x > STANDARD_HEADER_SIZE && x >= peek_size(@buffer) end def remaining_bytes(buffer) buffer.size-buffer.position end def peek_size(buffer) position= buffer.position size= buffer.get_int buffer.position= position size end def receive_data(data) @buffer.append!(data) @buffer.rewind while message_received?(@buffer) response = next_response callback = @responses.delete(response.response_to) callback.call(response) if callback end if @buffer.more? if @buffer.position > 0 remaining_bytes= @buffer.size-@buffer.position @buffer = BSON::ByteBuffer.new(@buffer.to_s[@buffer.position,remaining_bytes]) @buffer.rewind end else @buffer.clear end close_connection if @close_pending && @responses.empty? end def next_response() ServerResponse.new(@buffer, self) end def unbind if @is_connected @responses.values.each { |resp| resp.call(:disconnected) } @request_id = 0 @responses = {} end @is_connected = false set_deferred_status(nil) if @reconnect_in && @retries < MAX_RETRIES EM.add_timer(@reconnect_in) { reconnect(@host, @port) } elsif @on_unbind @on_unbind.call else raise "Connection to Mongo Lost" end @retries += 1 end def close @on_close = proc { yield if block_given? } if @responses.empty? close_connection_after_writing else @close_pending = true end end # Constructs a getlasterror message. This method is used exclusively by # Connection#send_message_with_safe_check. # # Because it modifies message by reference, we don't need to return it. 
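    # For reference, the OP_QUERY body assembled by the method below looks roughly
    # like this (a sketch inferred from the code, not authoritative wire documentation):
    #
    #   int32   0                  flags
    #   cstring "<db_name>.$cmd"   full collection name
    #   int32   0                  number to skip
    #   int32   -1                 number to return (exactly one document)
    #   bson    { :getlasterror => 1 }  plus any of the :w / :wtimeout / :fsync options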
def build_last_error_message(message, db_name, opts) message.put_int(0) BSON::BSON_RUBY.serialize_cstr(message, "#{db_name}.$cmd") message.put_int(0) message.put_int(-1) cmd = BSON::OrderedHash.new cmd[:getlasterror] = 1 if opts.is_a?(Hash) opts.assert_valid_keys(:w, :wtimeout, :fsync) cmd.merge!(opts) end message.put_binary(BSON::BSON_CODER.serialize(cmd, false).to_s) nil end end # An em-mongo Connection class Connection # Initialize and connect to a MongoDB instance # @param [String] host the host name or IP of the mongodb server to connect to # @param [Integer] port the port the mongodb server is listening on # @param [Integer] timeout the connection timeout # @opts [Hash] opts connection options def initialize(host = DEFAULT_IP, port = DEFAULT_PORT, timeout = nil, opts = {}) @em_connection = EMConnection.connect(host, port, timeout, opts) @db = {} end # Return a database with the given name. # # @param [String] db_name a valid database name. # # @return [EM::Mongo::Database] def db(name = DEFAULT_DB) @db[name] ||= EM::Mongo::Database.new(name, self) end # Close the connection to the database. def close @em_connection.close end #@return [true, false] # whether or not the connection is currently connected def connected? @em_connection.connected? end def send_command(*args, &block);@em_connection.send_command(*args, &block);end # Is it okay to connect to a slave? # # @return [Boolean] def slave_ok?;@em_connection.slave_ok?;end end end em-mongo-0.5.1/lib/em-mongo/conversions.rb0000644000004100000410000000604012313272263020445 0ustar www-datawww-data# encoding: UTF-8 # -- # Copyright (C) 2008-2010 10gen Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ++ module EM::Mongo #:nodoc: # Utility module to include when needing to convert certain types of # objects to mongo-friendly parameters. module Conversions ASCENDING_CONVERSION = ["ascending", "asc", "1"] DESCENDING_CONVERSION = ["descending", "desc", "-1"] # Converts the supplied +Array+ to a +Hash+ to pass to mongo as # sorting parameters. The returned +Hash+ will vary depending # on whether the passed +Array+ is one or two dimensional. # # Example: # # array_as_sort_parameters([["field1", :asc], ["field2", :desc]]) => # { "field1" => 1, "field2" => -1} def array_as_sort_parameters(value) order_by = BSON::OrderedHash.new if value.first.is_a? Array value.each do |param| if (param.class.name == "String") order_by[param] = 1 else order_by[param[0]] = sort_value(param[1]) unless param[1].nil? end end elsif !value.empty? if order_by.size == 1 order_by[value.first] = 1 else order_by[value.first] = sort_value(value[1]) end end order_by end # Converts the supplied +String+ or +Symbol+ to a +Hash+ to pass to mongo as # a sorting parameter with ascending order. If the +String+ # is empty then an empty +Hash+ will be returned. # # Example: # # *DEPRECATED # # string_as_sort_parameters("field") => { "field" => 1 } # string_as_sort_parameters("") => {} def string_as_sort_parameters(value) return {} if (str = value.to_s).empty? 
{ str => 1 } end # Converts the +String+, +Symbol+, or +Integer+ to the # corresponding sort value in MongoDB. # # Valid conversions (case-insensitive): # # ascending, asc, :ascending, :asc, 1 => 1 # descending, desc, :descending, :desc, -1 => -1 # # If the value is invalid then an error will be raised. def sort_value(value) val = value.to_s.downcase return 1 if ASCENDING_CONVERSION.include?(val) return -1 if DESCENDING_CONVERSION.include?(val) raise InvalidSortValueError.new( "#{self} was supplied as a sort direction when acceptable values are: " + "EM::Mongo::ASCENDING, 'ascending', 'asc', :ascending, :asc, 1, EM::Mongo::DESCENDING, " + "'descending', 'desc', :descending, :desc, -1.") end end end em-mongo-0.5.1/lib/em-mongo/support.rb0000644000004100000410000000470412313272263017616 0ustar www-datawww-data# encoding: UTF-8 # -- # Copyright (C) 2008-2010 10gen Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ++ require 'digest/md5' module EM::Mongo module Support include EM::Mongo::Conversions extend self # Generate an MD5 for authentication. # # @param [String] username # @param [String] password # @param [String] nonce # # @return [String] a key for db authentication. def auth_key(username, password, nonce) Digest::MD5.hexdigest("#{nonce}#{username}#{hash_password(username, password)}") end # Return a hashed password for auth. # # @param [String] username # @param [String] plaintext # # @return [String] def hash_password(username, plaintext) Digest::MD5.hexdigest("#{username}:mongo:#{plaintext}") end def validate_db_name(db_name) unless [String, Symbol].include?(db_name.class) raise TypeError, "db_name must be a string or symbol" end [" ", ".", "$", "/", "\\"].each do |invalid_char| if db_name.include? invalid_char raise EM::Mongo::InvalidNSName, "database names cannot contain the character '#{invalid_char}'" end end raise EM::Mongo::InvalidNSName, "database name cannot be the empty string" if db_name.empty? db_name end def format_order_clause(order) case order when String, Symbol then string_as_sort_parameters(order) when Array then array_as_sort_parameters(order) else raise InvalidSortValueError, "Illegal sort clause, '#{order.class.name}'; must be of the form " + "[['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]" end end # Determine if a database command has succeeded by # checking the document response. # # @param [Hash] doc # # @return [Boolean] true if the 'ok' key is either 1 or *true*. 
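    #
    # @example Illustrative values, mirroring the comparison performed below
    #   EM::Mongo::Support.ok?('ok' => 1.0)   #=> true
    #   EM::Mongo::Support.ok?('ok' => true)  #=> true
    #   EM::Mongo::Support.ok?('ok' => 0.0)   #=> false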
def ok?(doc) doc['ok'] == 1.0 || doc['ok'] == true end end end em-mongo-0.5.1/lib/em-mongo/database.rb0000644000004100000410000003251512313272263017647 0ustar www-datawww-datamodule EM::Mongo class Database SYSTEM_NAMESPACE_COLLECTION = "system.namespaces" SYSTEM_INDEX_COLLECTION = "system.indexes" SYSTEM_PROFILE_COLLECTION = "system.profile" SYSTEM_USER_COLLECTION = "system.users" SYSTEM_JS_COLLECTION = "system.js" SYSTEM_COMMAND_COLLECTION = "$cmd" # The length of time that Collection.ensure_index should cache index calls attr_accessor :cache_time # @param [String] name the database name. # @param [EM::Mongo::Connection] connection a connection object pointing to MongoDB. Note # that databases are usually instantiated via the Connection class. See the examples below. # # @core databases constructor_details def initialize(name = DEFAULT_DB, connection = nil) @db_name = name @em_connection = connection || EM::Mongo::Connection.new @collection = nil @collections = {} @cache_time = 300 #5 minutes. end # Get a collection by name. # # @param [String, Symbol] name the collection name. # # @return [EM::Mongo::Collection] def collection(name = EM::Mongo::DEFAULT_NS) @collections[name] ||= EM::Mongo::Collection.new(@db_name, name, @em_connection) end # Get the connection associated with this database # # @return [EM::Mongo::Connection] def connection @em_connection end #Get the name of this database # # @return [String] def name @db_name end # Get an array of collection names in this database. # # @return [EM::Mongo::RequestResponse] def collection_names response = RequestResponse.new name_resp = collections_info.defer_as_a name_resp.callback do |docs| names = docs.collect{ |doc| doc['name'] || '' } names = names.delete_if {|name| name.index(self.name).nil? || name.index('$')} names = names.map{ |name| name.sub(self.name + '.','')} response.succeed(names) end name_resp.errback { |err| response.fail err } response end # Get an array of Collection instances, one for each collection in this database. # # @return [EM::Mongo::RequestResponse] def collections response = RequestResponse.new name_resp = collection_names name_resp.callback do |names| collections = names.map do |name| EM::Mongo::Collection.new(@db_name, name, @em_connection) end response.succeed collections end name_resp.errback { |err| response.fail err } response end # Get info on system namespaces (collections). This method returns # a cursor which can be iterated over. For each collection, a hash # will be yielded containing a 'name' string and, optionally, an 'options' hash. # # @param [String] coll_name return info for the specifed collection only. # # @return [EM::Mongo::Cursor] def collections_info(coll_name=nil) selector = {} selector[:name] = full_collection_name(coll_name) if coll_name Cursor.new(EM::Mongo::Collection.new(@db_name, SYSTEM_NAMESPACE_COLLECTION, @em_connection), :selector => selector) end # Create a collection. # # new collection. If +strict+ is true, will raise an error if # collection +name+ already exists. # # @param [String, Symbol] name the name of the new collection. # # @option opts [Boolean] :capped (False) created a capped collection. # # @option opts [Integer] :size (Nil) If +capped+ is +true+, # specifies the maximum number of bytes for the capped collection. # If +false+, specifies the number of bytes allocated # for the initial extent of the collection. # # @option opts [Integer] :max (Nil) If +capped+ is +true+, indicates # the maximum number of records in a capped collection. 
# # @raise [MongoDBError] raised under two conditions: # either we're in +strict+ mode and the collection # already exists or collection creation fails on the server. # # @return [EM::Mongo::RequestResponse] Calls back with the new collection def create_collection(name) response = RequestResponse.new names_resp = collection_names names_resp.callback do |names| if names.include?(name.to_s) response.succeed EM::Mongo::Collection.new(@db_name, name, @em_connection) end # Create a new collection. oh = BSON::OrderedHash.new oh[:create] = name cmd_resp = command(oh) cmd_resp.callback do |doc| if EM::Mongo::Support.ok?(doc) response.succeed EM::Mongo::Collection.new(@db_name, name, @em_connection) else response.fail [MongoDBError, "Error creating collection: #{doc.inspect}"] end end cmd_resp.errback { |err| response.fail err } end names_resp.errback { |err| response.fail err } response end # Drop a collection by +name+. # # @param [String, Symbol] name # # @return [EM::Mongo::RequestResponse] Calls back with +true+ on success or +false+ if the collection name doesn't exist. def drop_collection(name) response = RequestResponse.new names_resp = collection_names names_resp.callback do |names| if names.include?(name.to_s) cmd_resp = command(:drop=>name) cmd_resp.callback do |doc| response.succeed EM::Mongo::Support.ok?(doc) end cmd_resp.errback { |err| response.fail err } else response.succeed false end end names_resp.errback { |err| response.fail err } response end # Drop an index from a given collection. Normally called from # Collection#drop_index or Collection#drop_indexes. # # @param [String] collection_name # @param [String] index_name # # @return [EM::Mongo::RequestResponse] returns +true+ on success. # # @raise MongoDBError if there's an error renaming the collection. def drop_index(collection_name, index_name) response = RequestResponse.new oh = BSON::OrderedHash.new oh[:deleteIndexes] = collection_name oh[:index] = index_name.to_s cmd_resp = command(oh, :check_response => false) cmd_resp.callback do |doc| if EM::Mongo::Support.ok?(doc) response.succeed(true) else response.fail [MongoDBError, "Error with drop_index command: #{doc.inspect}"] end end cmd_resp.errback do |err| response.fail err end response end # Get information on the indexes for the given collection. # Normally called by Collection#index_information. # # @param [String] collection_name # # @return [EM::Mongo::RequestResponse] Calls back with a hash where keys are index names and the values are lists of [key, direction] pairs # defining the index. def index_information(collection_name) response = RequestResponse.new sel = {:ns => full_collection_name(collection_name)} idx_resp = Cursor.new(self.collection(SYSTEM_INDEX_COLLECTION), :selector => sel).defer_as_a idx_resp.callback do |indexes| info = indexes.inject({}) do |info, index| info[index['name']] = index info end response.succeed info end idx_resp.errback do |err| fail err end response end # Run the getlasterror command with the specified replication options. # # @option opts [Boolean] :fsync (false) # @option opts [Integer] :w (nil) # @option opts [Integer] :wtimeout (nil) # # @return [EM::Mongo::RequestResponse] the entire response to getlasterror. # # @raise [MongoDBError] if the operation fails. 
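    #
    # @example A minimal usage sketch; `db` is assumed to be an EM::Mongo::Database
    #   obtained inside a running EventMachine reactor (the variable name is illustrative)
    #   resp = db.get_last_error(:fsync => true)
    #   resp.callback { |doc| puts(doc['err'] ? "last error: #{doc['err']}" : "no error") }
    #   resp.errback  { |err| puts "getlasterror failed: #{err.inspect}" }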
def get_last_error(opts={}) response = RequestResponse.new cmd = BSON::OrderedHash.new cmd[:getlasterror] = 1 cmd.merge!(opts) cmd_resp = command(cmd, :check_response => false) cmd_resp.callback do |doc| if EM::Mongo::Support.ok?(doc) response.succeed doc else response.fail [MongoDBError, "error retrieving last error: #{doc.inspect}"] end end cmd_resp.errback { |err| response.fail err } response end # Return +true+ if an error was caused by the most recently executed # database operation. # # @return [EM::Mongo::RequestResponse] def error? response = RequestResponse.new err_resp = get_last_error err_resp.callback do |doc| response.succeed doc['err'] != nil end err_resp.errback do |err| response.fail err end response end # Reset the error history of this database # # Calls to DB#previous_error will only return errors that have occurred # since the most recent call to this method. # # @return [EM::Mongo::RequestResponse] def reset_error_history command(:reseterror => 1) end # A shortcut returning db plus dot plus collection name. # # @param [String] collection_name # # @return [String] def full_collection_name(collection_name) "#{name}.#{collection_name}" end # Send a command to the database. # # Note: DB commands must start with the "command" key. For this reason, # any selector containing more than one key must be an OrderedHash. # # Note also that a command in MongoDB is just a kind of query # that occurs on the system command collection ($cmd). Examine this method's implementation # to see how it works. # # @param [OrderedHash, Hash] selector an OrderedHash, or a standard Hash with just one # key, specifying the command to be performed. In Ruby 1.9, OrderedHash isn't necessary since # hashes are ordered by default. # # @option opts [Boolean] :check_response (true) If +true+, raises an exception if the # command fails. # @option opts [Socket] :socket a socket to use for sending the command. This is mainly for internal use. # # @return [EM::Mongo::RequestResponse] Calls back with a hash representing the result of the command # # @core commands command_instance-method def command(selector, opts={}) check_response = opts.fetch(:check_response, true) raise MongoArgumentError, "command must be given a selector" unless selector.is_a?(Hash) && !selector.empty? if selector.keys.length > 1 && RUBY_VERSION < '1.9' && selector.class != BSON::OrderedHash raise MongoArgumentError, "DB#command requires an OrderedHash when hash contains multiple keys" end response = RequestResponse.new cmd_resp = Cursor.new(self.collection(SYSTEM_COMMAND_COLLECTION), :limit => -1, :selector => selector).next_document cmd_resp.callback do |doc| if doc.nil? response.fail([OperationFailure, "Database command '#{selector.keys.first}' failed: returned null."]) elsif (check_response && !EM::Mongo::Support.ok?(doc)) response.fail([OperationFailure, "Database command '#{selector.keys.first}' failed: #{doc.inspect}"]) else response.succeed(doc) end end cmd_resp.errback do |err| response.fail([OperationFailure, "Database command '#{selector.keys.first}' failed: #{err[1]}"]) end response end # Authenticate with the given username and password. Note that mongod # must be started with the --auth option for authentication to be enabled. 
# # @param [String] username # @param [String] password # # @return [EM::Mongo::RequestResponse] Calls back with +true+ or +false+, indicating success or failure # # @raise [AuthenticationError] # # @core authenticate authenticate-instance_method def authenticate(username, password) response = RequestResponse.new auth_resp = self.collection(SYSTEM_COMMAND_COLLECTION).first({'getnonce' => 1}) auth_resp.callback do |res| if not res or not res['nonce'] response.succeed false else auth = BSON::OrderedHash.new auth['authenticate'] = 1 auth['user'] = username auth['nonce'] = res['nonce'] auth['key'] = EM::Mongo::Support.auth_key(username, password, res['nonce']) auth_resp2 = self.collection(SYSTEM_COMMAND_COLLECTION).first(auth) auth_resp2.callback do |res| if EM::Mongo::Support.ok?(res) response.succeed true else response.fail res end end auth_resp2.errback { |err| response.fail err } end end auth_resp.errback { |err| response.fail err } response end # Adds a user to this database for use with authentication. If the user already # exists in the system, the password will be updated. # # @param [String] username # @param [String] password # # @return [EM::Mongo::RequestResponse] Calls back with an object representing the user. def add_user(username, password) response = RequestResponse.new user_resp = self.collection(SYSTEM_USER_COLLECTION).first({:user => username}) user_resp.callback do |res| user = res || {:user => username} user['pwd'] = EM::Mongo::Support.hash_password(username, password) response.succeed self.collection(SYSTEM_USER_COLLECTION).save(user) end user_resp.errback { |err| response.fail err } response end end end em-mongo-0.5.1/lib/em-mongo/prev.rb0000644000004100000410000000275512313272263017062 0ustar www-datawww-datamodule EM module Mongo class Collection alias :new_find :find def find(selector={}, opts={}, &blk) raise "find requires a block" if not block_given? new_find(selector, opts).defer_as_a.callback do |docs| blk.call(docs) end end def first(selector={}, opts={}, &blk) opts[:limit] = 1 find(selector, opts) do |res| yield res.first end end end class Connection def insert(collection_name, documents) db_name, col_name = db_and_col_name(collection_name) db(db_name).collection(col_name).insert(documents) end def update(collection_name, selector, document, options={}) db_name, col_name = db_and_col_name(collection_name) db(db_name).collection(col_name).update(selector, document, options) end def delete(collection_name, selector) db_name, col_name = db_and_col_name(collection_name) db(db_name).collection(col_name).remove(selector) end def find(collection_name, skip, limit, order, query, fields, &blk) db_name, col_name = db_and_col_name(collection_name) db(db_name).collection(col_name).find(query, :skip=>skip,:limit=>limit,:order=>order,:fields=>fields).defer_as_a.callback do |docs| yield docs if block_given? 
end end def db_and_col_name(full_name) parts = full_name.split(".") [ parts.shift, parts.join(".") ] end end end end em-mongo-0.5.1/lib/em-mongo/server_response.rb0000644000004100000410000000160412313272263021322 0ustar www-datawww-datamodule EM::Mongo class ServerResponse attr_reader :size, :request_id, :response_to, :op, :result_flags, :cursor_id, :starting_from, :number_returned, :docs, :connection def initialize(buffer, connection) @connection = connection # Header @size = buffer.get_int @request_id = buffer.get_int @response_to = buffer.get_int @op = buffer.get_int # Response Header @result_flags = buffer.get_int @cursor_id = buffer.get_long @starting_from = buffer.get_int @number_returned = buffer.get_int # Documents pos = buffer.position @docs = (1..@number_returned).map do size= @connection.peek_size(buffer) doc = BSON::BSON_CODER.deserialize(buffer.to_s[pos,size]) pos += size buffer.position = pos doc end end end end em-mongo-0.5.1/lib/em-mongo/collection.rb0000644000004100000410000010036512313272263020235 0ustar www-datawww-datamodule EM::Mongo class Collection attr_accessor :connection # Initialize a collection object. # # @param [String, Symbol] db the name of the database to which this collection belongs. # @param [String, Symbol] ns the name of the collection # @param [Connection] connection the EM::Mongo::Connection that will service this collection # # @return [Collection] def initialize(db, ns, connection = nil) @db = db || "db" @ns = ns || "ns" @name = [@db,@ns].join('.') @connection = connection || EM::Mongo::Connection.new end # The database that this collection belongs to # @return [EM::Mongo::Database] def db connection.db(@db) end #The name of this collection # @return [String] def name @ns end # Return a sub-collection of this collection by name. If 'users' is a collection, then # 'users.comments' is a sub-collection of users. # # @param [String, Symbol] name # the collection to return # # @return [Collection] # the specified sub-collection def [](name) name = "#{self.name}.#{name}" db.collection(name) end # Query the database. # # The +selector+ argument is a prototype document that all results must # match. For example: # # collection.find({"hello" => "world"}) # # only matches documents that have a key "hello" with value "world". # Matches can have other keys *in addition* to "hello". # # @return [EM::Mongo::Cursor] # a cursor over the results of the query # # @param [Hash] selector # a document specifying elements which must be present for a # document to be included in the result set. Note that in rare cases, # (e.g., with $near queries), the order of keys will matter. To preserve # key order on a selector, use an instance of BSON::OrderedHash (only applies # to Ruby 1.8). # # @option opts [Array, Hash] :fields field names that should be returned in the result # set ("_id" will be included unless explicity excluded). By limiting results to a certain subset of fields, # you can cut down on network traffic and decoding time. If using a Hash, keys should be field # names and values should be either 1 or 0, depending on whether you want to include or exclude # the given field. # @option opts [Integer] :skip number of documents to skip from the beginning of the result set # @option opts [Integer] :limit maximum number of documents to return # @option opts [Array] :sort an array of [key, direction] pairs to sort by. 
    #   Direction should be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
    # @option opts [String, Array, OrderedHash] :hint hint for the query optimizer, usually not necessary if
    #   using MongoDB > 1.1
    # @option opts [Boolean] :snapshot (false) if true, snapshot mode will be used for this query.
    #   Snapshot mode assures no duplicates are returned, or objects missed, which were present at both the start and
    #   end of the query's execution.
    #   For details see http://www.mongodb.org/display/DOCS/How+to+do+Snapshotting+in+the+Mongo+Database
    # @option opts [Integer] :batch_size (100) the number of documents to be returned by the database per
    #   GETMORE operation. A value of 0 will let the database server decide how many results to return.
    #   This option can be ignored for most use cases.
    # @option opts [Boolean] :timeout (true) when +true+, the returned cursor will be subject to
    #   the normal cursor timeout behavior of the mongod process. Disabling the timeout is not supported by em-mongo.
    # @option opts [Integer] :max_scan (nil) Limit the number of items to scan on both collection scans and indexed queries.
    # @option opts [Boolean] :show_disk_loc (false) Return the disk location of each query result (for debugging).
    # @option opts [Boolean] :return_key (false) Return the index key used to obtain the result (for debugging).
    # @option opts [Block] :transformer (nil) a block for transforming returned documents.
    #   This is normally used by object mappers to convert each returned document to an instance of a class.
    #
    # @raise [ArgumentError]
    #   if timeout is set to false
    #
    # @raise [RuntimeError]
    #   if given unknown options
    #
    # @core find find-instance_method
    def find(selector={}, opts={})
      opts          = opts.dup
      fields        = opts.delete(:fields)
      fields        = ["_id"] if fields && fields.empty?
      skip          = opts.delete(:skip) || skip || 0
      limit         = opts.delete(:limit) || 0
      sort          = opts.delete(:sort) || opts.delete(:order)
      hint          = opts.delete(:hint)
      snapshot      = opts.delete(:snapshot)
      batch_size    = opts.delete(:batch_size)
      timeout       = (opts.delete(:timeout) == false) ? false : true
      max_scan      = opts.delete(:max_scan)
      return_key    = opts.delete(:return_key)
      transformer   = opts.delete(:transformer)
      show_disk_loc = opts.delete(:show_disk_loc)

      if timeout == false
        raise ArgumentError, "EM::Mongo::Collection#find does not support disabling the timeout"
      end

      if hint
        hint = normalize_hint_fields(hint)
      end

      raise RuntimeError, "Unknown options [#{opts.inspect}]" unless opts.empty?

      EM::Mongo::Cursor.new(self, {
        :selector      => selector,
        :fields        => fields,
        :skip          => skip,
        :limit         => limit,
        :order         => sort,
        :hint          => hint,
        :snapshot      => snapshot,
        :timeout       => timeout,
        :batch_size    => batch_size,
        :transformer   => transformer,
        :max_scan      => max_scan,
        :show_disk_loc => show_disk_loc,
        :return_key    => return_key
      })
    end

    # Return a single object from the database.
    #
    # @return [EM::Mongo::RequestResponse]
    #   calls back with a single document or nil if no result is found.
    #
    # @param [Hash, ObjectId, Nil] spec_or_object_id a hash specifying elements
    #   which must be present for a document to be included in the result set or an
    #   instance of ObjectId to be used as the value for an _id query.
    #   If nil, an empty selector, {}, will be used.
    #
    # @option opts [Hash]
    #   any valid options that can be sent to Collection#find
    #
    # @raise [TypeError]
    #   if the argument is of an improper type.
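    #
    # @example A minimal usage sketch; `users` is assumed to be a connected
    #   EM::Mongo::Collection inside a running EventMachine reactor (the names are illustrative)
    #   resp = users.find_one('username' => 'brenden')
    #   resp.callback { |doc| puts(doc ? doc.inspect : 'no match') }
    #   resp.errback  { |err| puts "find_one failed: #{err.inspect}" }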
def find_one(spec_or_object_id=nil, opts={}) spec = case spec_or_object_id when nil {} when BSON::ObjectId {:_id => spec_or_object_id} when Hash spec_or_object_id else raise TypeError, "spec_or_object_id must be an instance of ObjectId or Hash, or nil" end find(spec, opts.merge(:limit => -1)).next_document end alias :first :find_one # Insert one or more documents into the collection. # # @param [Hash, Array] doc_or_docs # a document (as a hash) or array of documents to be inserted. # # @return [ObjectId, Array] # The _id of the inserted document or a list of _ids of all inserted documents. # # @see DB#remove for options that can be passed to :safe. # # @core insert insert-instance_method def insert(doc_or_docs) safe_insert(doc_or_docs, :safe => false).data end alias_method :<<, :insert # Insert one or more documents into the collection, with a failure if the operation doesn't succeed # Unlike insert, this method returns a deferrable # # @param [Hash, Array] doc_or_docs # a document (as a hash) or array of documents to be inserted. # # @return [EM::Mongo::RequestResponse] # Calls backw ith the _id of the inserted document or a list of _ids of all inserted documents. # # @option opts [Boolean, Hash] :safe (+true+) # run the operation in safe mode, which run a getlasterror command on the # database to report any assertion. In addition, a hash can be provided to # run an fsync and/or wait for replication of the insert (>= 1.5.1). Safe # options provided here will override any safe options set on this collection, # its database object, or the current connection. See the options on # for DB#get_last_error. # # @see DB#remove for options that can be passed to :safe. # # @core insert insert-instance_method def safe_insert(doc_or_docs, safe_opts = {}) response = RequestResponse.new safe_opts[:safe] = true unless safe_opts[:safe] == false doc_or_docs = [doc_or_docs] unless doc_or_docs.is_a?(Array) doc_or_docs.map! { |doc| sanitize_id!(doc) } insert_resp = insert_documents(doc_or_docs, @ns, true, safe_opts) insert_resp.callback do |ids| ids.length > 1 ? response.succeed(ids) : response.succeed(ids[0]) end insert_resp.errback do |err| response.fail err end response end # Update one or more documents in this collection. # # @param [Hash] selector # a hash specifying elements which must be present for a document to be updated. Note: # the update command currently updates only the first document matching the # given selector. If you want all matching documents to be updated, be sure # to specify :multi => true. # @param [Hash] document # a hash specifying the fields to be changed in the selected document, # or (in the case of an upsert) the document to be inserted # # @option opts [Boolean] :upsert (+false+) if true, performs an upsert (update or insert) # @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to # just the first matching document. Note: only works in MongoDB 1.1.3 or later. # # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode. # Otherwise, returns true. # # @core update update-instance_method def update(selector, document, opts={}) # Initial byte is 0. safe_update(selector, document, opts.merge(:safe => false)).data end # Update one or more documents in this collection. # # @param [Hash] selector # a hash specifying elements which must be present for a document to be updated. Note: # the update command currently updates only the first document matching the # given selector. 
If you want all matching documents to be updated, be sure # to specify :multi => true. # @param [EM::Mongo::RequestResponse] document # calls back with a hash specifying the fields to be changed in the selected document, # or (in the case of an upsert) the document to be inserted # # @option opts [Boolean] :upsert (+false+) if true, performs an upsert (update or insert) # @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to # just the first matching document. Note: only works in MongoDB 1.1.3 or later. # @option opts [Boolean] :safe (+true+) # If true, check that the save succeeded. OperationFailure # will be raised on an error. Note that a safe check requires an extra # round-trip to the database. Safe options provided here will override any safe # options set on this collection, its database object, or the current collection. # See the options for DB#get_last_error for details. # # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode. # Otherwise, returns true. # # @core update update-instance_method def safe_update(selector, document, opts={}) response = RequestResponse.new opts = opts.dup opts[:safe] = true unless opts[:safe] == false # Initial byte is 0. message = BSON::ByteBuffer.new("\0\0\0\0") BSON::BSON_RUBY.serialize_cstr(message, "#{@db}.#{@ns}") update_options = 0 update_options += 1 if opts.delete(:upsert) update_options += 2 if opts.delete(:multi) message.put_int(update_options) message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s) message.put_binary(BSON::BSON_CODER.serialize(document, false, true).to_s) if opts[:safe] send_resp = safe_send(EM::Mongo::OP_UPDATE, message, true, opts) send_resp.callback { response.succeed(true) } send_resp.errback { |err| response.fail(err) } else @connection.send_command(EM::Mongo::OP_UPDATE, message) response.succeed(true) end response end # Save a document to this collection. # # @param [Hash] doc # the document to be saved. If the document already has an '_id' key, # then an update (upsert) operation will be performed, and any existing # document with that _id is overwritten. Otherwise an insert operation is performed. # # @return [ObjectId] the _id of the saved document. # def save(doc, opts={}) safe_save(doc, opts.merge(:safe => false)).data end # Save a document to this collection. # # @param [Hash] doc # the document to be saved. If the document already has an '_id' key, # then an update (upsert) operation will be performed, and any existing # document with that _id is overwritten. Otherwise an insert operation is performed. # # @return [EM::Mongo::RequestResponse] Calls backw with the _id of the saved document. # # @option opts [Boolean, Hash] :safe (+true+) # run the operation in safe mode, which run a getlasterror command on the # database to report any assertion. In addition, a hash can be provided to # run an fsync and/or wait for replication of the save (>= 1.5.1). See the options # for DB#error. # def safe_save(doc, opts={}) opts[:safe] = true unless opts[:safe] == false id = has_id?(doc) sanitize_id!(doc) if id safe_update({:_id => id}, doc, opts.merge(:upsert => true)) else safe_insert(doc, opts) end end # Remove all documents from this collection. # # @param [Hash] selector # If specified, only matching documents will be removed. # # @option opts [Boolean, Hash] :safe (+false+) # run the operation in safe mode, which will run a getlasterror command on the # database to report any assertion. 
In addition, a hash can be provided to # run an fsync and/or wait for replication of the remove (>= 1.5.1). Safe # options provided here will override any safe options set on this collection, # its database, or the current connection. See the options for DB#get_last_error for more details. # # @example remove all documents from the 'users' collection: # users.remove # users.remove({}) # # @example remove only documents that have expired: # users.remove({:expire => {"$lte" => Time.now}}) # # @return [true] Returns true. # # @see DB#remove for options that can be passed to :safe. # # @core remove remove-instance_method def remove(selector={}, opts={}) # Initial byte is 0. message = BSON::ByteBuffer.new("\0\0\0\0") BSON::BSON_RUBY.serialize_cstr(message, "#{@db}.#{@ns}") message.put_int(0) message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s) @connection.send_command(EM::Mongo::OP_DELETE, message) true end # Drop the entire collection. USE WITH CAUTION. def drop db.drop_collection(@ns) end # Atomically update and return a document using MongoDB's findAndModify command. (MongoDB > 1.3.0) # # @option opts [Hash] :query ({}) a query selector document for matching the desired document. # @option opts [Hash] :update (nil) the update operation to perform on the matched document. # @option opts [Array, String, OrderedHash] :sort ({}) specify a sort option for the query using any # of the sort options available for Cursor#sort. Sort order is important if the query will be matching # multiple documents since only the first matching document will be updated and returned. # @option opts [Boolean] :remove (false) If true, removes the the returned document from the collection. # @option opts [Boolean] :new (false) If true, returns the updated document; otherwise, returns the document # prior to update. # # @return [EM::Mongo::RequestResponse] Calls back with the matched document. # # @core findandmodify find_and_modify-instance_method def find_and_modify(opts={}) response = RequestResponse.new cmd = BSON::OrderedHash.new cmd[:findandmodify] = @ns cmd.merge!(opts) cmd[:sort] = EM::Mongo::Support.format_order_clause(opts[:sort]) if opts[:sort] cmd_resp = db.command(cmd) cmd_resp.callback do |doc| response.succeed doc['value'] end cmd_resp.errback do |err| response.fail err end response end # Perform a map-reduce operation on the current collection. # # @param [String, BSON::Code] map a map function, written in JavaScript. # @param [String, BSON::Code] reduce a reduce function, written in JavaScript. # # @option opts [Hash] :query ({}) a query selector document, like what's passed to #find, to limit # the operation to a subset of the collection. # @option opts [Array] :sort ([]) an array of [key, direction] pairs to sort by. Direction should # be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc) # @option opts [Integer] :limit (nil) if passing a query, number of objects to return from the collection. # @option opts [String, BSON::Code] :finalize (nil) a javascript function to apply to the result set after the # map/reduce operation has finished. # @option opts [String] :out (nil) a valid output type. In versions of MongoDB prior to v1.7.6, # this option takes the name of a collection for the output results. In versions 1.7.6 and later, # this option specifies the output type. See the core docs for available output types. # @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The defualt # is false. 
Note that this option has no effect is versions of MongoDB > v1.7.6. # @option opts [Boolean ] :verbose (false) if true, provides statistics on job execution time. # @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not # the instantiated collection that's returned by default. Note if a collection name isn't returned in the # map-reduce output (as, for example, when using :out => {:inline => 1}), then you must specify this option # or an ArgumentError will be raised. # # @return [EM::Mongo::RequestResponse] Calls back with a EM::Mongo::Collection object or a Hash with the map-reduce command's results. # # @raise ArgumentError if you specify {:out => {:inline => true}} but don't specify :raw => true. # # @see http://www.mongodb.org/display/DOCS/MapReduce Offical MongoDB map/reduce documentation. # # @core mapreduce map_reduce-instance_method def map_reduce(map, reduce, opts={}) response = RequestResponse.new map = BSON::Code.new(map) unless map.is_a?(BSON::Code) reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code) raw = opts.delete(:raw) hash = BSON::OrderedHash.new hash['mapreduce'] = @ns hash['map'] = map hash['reduce'] = reduce hash.merge! opts cmd_resp = db.command(hash) cmd_resp.callback do |result| if EM::Mongo::Support.ok?(result) == false response.fail [Mongo::OperationFailure, "map-reduce failed: #{result['errmsg']}"] elsif raw response.succeed result elsif result["result"] response.succeed db.collection(result["result"]) else response.fail [ArgumentError, "Could not instantiate collection from result. If you specified " + "{:out => {:inline => true}}, then you must also specify :raw => true to get the results."] end end cmd_resp.errback do |err| response.fail(err) end response end alias :mapreduce :map_reduce # Return a list of distinct values for +key+ across all # documents in the collection. The key may use dot notation # to reach into an embedded object. # # @param [String, Symbol, OrderedHash] key or hash to group by. # @param [Hash] query a selector for limiting the result set over which to group. # # @example Saving zip codes and ages and returning distinct results. # @collection.save({:zip => 10010, :name => {:age => 27}}) # @collection.save({:zip => 94108, :name => {:age => 24}}) # @collection.save({:zip => 10010, :name => {:age => 27}}) # @collection.save({:zip => 99701, :name => {:age => 24}}) # @collection.save({:zip => 94108, :name => {:age => 27}}) # # @collection.distinct(:zip) # [10010, 94108, 99701] # @collection.distinct("name.age") # [27, 24] # # # You may also pass a document selector as the second parameter # # to limit the documents over which distinct is run: # @collection.distinct("name.age", {"name.age" => {"$gt" => 24}}) # [27] # # @return [EM::Mongo::RequestResponse] Calls back with an array of distinct values. def distinct(key, query=nil) raise MongoArgumentError unless [String, Symbol].include?(key.class) response = RequestResponse.new command = BSON::OrderedHash.new command[:distinct] = @ns command[:key] = key.to_s command[:query] = query cmd_resp = db.command(command) cmd_resp.callback do |resp| response.succeed resp["values"] end cmd_resp.errback do |err| response.fail err end response end # Perform a group aggregation. # # @param [Hash] opts the options for this group operation. The minimum required are :initial # and :reduce. # # @option opts [Array, String, Symbol] :key (nil) Either the name of a field or a list of fields to group by (optional). 
# @option opts [String, BSON::Code] :keyf (nil) A JavaScript function to be used to generate the grouping keys (optional). # @option opts [String, BSON::Code] :cond ({}) A document specifying a query for filtering the documents over # which the aggregation is run (optional). # @option opts [Hash] :initial the initial value of the aggregation counter object (required). # @option opts [String, BSON::Code] :reduce (nil) a JavaScript aggregation function (required). # @option opts [String, BSON::Code] :finalize (nil) a JavaScript function that receives and modifies # each of the resultant grouped objects. Available only when group is run with command # set to true. # # @return [EM::Mongo::RequestResponse] calls back with the command response consisting of grouped items. def group(opts={}) response = RequestResponse.new reduce = opts[:reduce] finalize = opts[:finalize] cond = opts.fetch(:cond, {}) initial = opts[:initial] if !(reduce && initial) raise MongoArgumentError, "Group requires at minimum values for initial and reduce." end cmd = { "group" => { "ns" => @ns, "$reduce" => reduce.to_bson_code, "cond" => cond, "initial" => initial } } if finalize cmd['group']['finalize'] = finalize.to_bson_code end if key = opts[:key] if key.is_a?(String) || key.is_a?(Symbol) key = [key] end key_value = {} key.each { |k| key_value[k] = 1 } cmd["group"]["key"] = key_value elsif keyf = opts[:keyf] cmd["group"]["$keyf"] = keyf.to_bson_code end cmd_resp = db.command(cmd) cmd_resp.callback do |result| response.succeed result["retval"] end cmd_resp.errback do |err| response.fail err end response end # Get the number of documents in this collection. # # @return [EM::Mongo::RequestResponse] def count find().count end alias :size :count # Return stats on the collection. Uses MongoDB's collstats command. # # @return [EM::Mongo::RequestResponse] def stats @db.command({:collstats => @name}) end # Get information on the indexes for this collection. # # @return [EM::Mongo::RequestResponse] Calls back with a hash where the keys are index names. # # @core indexes def index_information db.index_information(@ns) end # Create a new index. # # @param [String, Array] spec # should be either a single field name or an array of # [field name, direction] pairs. Directions should be specified # as EM::Mongo::ASCENDING, EM::Mongo::DESCENDING, EM::Mongo::FLAT2D, EM::Mongo::SPHERE2D # # Note that MongoDB 2.2 used 2d flat indexes and called them geo, MongoDB 2.4 has 2d and 2dsphere indexes # EM::Mongo::GEO2D is kept for backward compatiblity and is creating a flat 2d index # # Note that geospatial indexing only works with versions of MongoDB >= 1.3.3+. Keep in mind, too, # that in order to geo-index a given field, that field must reference either an array or a sub-object # where the first two values represent x- and y-coordinates. Examples can be seen below. # # Also note that it is permissible to create compound indexes that include a geospatial index as # long as the geospatial index comes first. # # If your code calls create_index frequently, you can use Collection#ensure_index to cache these calls # and thereby prevent excessive round trips to the database. # # @option opts [Boolean] :unique (false) if true, this index will enforce a uniqueness constraint. # @option opts [Boolean] :background (false) indicate that the index should be built in the background. This # feature is only available in MongoDB >= 1.3.2. 
# @option opts [Boolean] :drop_dups (nil) If creating a unique index on a collection with pre-existing records, # this option will keep the first document the database indexes and drop all subsequent with duplicate values. # @option opts [Integer] :min (nil) specify the minimum longitude and latitude for a geo index. # @option opts [Integer] :max (nil) specify the maximum longitude and latitude for a geo index. # # @example Creating a compound index: # @posts.create_index([['subject', EM::Mongo::ASCENDING], ['created_at', EM::Mongo::DESCENDING]]) # # @example Creating a geospatial index: # @restaurants.create_index([['location', EM::Mongo::SPHERE2D]]) # # # Note that this will work only if 'location' represents x,y coordinates: # {'location': [0, 50]} # {'location': {'x' => 0, 'y' => 50}} # {'location': {'latitude' => 0, 'longitude' => 50}} # # @example A geospatial index with alternate longitude and latitude: # @restaurants.create_index([['location', EM::Mongo::SPHERE2D]], :min => 500, :max => 500) # # @return [String] the name of the index created. # # @core indexes create_index-instance_method def create_index(spec, opts={}) field_spec = parse_index_spec(spec) opts = opts.dup name = opts.delete(:name) || generate_index_name(field_spec) name = name.to_s if name generate_indexes(field_spec, name, opts) name end # Drop a specified index. # # @param [EM::Mongo::RequestResponse] name # # @core indexes def drop_index(name) if name.is_a?(Array) response = RequestResponse.new name_resp = index_name(name) name_resp.callback do |name| drop_resp = db.drop_index(@ns, name) drop_resp.callback { response.succeed } drop_resp.errback { |err| response.fail(err) } end name_resp.errback { |err| response.fail(err) } response else db.drop_index(@ns, name) end end # Drop all indexes. # # @core indexes def drop_indexes # Note: calling drop_indexes with no args will drop them all. db.drop_index(@ns, '*') end protected def normalize_hint_fields(hint) case hint when String {hint => 1} when Hash hint when nil nil else h = BSON::OrderedHash.new hint.to_a.each { |k| h[k] = 1 } h end end private def has_id?(doc) # mongo-ruby-driver seems to take :_id over '_id' for some reason id = doc[:_id] || doc['_id'] return id if id nil end def sanitize_id!(doc) doc[:_id] = has_id?(doc) || BSON::ObjectId.new doc.delete('_id') doc end # Sends a Mongo::Constants::OP_INSERT message to the database. # Takes an array of +documents+, an optional +collection_name+, and a # +check_keys+ setting. def insert_documents(documents, collection_name=@name, check_keys = true, safe_options={}) response = RequestResponse.new # Initial byte is 0. 
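      # The buffer assembled below follows the OP_INSERT wire format: an int32 of
      # flag bits (the four zero bytes passed to ByteBuffer.new), the full
      # "<db>.<collection>" name as a cstring, then each document appended as raw BSON.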
message = BSON::ByteBuffer.new("\0\0\0\0") BSON::BSON_RUBY.serialize_cstr(message, "#{@db}.#{collection_name}") documents.each do |doc| message.put_binary(BSON::BSON_CODER.serialize(doc, check_keys, true).to_s) end raise InvalidOperation, "Exceded maximum insert size of 16,000,000 bytes" if message.size > 16_000_000 ids = documents.collect { |o| o[:_id] || o['_id'] } if safe_options[:safe] send_resp = safe_send(EM::Mongo::OP_INSERT, message, ids, safe_options) send_resp.callback { response.succeed(ids) } send_resp.errback { |err| response.fail(err) } else @connection.send_command(EM::Mongo::OP_INSERT, message) response.succeed(ids) end response end def safe_send(op, message, return_val, options={}) response = RequestResponse.new options[:safe] = true options[:db_name] = @db @connection.send_command(op, message, options) do |server_resp| docs = server_resp.docs if server_resp.number_returned == 1 && (error = docs[0]['err'] || docs[0]['errmsg']) @connection.close if error == "not master" error = "wtimeout" if error == "timeout" response.fail [EM::Mongo::OperationFailure, "#{docs[0]['code']}: #{error}"] else response.succeed(return_val) end end response end def index_name(spec) response = RequestResponse.new field_spec = parse_index_spec(spec) info_resp = index_information info_resp.callback do |indexes| found = indexes.values.find do |index| index['key'] == field_spec end response.succeed( found ? found['name'] : nil ) end info_resp.errback do |err| response.fail err end response end def parse_index_spec(spec) field_spec = BSON::OrderedHash.new if spec.is_a?(String) || spec.is_a?(Symbol) field_spec[spec.to_s] = 1 elsif spec.is_a?(Array) && spec.all? {|field| field.is_a?(Array) } spec.each do |f| if [EM::Mongo::ASCENDING, EM::Mongo::DESCENDING, EM::Mongo::SPHERE2D, EM::Mongo::FLAT2D, EM::Mongo::GEO2D].include?(f[1]) field_spec[f[0].to_s] = f[1] else raise MongoArgumentError, "Invalid index field #{f[1].inspect}; " + "should be one of EM::Mongo::ASCENDING (1), EM::Mongo::DESCENDING (-1), EM::Mongo::SPHERE2D ('2dsphere')" + " or EM::Mongo::FLAT2D ('2d') (GEO2D is deprecated)" end end else raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " + "should be either a string, symbol, or an array of arrays." end field_spec end def generate_indexes(field_spec, name, opts) selector = { :name => name, :ns => "#{@db}.#{@ns}", :key => field_spec } selector.merge!(opts) insert_documents([selector], EM::Mongo::Database::SYSTEM_INDEX_COLLECTION, false) end def generate_index_name(spec) indexes = [] spec.each_pair do |field, direction| indexes.push("#{field}_#{direction}") end indexes.join("_") end end end em-mongo-0.5.1/lib/em-mongo/cursor.rb0000644000004100000410000004026312313272263017417 0ustar www-datawww-data# encoding: UTF-8 # Copyright (C) 2008-2011 10gen Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module EM::Mongo # A cursor over query results. Returned objects are hashes. 
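  # A usage sketch; `users` is assumed to be a connected EM::Mongo::Collection
  # running inside an EventMachine reactor (the names and selector are illustrative):
  #
  #   cursor = users.find('age' => {'$gte' => 21}).limit(10).sort([['age', :desc]])
  #   resp   = cursor.defer_as_a
  #   resp.callback { |docs| docs.each { |doc| puts doc['name'] } }
  #   resp.errback  { |err| puts "query failed: #{err.inspect}" }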
class Cursor include EM::Mongo::Conversions #include Enumerable attr_reader :collection, :selector, :fields, :order, :hint, :snapshot, :timeout, :full_collection_name, :transformer # Create a new cursor. # # Note: cursors are created when executing queries using [Collection#find] and other # similar methods. Application developers shouldn't have to create cursors manually. # # @return [Cursor] # # @core cursors constructor_details def initialize(collection, opts={}) @cursor_id = nil @db = collection.db @collection = collection @connection = @db.connection #@logger = @connection.logger # Query selector @selector = opts[:selector] || {} # Special operators that form part of $query @order = opts[:order] @explain = opts[:explain] @hint = opts[:hint] @snapshot = opts[:snapshot] @max_scan = opts.fetch(:max_scan, nil) @return_key = opts.fetch(:return_key, nil) @show_disk_loc = opts.fetch(:show_disk_loc, nil) # Wire-protocol settings @fields = convert_fields_for_query(opts[:fields]) @skip = opts[:skip] || 0 @limit = opts[:limit] || 0 @tailable = opts[:tailable] || false @timeout = opts.fetch(:timeout, true) # Use this socket for the query #@socket = opts[:socket] @closed = false @query_run = false @transformer = opts[:transformer] batch_size(opts[:batch_size] || 0) @full_collection_name = "#{@collection.db.name}.#{@collection.name}" @cache = [] @returned = 0 if @collection.name =~ /^\$cmd/ || @collection.name =~ /^system/ @command = true else @command = false end end # Get the next document specified the cursor options. # # @return [EM::Mongo::RequestResponse] Calls back with the next document or Nil if no documents remain. def next_document response = RequestResponse.new if @cache.length == 0 refresh.callback do check_and_transform_document(@cache.shift, response) end else check_and_transform_document(@cache.shift, response) end response end alias :next :next_document def check_and_transform_document(doc, response) return response.succeed(nil) if doc.nil? if doc['$err'] err = doc['$err'] # If the server has stopped being the master (e.g., it's one of a # pair but it has died or something like that) then we close that # connection. The next request will re-open on master server. if err == "not master" @connection.close response.fail([ConnectionFailure, err]) else response.fail([OperationFailure, err]) end else response.succeed( @transformer ? @transformer.call(doc) : doc ) end end private :check_and_transform_document # Reset this cursor on the server. Cursor options, such as the # query string and the values for skip and limit, are preserved. def rewind! close @cache.clear @cursor_id = nil @closed = false @query_run = false @n_received = nil end # Determine whether this cursor has any remaining results. # # @return [EM::Mongo::RequestResponse] def has_next? response = RequestResponse.new num_resp = num_remaining num_resp.callback { |num| response.succeed( num > 0 ) } num_resp.errback { |err| response.fail err } response end # Get the size of the result set for this query. # # @param [Boolean] whether of not to take notice of skip and limit # # @return [EM::Mongo::RequestResponse] Calls back with the number of objects in the result set for this query. # # @raise [OperationFailure] on a database error. 
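    #
    # @example Illustrative count of matching documents; `comments` is an assumed
    #   EM::Mongo::Collection inside a running EventMachine reactor
    #   comments.find('approved' => false).count.callback { |n| puts "#{n} pending" }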
def count(skip_and_limit = false) response = RequestResponse.new command = BSON::OrderedHash["count", @collection.name, "query", @selector] if skip_and_limit command.merge!(BSON::OrderedHash["limit", @limit]) if @limit != 0 command.merge!(BSON::OrderedHash["skip", @skip]) if @skip != 0 end command.merge!(BSON::OrderedHash["fields", @fields]) cmd_resp = @db.command(command) cmd_resp.callback { |doc| response.succeed( doc['n'].to_i ) } cmd_resp.errback do |err| if err[1] =~ /ns missing/ response.succeed(0) else response.fail([OperationFailure, "Count failed: #{err[1]}"]) end end response end # Sort this cursor's results. # # This method overrides any sort order specified in the Collection#find # method, and only the last sort applied has an effect. # # @param [Symbol, Array] key_or_list either 1) a key to sort by or 2) # an array of [key, direction] pairs to sort by. Direction should # be specified as EM::Mongo::ASCENDING (or :ascending / :asc) or EM::Mongo::DESCENDING (or :descending / :desc) # # @raise [InvalidOperation] if this cursor has already been used. # # @raise [InvalidSortValueError] if the specified order is invalid. def sort(key_or_list, direction=nil) check_modifiable if !direction.nil? order = [[key_or_list, direction]] else order = key_or_list end @order = order self end # Limit the number of results to be returned by this cursor. # # This method overrides any limit specified in the Collection#find method, # and only the last limit applied has an effect. # # @return [Integer] the current number_to_return if no parameter is given. # # @raise [InvalidOperation] if this cursor has already been used. # # @core limit limit-instance_method def limit(number_to_return=nil) return @limit unless number_to_return check_modifiable @limit = number_to_return self end # Skips the first +number_to_skip+ results of this cursor. # Returns the current number_to_skip if no parameter is given. # # This method overrides any skip specified in the Collection#find method, # and only the last skip applied has an effect. # # @return [Integer] # # @raise [InvalidOperation] if this cursor has already been used. def skip(number_to_skip=nil) return @skip unless number_to_skip check_modifiable @skip = number_to_skip self end # Set the batch size for server responses. # # Note that the batch size will take effect only on queries # where the number to be returned is greater than 100. # # @param [Integer] size either 0 or some integer greater than 1. If 0, # the server will determine the batch size. # # @return [Cursor] def batch_size(size=0) check_modifiable if size < 0 || size == 1 raise ArgumentError, "Invalid value for batch_size #{size}; must be 0 or > 1." else @batch_size = size > @limit ? @limit : size end self end # Iterate over each document in this cursor, yielding it to the given # block. # # Iterating over an entire cursor will close it. # # @yield passes each document to a block for processing. When the cursor is empty, # each will yield a nil value # # @example if 'comments' represents a collection of comments: # comments.find.each do |doc| # if doc # puts doc['user'] # end # end def each(&blk) raise "A callback block is required for #each" unless blk EM.next_tick do next_doc_resp = next_document next_doc_resp.callback do |doc| blk.call(doc) doc.nil? ? close : self.each(&blk) end next_doc_resp.errback do |err| if blk.arity > 1 blk.call(:error, err) else blk.call(:error) end end end end # Receive all the documents from this cursor as an array of hashes. 
    # Receive all the documents from this cursor as an array of hashes.
    #
    # Notes:
    #
    # If you've already started iterating over the cursor, the array returned
    # by this method contains only the remaining documents. See Cursor#rewind! if you
    # need to reset the cursor.
    #
    # Use of this method is discouraged - in most cases, it's much more
    # efficient to retrieve documents as you need them by iterating over the cursor.
    #
    # @return [EM::Mongo::RequestResponse] Calls back with an array of documents.
    def defer_as_a
      response = RequestResponse.new
      items = []
      self.each do |doc, err|
        if doc == :error
          response.fail(err)
        elsif doc
          items << doc
        else
          response.succeed(items)
        end
      end
      response
    end

    # XXX to_a is confusing but we will leave it for now
    alias to_a defer_as_a

    # Get the explain plan for this cursor.
    #
    # @return [EM::Mongo::RequestResponse] Calls back with a document containing the explain plan for this cursor.
    #
    # @core explain explain-instance_method
    def explain
      response = RequestResponse.new
      c = Cursor.new(@collection,
        query_options_hash.merge(:limit => -@limit.abs, :explain => true))

      exp_response = c.next_document
      exp_response.callback do |explanation|
        c.close
        response.succeed(explanation)
      end
      exp_response.errback do |err|
        c.close
        response.fail(err)
      end
      response
    end

    # Close the cursor.
    #
    # Note: if a cursor is read until exhausted (read until EM::Mongo::Constants::OP_QUERY or
    # EM::Mongo::Constants::OP_GETMORE returns zero for the cursor id), there is no need to
    # close it manually.
    #
    # Note also: Collection#find takes an optional block argument which can be used to
    # ensure that your cursors get closed.
    #
    # @return [True]
    def close
      if @cursor_id && @cursor_id != 0
        message = BSON::ByteBuffer.new([0, 0, 0, 0])
        message.put_int(1)
        # Write the live cursor id into the message *before* zeroing our local
        # copy, so OP_KILL_CURSORS actually targets this cursor.
        message.put_long(@cursor_id)
        @connection.send_command(EM::Mongo::OP_KILL_CURSORS, message)
      end
      @cursor_id = 0
      @closed    = true
      true
    end

    # Is this cursor closed?
    #
    # @return [Boolean]
    def closed?; @closed; end

    # Returns an integer indicating which query options have been selected.
    #
    # @return [Integer]
    #
    # @see http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol#MongoWireProtocol-EM::Mongo::Constants::OPQUERY
    #   The MongoDB wire protocol.
    def query_opts
      opts  = 0
      opts |= EM::Mongo::OP_QUERY_NO_CURSOR_TIMEOUT unless @timeout
      opts |= EM::Mongo::OP_QUERY_SLAVE_OK if @connection.slave_ok?
      opts |= EM::Mongo::OP_QUERY_TAILABLE if @tailable
      opts
    end

    # Get the query options for this Cursor.
    #
    # @return [Hash]
    def query_options_hash
      { :selector => @selector,
        :fields   => @fields,
        :skip     => @skip,
        :limit    => @limit,
        :order    => @order,
        :hint     => @hint,
        :snapshot => @snapshot,
        :timeout  => @timeout,
        :max_scan => @max_scan,
        :return_key => @return_key,
        :show_disk_loc => @show_disk_loc }
    end

    # Clean output for inspect.
    def inspect
      ""
    end

    private

    # Convert the +:fields+ parameter from a single field name or an array
    # of fields names to a hash, with the field names for keys and '1' for each
    # value.
    def convert_fields_for_query(fields)
      case fields
      when String, Symbol
        {fields => 1}
      when Array
        return nil if fields.length.zero?
        fields.each_with_object({}) { |field, hash| hash[field] = 1 }
      when Hash
        return fields
      end
    end

    # Return the number of documents remaining for this cursor.
    # @return [EM::Mongo::RequestResponse]
    def num_remaining
      response = RequestResponse.new
      if @cache.length == 0
        ref_resp = refresh
        ref_resp.callback { response.succeed(@cache.length) }
        ref_resp.errback { |err| response.fail err }
      else
        response.succeed(@cache.length)
      end
      response
    end
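
    # Worked example of the batching arithmetic in #refresh below (numbers are
    # illustrative only, not from the original source): with @limit = 25,
    # @batch_size = 10 and @returned = 20, the next OP_GET_MORE requests
    # @limit - @returned = 5 documents, since 5 < @batch_size. With no limit
    # (@limit == 0) the request simply carries @batch_size, and a value of 0
    # lets the server choose the batch size.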
    def refresh
      return RequestResponse.new.tap { |d| d.succeed } if @cursor_id && @cursor_id.zero?
      return send_initial_query unless @query_run

      message = BSON::ByteBuffer.new([0, 0, 0, 0])

      # DB name.
      BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@collection.name}")

      # Number of results to return.
      if @limit > 0
        limit = @limit - @returned
        if @batch_size > 0
          limit = limit < @batch_size ? limit : @batch_size
        end
        message.put_int(limit)
      else
        message.put_int(@batch_size)
      end

      # Cursor id.
      message.put_long(@cursor_id)

      response = RequestResponse.new
      @connection.send_command(EM::Mongo::OP_GET_MORE, message) do |resp|
        if resp == :disconnected
          response.fail(:disconnected)
        else
          @cache += resp.docs
          @n_received = resp.number_returned
          @returned += @n_received
          close_cursor_if_query_complete
          response.succeed
        end
      end
      response
    end

    # Run query the first time we request an object from the wire
    def send_initial_query
      response = RequestResponse.new
      message = construct_query_message
      @connection.send_command(EM::Mongo::OP_QUERY, message) do |resp|
        if resp == :disconnected
          response.fail(:disconnected)
        else
          @cache += resp.docs
          @n_received = resp.number_returned
          @cursor_id  = resp.cursor_id
          @returned  += @n_received
          @query_run  = true
          close_cursor_if_query_complete
          response.succeed
        end
      end
      response
    end

    def construct_query_message
      message = BSON::ByteBuffer.new
      message.put_int(query_opts)
      BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@collection.name}")
      message.put_int(@skip)
      message.put_int(@limit)
      spec = query_contains_special_fields? ? construct_query_spec : @selector
      message.put_binary(BSON::BSON_CODER.serialize(spec, false).to_s)
      message.put_binary(BSON::BSON_CODER.serialize(@fields, false).to_s) if @fields
      message
    end

    def construct_query_spec
      return @selector if @selector.has_key?('$query')
      spec = BSON::OrderedHash.new
      spec['$query']       = @selector
      spec['$orderby']     = EM::Mongo::Support.format_order_clause(@order) if @order
      spec['$hint']        = @hint if @hint && @hint.length > 0
      spec['$explain']     = true if @explain
      spec['$snapshot']    = true if @snapshot
      spec['$maxscan']     = @max_scan if @max_scan
      spec['$returnKey']   = true if @return_key
      spec['$showDiskLoc'] = true if @show_disk_loc
      spec
    end

    # Returns true if the query contains order, explain, hint, or snapshot.
    def query_contains_special_fields?
      @order || @explain || @hint || @snapshot
    end

    def to_s
      "DBResponse(flags=#@result_flags, cursor_id=#@cursor_id, start=#@starting_from)"
    end

    def close_cursor_if_query_complete
      close if @limit > 0 && @returned >= @limit
    end

    def check_modifiable
      if @query_run || @closed
        raise InvalidOperation, "Cannot modify the query once it has been run or closed."
      end
    end

  end
end
em-mongo-0.5.1/lib/em-mongo/exceptions.rb0000644000004100000410000000454312313272263020264 0ustar www-datawww-data
require "timeout"

# encoding: UTF-8
#
# --
# Copyright (C) 2008-2011 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++

module EM::Mongo

  # Generic Mongo Ruby Driver exception class.
  class MongoRubyError < StandardError; end
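
  # Illustrative note (added for clarity; 'collection' is a placeholder name):
  # in this driver these classes usually arrive through a deferrable's errback
  # rather than being raised directly. Cursor failures, for example, commonly
  # deliver a [klass, message] pair:
  #
  #   collection.find.defer_as_a.errback do |err|
  #     klass, message = err
  #     # e.g. klass == EM::Mongo::OperationFailure
  #     puts "query failed: #{message}"
  #   end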
  # Raised when MongoDB itself has returned an error.
  class MongoDBError < RuntimeError; end

  # Raised when configuration options cause connections, queries, etc., to fail.
  class ConfigurationError < MongoRubyError; end

  # Raised on fatal errors to GridFS.
  class GridError < MongoRubyError; end

  # Raised on fatal errors to GridFS.
  class GridFileNotFound < GridError; end

  # Raised on fatal errors to GridFS.
  class GridMD5Failure < GridError; end

  # Raised when invalid arguments are sent to Mongo Ruby methods.
  class MongoArgumentError < MongoRubyError; end

  # Raised on failures in connection to the database server.
  class ConnectionError < MongoRubyError; end

  # Raised on failures in connection to the database server.
  class ReplicaSetConnectionError < ConnectionError; end

  # Raised on failures in connection to the database server.
  class ConnectionTimeoutError < MongoRubyError; end

  # Raised when a connection operation fails.
  class ConnectionFailure < MongoDBError; end

  # Raised when authentication fails.
  class AuthenticationError < MongoDBError; end

  # Raised when a database operation fails.
  class OperationFailure < MongoDBError; end

  # Raised when a socket read operation times out.
  class OperationTimeout < ::Timeout::Error; end

  # Raised when a client attempts to perform an invalid operation.
  class InvalidOperation < MongoDBError; end

  # Raised when an invalid collection or database name is used (invalid namespace name).
  class InvalidNSName < RuntimeError; end

  # Raised when the client supplies an invalid value to sort by.
  class InvalidSortValueError < MongoRubyError; end

end
em-mongo-0.5.1/lib/em-mongo.rb0000644000004100000410000000200612313272263016073 0ustar www-datawww-data
require "eventmachine"

begin
  require "bson_ext"
rescue LoadError
  require "bson"
end

module EM::Mongo

  module Version
    STRING = File.read(File.dirname(__FILE__) + '/../VERSION')
    MAJOR, MINOR, TINY = STRING.split('.')
  end

  NAME    = 'em-mongo'
  LIBPATH = ::File.expand_path(::File.dirname(__FILE__)) + ::File::SEPARATOR
  PATH    = ::File.dirname(LIBPATH) + ::File::SEPARATOR

end

require File.join(EM::Mongo::LIBPATH, "em-mongo/conversions")
require File.join(EM::Mongo::LIBPATH, "em-mongo/support")
require File.join(EM::Mongo::LIBPATH, "em-mongo/database")
require File.join(EM::Mongo::LIBPATH, "em-mongo/connection")
require File.join(EM::Mongo::LIBPATH, "em-mongo/collection")
require File.join(EM::Mongo::LIBPATH, "em-mongo/exceptions")
require File.join(EM::Mongo::LIBPATH, "em-mongo/cursor")
require File.join(EM::Mongo::LIBPATH, "em-mongo/request_response")
require File.join(EM::Mongo::LIBPATH, "em-mongo/server_response")
require File.join(EM::Mongo::LIBPATH, "em-mongo/core_ext")

EMMongo = EM::Mongo
em-mongo-0.5.1/metadata.yml0000644000004100000410000000530512313272263015572 0ustar www-datawww-data
--- !ruby/object:Gem::Specification
name: em-mongo
version: !ruby/object:Gem::Version
  version: 0.5.1
platform: ruby
authors:
- bcg
- PlasticLizard
autorequire: 
bindir: bin
cert_chain: []
date: 2010-12-01 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: eventmachine
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.12.10
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.12.10
- !ruby/object:Gem::Dependency
  name: bson
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 1.9.2
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 1.9.2
description: EventMachine driver for MongoDB.
email: brenden.grace@gmail.com
executables: []
extensions: []
extra_rdoc_files:
- README.rdoc
files:
- ".gitignore"
- CHANGELOG
- Gemfile
- README.rdoc
- Rakefile
- VERSION
- em-mongo.gemspec
- examples/legacy.rb
- examples/readme.rb
- lib/em-mongo.rb
- lib/em-mongo/collection.rb
- lib/em-mongo/connection.rb
- lib/em-mongo/conversions.rb
- lib/em-mongo/core_ext.rb
- lib/em-mongo/cursor.rb
- lib/em-mongo/database.rb
- lib/em-mongo/exceptions.rb
- lib/em-mongo/prev.rb
- lib/em-mongo/request_response.rb
- lib/em-mongo/server_response.rb
- lib/em-mongo/support.rb
- spec/gem/Gemfile
- spec/gem/bundler.rb
- spec/gem/rubygems.rb
- spec/integration/collection_spec.rb
- spec/integration/connection_spec.rb
- spec/integration/cursor_spec.rb
- spec/integration/database_spec.rb
- spec/integration/request_response_spec.rb
- spec/spec_helper.rb
- spec/unit/bson_spec.rb
homepage: https://github.com/bcg/em-mongo
licenses: []
metadata: {}
post_install_message: 
rdoc_options:
- "--charset=UTF-8"
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project: em-mongo
rubygems_version: 2.2.0
signing_key: 
specification_version: 4
summary: An EventMachine driver for MongoDB.
test_files:
- spec/gem/Gemfile
- spec/gem/bundler.rb
- spec/gem/rubygems.rb
- spec/integration/collection_spec.rb
- spec/integration/connection_spec.rb
- spec/integration/cursor_spec.rb
- spec/integration/database_spec.rb
- spec/integration/request_response_spec.rb
- spec/spec_helper.rb
- spec/unit/bson_spec.rb
em-mongo-0.5.1/.gitignore0000644000004100000410000000006712313272263015257 0ustar www-datawww-data
Gemfile.lock
.bundle
*.gem
vendor
.rvmrc
.ruby-version
em-mongo-0.5.1/VERSION0000644000004100000410000000000612313272263014330 0ustar www-datawww-data
0.5.1
em-mongo-0.5.1/checksums.yaml.gz0000444000004100000410000000041312313272263016550 0ustar www-datawww-data