# ---- mongo-2.5.1/Rakefile ----
#!/usr/bin/env ruby
require 'rubygems'

begin
  require 'bundler'
  require 'bundler/gem_tasks'
rescue LoadError
  raise '[FAIL] Bundler not found! Install it with ' +
        '`gem install bundler; bundle install`.'
end

default_groups = [:default, :testing]
Bundler.require(*default_groups)

require 'rspec/core/rake_task'

RSpec::Core::RakeTask.new(:spec) do |t|
  t.rspec_opts = "--profile 5" if ENV['CI']
end

task :default => :spec

namespace :spec do
  task :ci => [:spec]
end

task :release => :spec do
  system "git tag -a #{Mongo::VERSION} -m 'Tagging release: #{Mongo::VERSION}'"
  system "git push --tags"
  system "gem build mongo.gemspec"
  system "gem push mongo-#{Mongo::VERSION}.gem"
  system "rm mongo-#{Mongo::VERSION}.gem"
end

desc "Generate all documentation"
task :docs => 'docs:yard'

namespace :docs do
  desc "Generate yard documentation"
  task :yard do
    out = File.join('yard-docs', Mongo::VERSION)
    FileUtils.rm_rf(out)
    system "yardoc -o #{out} --title mongo-#{Mongo::VERSION}"
  end
end

require_relative "profile/benchmarking"

# Some of these tasks require data files, available from the drivers team.
# See the comments above each task for details.
namespace :benchmark do
  desc "Run the driver benchmark tests."

  namespace :micro do
    desc "Run the common driver micro benchmarking tests"

    namespace :flat do
      desc "Benchmarking for flat bson documents."

      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called flat_bson.json.
      task :encode do
        puts "MICRO BENCHMARK:: FLAT:: ENCODE"
        Mongo::Benchmarking::Micro.run(:flat, :encode)
      end

      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called flat_bson.json.
      task :decode do
        puts "MICRO BENCHMARK:: FLAT:: DECODE"
        Mongo::Benchmarking::Micro.run(:flat, :decode)
      end
    end

    namespace :deep do
      desc "Benchmarking for deep bson documents."

      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called deep_bson.json.
      task :encode do
        puts "MICRO BENCHMARK:: DEEP:: ENCODE"
        Mongo::Benchmarking::Micro.run(:deep, :encode)
      end

      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called deep_bson.json.
      task :decode do
        puts "MICRO BENCHMARK:: DEEP:: DECODE"
        Mongo::Benchmarking::Micro.run(:deep, :decode)
      end
    end

    namespace :full do
      desc "Benchmarking for full bson documents."

      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called full_bson.json.
      task :encode do
        puts "MICRO BENCHMARK:: FULL:: ENCODE"
        Mongo::Benchmarking::Micro.run(:full, :encode)
      end

      # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called full_bson.json.
      task :decode do
        puts "MICRO BENCHMARK:: FULL:: DECODE"
        Mongo::Benchmarking::Micro.run(:full, :decode)
      end
    end
  end

  namespace :single_doc do
    desc "Run the common driver single-document benchmarking tests"

    task :command do
      puts "SINGLE DOC BENCHMARK:: COMMAND"
      Mongo::Benchmarking::SingleDoc.run(:command)
    end

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json.
    task :find_one do
      puts "SINGLE DOC BENCHMARK:: FIND ONE BY ID"
      Mongo::Benchmarking::SingleDoc.run(:find_one)
    end

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json.
    task :insert_one_small do
      puts "SINGLE DOC BENCHMARK:: INSERT ONE SMALL DOCUMENT"
      Mongo::Benchmarking::SingleDoc.run(:insert_one_small)
    end

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json.
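    # Example invocation (a sketch, not part of the task itself; it assumes the
    # LARGE_DOC.json file described above is already present under
    # Mongo::Benchmarking::DATA_PATH):
    #
    #   bundle exec rake benchmark:single_doc:insert_one_large
    #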
    task :insert_one_large do
      puts "SINGLE DOC BENCHMARK:: INSERT ONE LARGE DOCUMENT"
      Mongo::Benchmarking::SingleDoc.run(:insert_one_large)
    end
  end

  namespace :multi_doc do
    desc "Run the common driver multi-document benchmarking tests"

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called TWEET.json.
    task :find_many do
      puts "MULTI DOCUMENT BENCHMARK:: FIND MANY"
      Mongo::Benchmarking::MultiDoc.run(:find_many)
    end

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called SMALL_DOC.json.
    task :bulk_insert_small do
      puts "MULTI DOCUMENT BENCHMARK:: BULK INSERT SMALL"
      Mongo::Benchmarking::MultiDoc.run(:bulk_insert_small)
    end

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called LARGE_DOC.json.
    task :bulk_insert_large do
      puts "MULTI DOCUMENT BENCHMARK:: BULK INSERT LARGE"
      Mongo::Benchmarking::MultiDoc.run(:bulk_insert_large)
    end

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE.
    task :gridfs_upload do
      puts "MULTI DOCUMENT BENCHMARK:: GRIDFS UPLOAD"
      Mongo::Benchmarking::MultiDoc.run(:gridfs_upload)
    end

    # Requirement: A file in Mongo::Benchmarking::DATA_PATH, called GRIDFS_LARGE.
    task :gridfs_download do
      puts "MULTI DOCUMENT BENCHMARK:: GRIDFS DOWNLOAD"
      Mongo::Benchmarking::MultiDoc.run(:gridfs_download)
    end
  end

  namespace :parallel do
    desc "Run the common driver parallel ETL benchmarking tests"

    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI,
    # with the files used in this task.
    task :import do
      puts "PARALLEL ETL BENCHMARK:: IMPORT"
      Mongo::Benchmarking::Parallel.run(:import)
    end

    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called LDJSON_MULTI,
    # with the files used in this task.
    # Requirement: Another directory in "#{Mongo::Benchmarking::DATA_PATH}/LDJSON_MULTI"
    # called 'output'.
    task :export do
      puts "PARALLEL ETL BENCHMARK:: EXPORT"
      Mongo::Benchmarking::Parallel.run(:export)
    end

    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI,
    # with the files used in this task.
    task :gridfs_upload do
      puts "PARALLEL ETL BENCHMARK:: GRIDFS UPLOAD"
      Mongo::Benchmarking::Parallel.run(:gridfs_upload)
    end

    # Requirement: A directory in Mongo::Benchmarking::DATA_PATH, called GRIDFS_MULTI,
    # with the files used in this task.
    # Requirement: Another directory in "#{Mongo::Benchmarking::DATA_PATH}/GRIDFS_MULTI"
    # called 'output'.
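    # Setup sketch (an assumption, not part of the task itself): the 'output'
    # subdirectory required above could be created once before running the
    # export/gridfs_download tasks, e.g.
    #
    #   require 'fileutils'
    #   FileUtils.mkdir_p(File.join(Mongo::Benchmarking::DATA_PATH, 'GRIDFS_MULTI', 'output'))
    #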
    task :gridfs_download do
      puts "PARALLEL ETL BENCHMARK:: GRIDFS DOWNLOAD"
      Mongo::Benchmarking::Parallel.run(:gridfs_download)
    end
  end
end

# ---- mongo-2.5.1/bin/mongo_console ----
#!/usr/bin/env ruby
$LOAD_PATH[0, 0] = File.join(File.dirname(__FILE__), '..', 'lib')

require 'mongo'

# include the mongo namespace
include Mongo

begin
  require 'pry'
rescue LoadError
end

begin
  require 'irb'
rescue LoadError
end

if defined?(Pry)
  Pry.config.prompt_name = 'mongo'
  Pry.start
elsif defined?(IRB)
  IRB.start
else
  abort 'LoadError: mongo_console requires Pry or IRB'
end

# ---- mongo-2.5.1/data.tar.gz.sig (binary gem signature; content omitted) ----

# ---- mongo-2.5.1/spec/mongo/auth_spec.rb ----
require 'spec_helper'

describe Mongo::Auth do

  describe '#get' do

    context 'when a mongodb_cr user is provided' do

      let(:user) do
        Mongo::Auth::User.new(auth_mech: :mongodb_cr)
      end

      let(:cr) do
        described_class.get(user)
      end

      it 'returns CR' do
        expect(cr).to be_a(Mongo::Auth::CR)
      end
    end

    context 'when a mongodb_x509 user is provided' do

      let(:user) do
        Mongo::Auth::User.new(auth_mech: :mongodb_x509)
      end

      let(:x509) do
        described_class.get(user)
      end

      it 'returns X509' do
        expect(x509).to be_a(Mongo::Auth::X509)
      end
    end

    context 'when a plain user is provided' do

      let(:user) do
        Mongo::Auth::User.new(auth_mech: :plain)
      end

      let(:ldap) do
        described_class.get(user)
      end

      it 'returns LDAP' do
        expect(ldap).to be_a(Mongo::Auth::LDAP)
      end
    end

    context 'when an invalid mechanism is provided' do

      let(:user) do
        Mongo::Auth::User.new(auth_mech: :nothing)
      end

      it 'raises an error' do
        expect {
          described_class.get(user)
        }.to raise_error(Mongo::Auth::InvalidMechanism)
      end
    end
  end
end

# ---- mongo-2.5.1/spec/mongo/server_spec.rb ----
require 'spec_helper'

describe Mongo::Server do

  let(:topology) do
    double('topology')
  end

  let(:cluster) do
    double('cluster').tap do |cl|
      allow(cl).to receive(:topology).and_return(topology)
      allow(cl).to receive(:app_metadata).and_return(app_metadata)
    end
  end

  let(:listeners) do
    Mongo::Event::Listeners.new
  end

  let(:monitoring) do
    Mongo::Monitoring.new(monitoring: false)
  end

  let(:address) do
    default_address
  end

  let(:pool) do
    Mongo::Server::ConnectionPool.get(server)
  end

  describe '#==' do

    let(:server) do
      described_class.new(address, cluster, monitoring, listeners, TEST_OPTIONS)
    end

    after do
      expect(cluster).to receive(:pool).with(server).and_return(pool)
      server.disconnect!
end context 'when the other is not a server' do let(:other) do false end it 'returns false' do expect(server).to_not eq(other) end end context 'when the other is a server' do context 'when the addresses match' do let(:other) do described_class.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end it 'returns true' do expect(server).to eq(other) end end context 'when the addresses dont match' do let(:other_address) do Mongo::Address.new('127.0.0.1:27018') end let(:other) do described_class.new(other_address, cluster, monitoring, listeners, TEST_OPTIONS) end it 'returns false' do expect(server).to_not eq(other) end end end end describe '#connectable?' do context 'when the server is connectable' do let(:server) do described_class.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end after do server.disconnect! end before do expect(cluster).to receive(:pool).with(server).and_return(pool) end it 'returns true' do expect(server).to be_connectable end end context 'when the server is not connectable' do let(:bad_address) do Mongo::Address.new('127.0.0.1:666') end let(:server) do described_class.new(bad_address, cluster, monitoring, listeners, TEST_OPTIONS) end before do expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! end it 'returns false' do expect(server).to_not be_connectable end end end describe '#disconnect!' do let(:server) do described_class.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end it 'stops the monitor instance' do expect(server.instance_variable_get(:@monitor)).to receive(:stop!).and_return(true) expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! end end describe '#initialize' do let(:server) do described_class.new( address, cluster, monitoring, listeners, TEST_OPTIONS.merge(:heartbeat_frequency => 5) ) end after do expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! end it 'sets the address host' do expect(server.address.host).to eq(default_address.host) end it 'sets the address port' do expect(server.address.port).to eq(default_address.port) end it 'sets the options' do expect(server.options).to eq(TEST_OPTIONS.merge(:heartbeat_frequency => 5)) end end describe '#scan!' do let(:server) do described_class.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end after do expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! end it 'forces a scan on the monitor' do expect(server.scan!).to eq(server.description) end end describe '#reconnect!' do let(:server) do described_class.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end before do expect(server.monitor).to receive(:restart!).and_call_original end after do expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! end it 'restarts the monitor and returns true' do expect(server.reconnect!).to be(true) end end describe 'retry_writes?' 
do let(:server) do described_class.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end before do allow(server).to receive(:features).and_return(features) end context 'when the server version is less than 3.6' do let(:features) do double('features', sessions_enabled?: false) end context 'when the server has a logical_session_timeout value' do before do allow(server).to receive(:logical_session_timeout).and_return(true) end it 'returns false' do expect(server.retry_writes?).to be(false) end end context 'when the server does not have a logical_session_timeout value' do before do allow(server).to receive(:logical_session_timeout).and_return(nil) end it 'returns false' do expect(server.retry_writes?).to be(false) end end end context 'when the server version is at least 3.6' do let(:features) do double('features', sessions_enabled?: true) end context 'when the server has a logical_session_timeout value' do before do allow(server).to receive(:logical_session_timeout).and_return(true) end context 'when the server is a standalone' do before do allow(server).to receive(:standalone?).and_return(true) end it 'returns false' do expect(server.retry_writes?).to be(false) end end context 'when the server is not a standalone' do before do allow(server).to receive(:standalone?).and_return(true) end it 'returns false' do expect(server.retry_writes?).to be(false) end end end context 'when the server does not have a logical_session_timeout value' do before do allow(server).to receive(:logical_session_timeout).and_return(nil) end it 'returns false' do expect(server.retry_writes?).to be(false) end end end end end mongo-2.5.1/spec/mongo/cursor_spec.rb0000644000004100000410000002561713257253113017630 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cursor do describe '#each' do let(:server) do view.send(:server_selector).select_server(authorized_client.cluster) end let(:reply) do view.send(:send_initial_query, server) end let(:cursor) do described_class.new(view, reply, server) end context 'when no options are provided to the view' do let(:view) do Mongo::Collection::View.new(authorized_collection) end context 'when the initial query retrieves all documents' do let(:documents) do (1..10).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end it 'returns the correct amount' do expect(cursor.to_a.count).to eq(10) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end context 'when the initial query does not retrieve all documents' do let(:documents) do (1..102).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end context 'when a getmore gets a socket error' do let(:op) do double('operation') end before do expect(cursor).to receive(:get_more_operation).and_return(op).ordered expect(op).to receive(:execute).and_raise(Mongo::Error::SocketError).ordered expect(cursor).to receive(:get_more_operation).and_call_original.ordered end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end context 'when no errors occur' do it 'returns the correct amount' do expect(cursor.to_a.count).to eq(102) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end end end context 'when options are provided to the view' do let(:documents) do (1..10).map{ |i| { field: "test#{i}" }} end before do 
authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end context 'when a limit is provided' do context 'when no batch size is provided' do context 'when the limit is positive' do let(:view) do Mongo::Collection::View.new(authorized_collection, {}, :limit => 2) end it 'returns the correct amount' do expect(cursor.to_a.count).to eq(2) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end context 'when the limit is negative' do let(:view) do Mongo::Collection::View.new(authorized_collection, {}, :limit => -2) end it 'returns the positive number of documents' do expect(cursor.to_a.count).to eq(2) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end context 'when the limit is zero' do let(:view) do Mongo::Collection::View.new(authorized_collection, {}, :limit => 0) end it 'returns all documents' do expect(cursor.to_a.count).to eq(10) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end end context 'when a batch size is provided' do context 'when the batch size is less than the limit' do let(:view) do Mongo::Collection::View.new( authorized_collection, {}, :limit => 5, :batch_size => 3 ) end it 'returns the limited number of documents' do expect(cursor.to_a.count).to eq(5) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end context 'when the batch size is more than the limit' do let(:view) do Mongo::Collection::View.new( authorized_collection, {}, :limit => 5, :batch_size => 7 ) end it 'returns the limited number of documents' do expect(cursor.to_a.count).to eq(5) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end context 'when the batch size is the same as the limit' do let(:view) do Mongo::Collection::View.new( authorized_collection, {}, :limit => 5, :batch_size => 5 ) end it 'returns the limited number of documents' do expect(cursor.to_a.count).to eq(5) end it 'iterates the documents' do cursor.each do |doc| expect(doc).to have_key('field') end end end end end end context 'when the cursor is not fully iterated and is garbage collected' do let(:documents) do (1..3).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) cursor_reaper.schedule_kill_cursor(cursor.id, cursor.send(:kill_cursors_op_spec), cursor.instance_variable_get(:@server)) end after do authorized_collection.delete_many end let(:view) do Mongo::Collection::View.new( authorized_collection, {}, :batch_size => 2 ) end let!(:cursor) do view.to_enum.next view.instance_variable_get(:@cursor) end let(:cursor_reaper) do authorized_client.cluster.instance_variable_get(:@cursor_reaper) end it 'schedules a kill cursors op', unless: sessions_enabled? 
do sleep(Mongo::Cluster::PeriodicExecutor::FREQUENCY) expect { cursor.to_a }.to raise_exception(Mongo::Error::OperationFailure) end context 'when the cursor is unregistered before the kill cursors operations are executed' do it 'does not send a kill cursors operation for the unregistered cursor' do cursor_reaper.unregister_cursor(cursor.id) expect(cursor.to_a.size).to eq(documents.size) end end end context 'when the cursor is fully iterated' do let(:documents) do (1..3).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end let(:view) do authorized_collection.find({}, batch_size: 2) end let(:cursor) do view.instance_variable_get(:@cursor) end let!(:cursor_id) do enum.next enum.next cursor.id end let(:enum) do view.to_enum end let(:cursor_reaper) do authorized_collection.client.cluster.instance_variable_get(:@cursor_reaper) end it 'removes the cursor id from the active cursors tracked by the cluster cursor manager' do enum.next expect(cursor_reaper.instance_variable_get(:@active_cursors)).not_to include(cursor_id) end end end context 'when an implicit session is used', if: sessions_enabled? do let(:collection) do subscribed_client[TEST_COLL] end before do collection.insert_many(documents) end after do collection.delete_many end let(:cursor) do view.instance_variable_get(:@cursor) end let(:enum) do view.to_enum end let(:session_pool_ids) do queue = view.client.cluster.session_pool.instance_variable_get(:@queue) queue.collect { |s| s.session_id } end let(:find_events) do EventSubscriber.started_events.select { |e| e.command_name == "find" } end context 'when all results are retrieved in the first response' do let(:documents) do (1..2).map{ |i| { field: "test#{i}" }} end let(:view) do collection.find end it 'returns the session to the cluster session pool' do 1.times { enum.next } expect(find_events.collect { |event| event.command['lsid'] }.uniq.size).to eq(1) expect(session_pool_ids).to include(find_events.collect { |event| event.command['lsid'] }.uniq.first) end end context 'when a getmore is needed to retrieve all results', if: sessions_enabled? && !sharded? 
do let(:documents) do (1..4).map{ |i| { field: "test#{i}" }} end let(:view) do collection.find({}, batch_size: 2, limit: 4) end context 'when not all documents are iterated' do it 'returns the session to the cluster session pool' do 3.times { enum.next } expect(find_events.collect { |event| event.command['lsid'] }.uniq.size).to eq(1) expect(session_pool_ids).to include(find_events.collect { |event| event.command['lsid'] }.uniq.first) end end context 'when all documents are iterated' do it 'returns the session to the cluster session pool' do 4.times { enum.next } expect(find_events.collect { |event| event.command['lsid'] }.uniq.size).to eq(1) expect(session_pool_ids).to include(find_events.collect { |event| event.command['lsid'] }.uniq.first) end end end end describe '#inspect' do let(:view) do Mongo::Collection::View.new(authorized_collection) end let(:query_spec) do { :selector => {}, :options => {}, :db_name => TEST_DB, :coll_name => TEST_COLL } end let(:reply) do Mongo::Operation::Read::Query.new(query_spec) end let(:cursor) do described_class.new(view, reply, authorized_primary) end it 'returns a string' do expect(cursor.inspect).to be_a(String) end it 'returns a string containing the collection view inspect string' do expect(cursor.inspect).to match(/.*#{view.inspect}.*/) end end end mongo-2.5.1/spec/mongo/bson_spec.rb0000644000004100000410000000025513257253113017243 0ustar www-datawww-datarequire 'spec_helper' describe Symbol do describe '#bson_type' do it 'serializes to a symbol type' do expect(:test.bson_type).to eq(14.chr) end end end mongo-2.5.1/spec/mongo/options/0000755000004100000410000000000013257253113016434 5ustar www-datawww-datamongo-2.5.1/spec/mongo/options/redacted_spec.rb0000644000004100000410000001753713257253113021563 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Options::Redacted do let(:options) do described_class.new(original_opts) end describe '#to_s' do context 'when the hash contains a sensitive key' do let(:original_opts) do { password: 'sensitive_data' } end it 'replaces the value with the redacted string' do expect(options.to_s).not_to match(original_opts[:password]) end it 'replaces the value with the redacted string' do expect(options.to_s).to match(Mongo::Options::Redacted::STRING_REPLACEMENT) end end context 'when the hash does not contain a sensitive key' do let(:original_opts) do { user: 'emily' } end it 'prints all the values' do expect(options.to_s).to match(original_opts[:user]) end end end describe '#inspect' do context 'when the hash contains a sensitive key' do let(:original_opts) do { password: 'sensitive_data' } end it 'replaces the value with the redacted string' do expect(options.inspect).not_to match(original_opts[:password]) end it 'replaces the value with the redacted string' do expect(options.inspect).to match(Mongo::Options::Redacted::STRING_REPLACEMENT) end end context 'when the hash does not contain a sensitive key' do let(:original_opts) do { name: 'some_name' } end it 'does not replace the value with the redacted string' do expect(options.inspect).to match(original_opts[:name]) end it 'does not replace the value with the redacted string' do expect(options.inspect).not_to match(Mongo::Options::Redacted::STRING_REPLACEMENT) end end end describe '#has_key?' 
do context 'when the original key is a String' do let(:original_opts) do { 'name' => 'Emily' } end context 'when the method argument is a String' do it 'returns true' do expect(options.has_key?('name')).to be(true) end end context 'when method argument is a Symbol' do it 'returns true' do expect(options.has_key?(:name)).to be(true) end end end context 'when the original key is a Symbol' do let(:original_opts) do { name: 'Emily' } end context 'when the method argument is a String' do it 'returns true' do expect(options.has_key?('name')).to be(true) end end context 'when method argument is a Symbol' do it 'returns true' do expect(options.has_key?(:name)).to be(true) end end end context 'when the hash does not contain the key' do let(:original_opts) do { other: 'Emily' } end context 'when the method argument is a String' do it 'returns false' do expect(options.has_key?('name')).to be(false) end end context 'when method argument is a Symbol' do it 'returns false' do expect(options.has_key?(:name)).to be(false) end end end end describe '#reject' do let(:options) do described_class.new(a: 1, b: 2, c: 3) end context 'when no block is provided' do it 'returns an enumerable' do expect(options.reject).to be_a(Enumerator) end end context 'when a block is provided' do context 'when the block evaluates to true for some pairs' do let(:result) do options.reject { |k,v| k == 'a' } end it 'returns an object consisting of only the remaining pairs' do expect(result).to eq(described_class.new(b: 2, c: 3)) end it 'returns a new object' do expect(result).not_to be(options) end end context 'when the block does not evaluate to true for any pairs' do let(:result) do options.reject { |k,v| k == 'd' } end it 'returns an object with all pairs intact' do expect(result).to eq(described_class.new(a: 1, b: 2, c: 3)) end it 'returns a new object' do expect(result).not_to be(options) end end end end describe '#reject!' do let(:options) do described_class.new(a: 1, b: 2, c: 3) end context 'when no block is provided' do it 'returns an enumerable' do expect(options.reject).to be_a(Enumerator) end end context 'when a block is provided' do context 'when the block evaluates to true for some pairs' do let(:result) do options.reject! { |k,v| k == 'a' } end it 'returns an object consisting of only the remaining pairs' do expect(result).to eq(described_class.new(b: 2, c: 3)) end it 'returns the same object' do expect(result).to be(options) end end context 'when the block does not evaluate to true for any pairs' do let(:result) do options.reject! 
{ |k,v| k == 'd' } end it 'returns nil' do expect(result).to be(nil) end end end end describe '#select' do let(:options) do described_class.new(a: 1, b: 2, c: 3) end context 'when no block is provided' do it 'returns an enumerable' do expect(options.reject).to be_a(Enumerator) end end context 'when a block is provided' do context 'when the block evaluates to true for some pairs' do let(:result) do options.select { |k,v| k == 'a' } end it 'returns an object consisting of those pairs' do expect(result).to eq(described_class.new(a: 1)) end it 'returns a new object' do expect(result).not_to be(options) end end context 'when the block does not evaluate to true for any pairs' do let(:result) do options.select { |k,v| k == 'd' } end it 'returns an object with no pairs' do expect(result).to eq(described_class.new) end it 'returns a new object' do expect(result).not_to be(options) end end context 'when the object is unchanged' do let(:options) do described_class.new(a: 1, b: 2, c: 3) end let(:result) do options.select { |k,v| ['a', 'b', 'c'].include?(k) } end it 'returns a new object' do expect(result).to eq(described_class.new(a: 1, b: 2, c: 3)) end end end end describe '#select!' do let(:options) do described_class.new(a: 1, b: 2, c: 3) end context 'when no block is provided' do it 'returns an enumerable' do expect(options.reject).to be_a(Enumerator) end end context 'when a block is provided' do context 'when the block evaluates to true for some pairs' do let(:result) do options.select! { |k,v| k == 'a' } end it 'returns an object consisting of those pairs' do expect(result).to eq(described_class.new(a: 1)) end it 'returns the same object' do expect(result).to be(options) end end context 'when the block does not evaluate to true for any pairs' do let(:result) do options.select! { |k,v| k == 'd' } end it 'returns an object with no pairs' do expect(result).to eq(described_class.new) end it 'returns the same object' do expect(result).to be(options) end end context 'when the object is unchanged' do let(:options) do described_class.new(a: 1, b: 2, c: 3) end let(:result) do options.select! { |k,v| ['a', 'b', 'c'].include?(k) } end it 'returns nil' do expect(result).to be(nil) end end end end endmongo-2.5.1/spec/mongo/command_monitoring_spec.rb0000644000004100000410000000326313257253113022167 0ustar www-datawww-datarequire 'spec_helper' def ignore?(test) if version = test.ignore_if_server_version_greater_than return true if version == "3.0" && find_command_enabled? end if version = test.ignore_if_server_version_less_than return true if version == "3.1" && !find_command_enabled? 
end false end describe 'Command Monitoring Events' do COMMAND_MONITORING_TESTS.each do |file| spec = Mongo::CommandMonitoring::Spec.new(file) spec.tests.each do |test| context(test.description) do let(:subscriber) do Mongo::CommandMonitoring::TestSubscriber.new end let(:monitoring) do authorized_client.instance_variable_get(:@monitoring) end before do authorized_collection.find.delete_many authorized_client.subscribe(Mongo::Monitoring::COMMAND, subscriber) end after do monitoring.subscribers[Mongo::Monitoring::COMMAND].delete(subscriber) authorized_collection.find.delete_many end test.expectations.each do |expectation| it "generates a #{expectation.event_name} for #{expectation.command_name}", unless: ignore?(test) do begin test.run(authorized_collection) event = subscriber.send(expectation.event_type)[expectation.command_name] expect(event).to send(expectation.matcher, expectation) rescue Mongo::Error::OperationFailure, Mongo::Error::BulkWriteError event = subscriber.send(expectation.event_type)[expectation.command_name] expect(event).to send(expectation.matcher, expectation) end end end end end end end mongo-2.5.1/spec/mongo/collection/0000755000004100000410000000000013257253113017074 5ustar www-datawww-datamongo-2.5.1/spec/mongo/collection/view/0000755000004100000410000000000013257253113020046 5ustar www-datawww-datamongo-2.5.1/spec/mongo/collection/view/explainable_spec.rb0000644000004100000410000000077113257253113023676 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Explainable do let(:selector) do {} end let(:options) do {} end let(:view) do Mongo::Collection::View.new(authorized_collection, selector, options) end after do authorized_collection.delete_many end describe '#explain' do let(:explain) do view.explain end it 'executes an explain' do expect(explain[:cursor] == 'BasicCursor' || explain[:queryPlanner]).to be_truthy end end end mongo-2.5.1/spec/mongo/collection/view/builder/0000755000004100000410000000000013257253113021474 5ustar www-datawww-datamongo-2.5.1/spec/mongo/collection/view/builder/find_command_spec.rb0000644000004100000410000002766113257253113025465 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Builder::FindCommand do describe '#specification' do let(:view) do Mongo::Collection::View.new(authorized_collection, filter, options) end let(:builder) do described_class.new(view, nil) end let(:specification) do builder.specification end let(:selector) do specification[:selector] end context 'when the options are standard' do let(:filter) do { 'name' => 'test' } end let(:options) do { sort: { _id: 1 }, projection: { name: 1 }, hint: { name: 1 }, skip: 10, limit: 20, batch_size: 5, single_batch: false, comment: "testing", max_scan: 200, max_time_ms: 40, max_value: { name: 'joe' }, min_value: { name: 'albert' }, return_key: true, show_disk_loc: true, snapshot: true, tailable: true, oplog_replay: true, no_cursor_timeout: true, await_data: true, allow_partial_results: true, collation: { locale: 'en_US' } } end context 'when the operation has a session' do let(:session) do double('session') end let(:builder) do described_class.new(view, session) end it 'adds the session to the specification' do expect(builder.specification[:session]).to be(session) end end it 'maps the collection name' do expect(selector['find']).to eq(authorized_collection.name) end it 'maps the filter' do expect(selector['filter']).to eq(filter) end it 'maps sort' do expect(selector['sort']).to eq('_id' => 1) end it 'maps projection' do 
expect(selector['projection']).to eq('name' => 1) end it 'maps hint' do expect(selector['hint']).to eq('name' => 1) end it 'maps skip' do expect(selector['skip']).to eq(10) end it 'maps limit' do expect(selector['limit']).to eq(20) end it 'maps batch size' do expect(selector['batchSize']).to eq(5) end it 'maps single batch' do expect(selector['singleBatch']).to be false end it 'maps comment' do expect(selector['comment']).to eq('testing') end it 'maps max scan' do expect(selector['maxScan']).to eq(200) end it 'maps max time ms' do expect(selector['maxTimeMS']).to eq(40) end it 'maps max' do expect(selector['max']).to eq('name' => 'joe') end it 'maps min' do expect(selector['min']).to eq('name' => 'albert') end it 'maps return key' do expect(selector['returnKey']).to be true end it 'maps show record id' do expect(selector['showRecordId']).to be true end it 'maps snapshot' do expect(selector['snapshot']).to be true end it 'maps tailable' do expect(selector['tailable']).to be true end it 'maps oplog replay' do expect(selector['oplogReplay']).to be true end it 'maps no cursor timeout' do expect(selector['noCursorTimeout']).to be true end it 'maps await data' do expect(selector['awaitData']).to be true end it 'maps allow partial results' do expect(selector['allowPartialResults']).to be true end it 'maps collation' do expect(selector['collation']).to eq('locale' => 'en_US') end end context 'when there is a limit' do let(:filter) do { 'name' => 'test' } end context 'when limit is 0' do context 'when batch_size is also 0' do let(:options) do { limit: 0, batch_size: 0 } end it 'does not set the singleBatch' do expect(selector['singleBatch']).to be nil end it 'does not set the limit' do expect(selector['limit']).to be nil end it 'does not set the batch size' do expect(selector['batchSize']).to be nil end end context 'when batch_size is not set' do let(:options) do { limit: 0 } end it 'does not set the singleBatch' do expect(selector['singleBatch']).to be nil end it 'does not set the limit' do expect(selector['limit']).to be nil end it 'does not set the batch size' do expect(selector['batchSize']).to be nil end end end context 'when the limit is negative' do context 'when there is a batch_size' do context 'when the batch_size is positive' do let(:options) do { limit: -1, batch_size: 3 } end it 'sets single batch to true' do expect(selector['singleBatch']).to be true end it 'converts the limit to a positive value' do expect(selector['limit']).to be(options[:limit].abs) end it 'sets the batch size' do expect(selector['batchSize']).to be(options[:batch_size]) end end context 'when the batch_size is negative' do let(:options) do { limit: -1, batch_size: -3 } end it 'sets single batch to true' do expect(selector['singleBatch']).to be true end it 'converts the limit to a positive value' do expect(selector['limit']).to be(options[:limit].abs) end it 'sets the batch size to the limit' do expect(selector['batchSize']).to be(options[:limit].abs) end end end context 'when there is not a batch_size' do let(:options) do { limit: -5 } end it 'sets single batch to true' do expect(selector['singleBatch']).to be true end it 'converts the limit to a positive value' do expect(selector['limit']).to be(options[:limit].abs) end it 'does not set the batch size' do expect(selector['batchSize']).to be_nil end end end context 'when the limit is positive' do context 'when there is a batch_size' do context 'when the batch_size is positive' do let(:options) do { limit: 5, batch_size: 3 } end it 'does not set singleBatch' do 
expect(selector['singleBatch']).to be nil end it 'sets the limit' do expect(selector['limit']).to be(options[:limit]) end it 'sets the batch size' do expect(selector['batchSize']).to be(options[:batch_size]) end end context 'when the batch_size is negative' do let(:options) do { limit: 5, batch_size: -3 } end it 'sets the singleBatch' do expect(selector['singleBatch']).to be true end it 'sets the limit' do expect(selector['limit']).to be(options[:limit]) end it 'sets the batch size to a positive value' do expect(selector['batchSize']).to be(options[:batch_size].abs) end end end context 'when there is not a batch_size' do let(:options) do { limit: 5 } end it 'does not set the singleBatch' do expect(selector['singleBatch']).to be nil end it 'sets the limit' do expect(selector['limit']).to be(options[:limit]) end it 'does not set the batch size' do expect(selector['batchSize']).to be nil end end end end context 'when there is a batch_size' do let(:filter) do { 'name' => 'test' } end context 'when there is no limit' do context 'when the batch_size is positive' do let(:options) do { batch_size: 3 } end it 'does not set the singleBatch' do expect(selector['singleBatch']).to be nil end it 'does not set the limit' do expect(selector['limit']).to be nil end it 'sets the batch size' do expect(selector['batchSize']).to be(options[:batch_size]) end end context 'when the batch_size is negative' do let(:options) do { batch_size: -3 } end it 'sets the singleBatch' do expect(selector['singleBatch']).to be true end it 'does not set the limit' do expect(selector['limit']).to be nil end it 'sets the batch size to a positive value' do expect(selector['batchSize']).to be(options[:batch_size].abs) end end context 'when batch_size is 0' do let(:options) do { batch_size: 0 } end it 'does not set the singleBatch' do expect(selector['singleBatch']).to be nil end it 'does not set the limit' do expect(selector['limit']).to be nil end it 'does not set the batch size' do expect(selector['batchSize']).to be nil end end end end context 'when limit and batch_size are negative' do let(:filter) do { 'name' => 'test' } end let(:options) do { limit: -1, batch_size: -3 } end it 'sets single batch to true' do expect(selector['singleBatch']).to be true end it 'converts the limit to a positive value' do expect(selector['limit']).to be(options[:limit].abs) end end context 'when cursor_type is specified' do let(:filter) do { 'name' => 'test' } end context 'when cursor_type is :tailable' do let(:options) do { cursor_type: :tailable, } end it 'maps to tailable' do expect(selector['tailable']).to be true end it 'does not map to awaitData' do expect(selector['awaitData']).to be_nil end end context 'when cursor_type is :tailable_await' do let(:options) do { cursor_type: :tailable_await, } end it 'maps to tailable' do expect(selector['tailable']).to be true end it 'maps to awaitData' do expect(selector['awaitData']).to be true end end end context 'when the collection has a read concern defined' do let(:collection) do authorized_collection.with(read_concern: { level: 'invalid' }) end let(:view) do Mongo::Collection::View.new(collection, {}) end it 'applies the read concern of the collection' do expect(selector['readConcern']).to eq(BSON::Document.new(level: 'invalid')) end context 'when explain is called for the find' do let(:collection) do authorized_collection.with(read_concern: { level: 'invalid' }) end let(:view) do Mongo::Collection::View.new(collection, {}) end it 'applies the read concern of the collection' do expect( 
builder.explain_specification[:selector][:explain][:readConcern]).to eq(BSON::Document.new(level: 'invalid')) end end end context 'when the collection does not have a read concern defined' do let(:filter) do {} end let(:options) do {} end it 'does not apply a read concern' do expect(selector['readConcern']).to be_nil end end end end mongo-2.5.1/spec/mongo/collection/view/builder/op_query_spec.rb0000644000004100000410000000660213257253113024702 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Builder::OpQuery do describe '#specification' do let(:filter) do { 'name' => 'test' } end let(:builder) do described_class.new(view) end let(:specification) do builder.specification end let(:view) do Mongo::Collection::View.new(authorized_collection, filter, options) end context 'when there are modifiers in the options' do let(:options) do { sort: { _id: 1 }, projection: { name: 1 }, hint: { name: 1 }, skip: 10, limit: 20, batch_size: 5, single_batch: false, comment: "testing", max_scan: 200, max_time_ms: 40, max_value: { name: 'joe' }, min_value: { name: 'albert' }, return_key: true, show_disk_loc: true, snapshot: true, tailable: true, oplog_replay: true, no_cursor_timeout: true, tailable_await: true, allow_partial_results: true, read_concern: { level: 'local' } } end let(:selector) do specification[:selector] end let(:opts) do specification[:options] end let(:flags) do opts[:flags] end it 'maps the collection name' do expect(specification[:coll_name]).to eq(authorized_collection.name) end it 'maps the filter' do expect(selector['$query']).to eq(filter) end it 'maps sort' do expect(selector['$orderby']).to eq('_id' => 1) end it 'maps projection' do expect(opts['project']).to eq('name' => 1) end it 'maps hint' do expect(selector['$hint']).to eq('name' => 1) end it 'maps skip' do expect(opts['skip']).to eq(10) end it 'maps limit' do expect(opts['limit']).to eq(20) end it 'maps batch size' do expect(opts['batch_size']).to eq(5) end it 'maps comment' do expect(selector['$comment']).to eq('testing') end it 'maps max scan' do expect(selector['$maxScan']).to eq(200) end it 'maps max time ms' do expect(selector['$maxTimeMS']).to eq(40) end it 'maps max' do expect(selector['$max']).to eq('name' => 'joe') end it 'maps min' do expect(selector['$min']).to eq('name' => 'albert') end it 'does not map read concern' do expect(selector['$readConcern']).to be_nil expect(selector['readConcern']).to be_nil expect(opts['readConcern']).to be_nil end it 'maps return key' do expect(selector['$returnKey']).to be true end it 'maps show record id' do expect(selector['$showDiskLoc']).to be true end it 'maps snapshot' do expect(selector['$snapshot']).to be true end it 'maps tailable' do expect(flags).to include(:tailable_cursor) end it 'maps oplog replay' do expect(flags).to include(:oplog_replay) end it 'maps no cursor timeout' do expect(flags).to include(:no_cursor_timeout) end it 'maps await data' do expect(flags).to include(:await_data) end it 'maps allow partial results' do expect(flags).to include(:partial) end end end end mongo-2.5.1/spec/mongo/collection/view/builder/flags_spec.rb0000644000004100000410000000423613257253113024134 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Builder::Flags do describe '.map_flags' do shared_examples_for 'a flag mapper' do let(:flags) do described_class.map_flags(options) end it 'maps allow partial results' do expect(flags).to include(:partial) end it 'maps oplog replay' do expect(flags).to include(:oplog_replay) end it 'maps no 
cursor timeout' do expect(flags).to include(:no_cursor_timeout) end it 'maps tailable' do expect(flags).to include(:tailable_cursor) end it 'maps await data' do expect(flags).to include(:await_data) end it 'maps exhaust' do expect(flags).to include(:exhaust) end end context 'when the options are standard' do let(:options) do { :allow_partial_results => true, :oplog_replay => true, :no_cursor_timeout => true, :tailable => true, :await_data => true, :exhaust => true } end it_behaves_like 'a flag mapper' end context 'when the options already have flags' do let(:options) do { :flags => [ :partial, :oplog_replay, :no_cursor_timeout, :tailable_cursor, :await_data, :exhaust ] } end it_behaves_like 'a flag mapper' end context 'when the options include tailable_await' do let(:options) do { :tailable_await => true } end let(:flags) do described_class.map_flags(options) end it 'maps the await data option' do expect(flags).to include(:await_data) end it 'maps the tailable option' do expect(flags).to include(:tailable_cursor) end end context 'when the options provide a cursor type' do let(:options) do { :cursor_type => :await_data } end let(:flags) do described_class.map_flags(options) end it 'maps the cursor type to a flag' do expect(flags).to include(:await_data) end end end end mongo-2.5.1/spec/mongo/collection/view/builder/modifiers_spec.rb0000644000004100000410000001157413257253113025024 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Builder::Modifiers do describe '.map_driver_options' do shared_examples_for 'transformable driver options' do it 'maps hint' do expect(transformed[:hint]).to eq("_id" => 1) end it 'maps comment' do expect(transformed[:comment]).to eq('testing') end it 'maps max scan' do expect(transformed[:max_scan]).to eq(200) end it 'maps max time ms' do expect(transformed[:max_time_ms]).to eq(500) end it 'maps max' do expect(transformed[:max_value]).to eq("name" => 'joe') end it 'maps min' do expect(transformed[:min_value]).to eq("name" => 'albert') end it 'maps return key' do expect(transformed[:return_key]).to be true end it 'maps show record id' do expect(transformed[:show_disk_loc]).to be true end it 'maps snapshot' do expect(transformed[:snapshot]).to be true end it 'maps explain' do expect(transformed[:explain]).to be true end it 'returns a BSON document' do expect(transformed).to be_a(BSON::Document) end end context 'when the keys are strings' do let(:modifiers) do { '$orderby' => { name: 1 }, '$hint' => { _id: 1 }, '$comment' => 'testing', '$snapshot' => true, '$maxScan' => 200, '$max' => { name: 'joe' }, '$min' => { name: 'albert' }, '$maxTimeMS' => 500, '$returnKey' => true, '$showDiskLoc' => true, '$explain' => true } end let(:transformed) do described_class.map_driver_options(modifiers) end it_behaves_like 'transformable driver options' end context 'when the keys are symbols' do let(:modifiers) do { :$orderby => { name: 1 }, :$hint => { _id: 1 }, :$comment => 'testing', :$snapshot => true, :$maxScan => 200, :$max => { name: 'joe' }, :$min => { name: 'albert' }, :$maxTimeMS => 500, :$returnKey => true, :$showDiskLoc => true, :$explain => true } end let(:transformed) do described_class.map_driver_options(modifiers) end it_behaves_like 'transformable driver options' end end describe '.map_server_modifiers' do shared_examples_for 'transformable server modifiers' do it 'maps hint' do expect(transformed[:$hint]).to eq("_id" => 1) end it 'maps comment' do expect(transformed[:$comment]).to eq('testing') end it 'maps max scan' do 
expect(transformed[:$maxScan]).to eq(200) end it 'maps max time ms' do expect(transformed[:$maxTimeMS]).to eq(500) end it 'maps max' do expect(transformed[:$max]).to eq("name" => 'joe') end it 'maps min' do expect(transformed[:$min]).to eq("name" => 'albert') end it 'maps return key' do expect(transformed[:$returnKey]).to be true end it 'maps show record id' do expect(transformed[:$showDiskLoc]).to be true end it 'maps snapshot' do expect(transformed[:$snapshot]).to be true end it 'maps explain' do expect(transformed[:$explain]).to be true end it 'returns a BSON document' do expect(transformed).to be_a(BSON::Document) end it 'does not include non modifiers' do expect(transformed[:limit]).to be_nil end end context 'when the keys are strings' do let(:options) do { 'sort' => { name: 1 }, 'hint' => { _id: 1 }, 'comment' => 'testing', 'snapshot' => true, 'max_scan' => 200, 'max_value' => { name: 'joe' }, 'min_value' => { name: 'albert' }, 'max_time_ms' => 500, 'return_key' => true, 'show_disk_loc' => true, 'explain' => true, 'limit' => 10 } end let(:transformed) do described_class.map_server_modifiers(options) end it_behaves_like 'transformable server modifiers' end context 'when the keys are symbols' do let(:options) do { :sort => { name: 1 }, :hint => { _id: 1 }, :comment => 'testing', :snapshot => true, :max_scan => 200, :max_value => { name: 'joe' }, :min_value => { name: 'albert' }, :max_time_ms => 500, :return_key => true, :show_disk_loc => true, :explain => true, :limit => 10 } end let(:transformed) do described_class.map_server_modifiers(options) end it_behaves_like 'transformable server modifiers' end end end mongo-2.5.1/spec/mongo/collection/view/map_reduce_spec.rb0000644000004100000410000004662113257253113023522 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::MapReduce do let(:map) do %Q{ function() { emit(this.name, { population: this.population }); }} end let(:reduce) do %Q{ function(key, values) { var result = { population: 0 }; values.forEach(function(value) { result.population += value.population; }); return result; }} end let(:documents) do [ { name: 'Berlin', population: 3000000 }, { name: 'London', population: 9000000 } ] end let(:selector) do {} end let(:view_options) do {} end let(:view) do Mongo::Collection::View.new(authorized_collection, selector, view_options) end let(:options) do {} end let(:map_reduce_spec) do map_reduce.send(:map_reduce_spec, double('session')) end before do authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end let(:map_reduce) do described_class.new(view, map, reduce, options) end describe '#map_function' do it 'returns the map function' do expect(map_reduce.map_function).to eq(map) end end describe '#reduce_function' do it 'returns the map function' do expect(map_reduce.reduce_function).to eq(reduce) end end describe '#map' do let(:results) do map_reduce.map do |document| document end end it 'calls the Enumerable method' do expect(results).to eq(map_reduce.to_a) end end describe '#reduce' do let(:results) do map_reduce.reduce(0) { |sum, doc| sum + doc['value']['population'] } end it 'calls the Enumerable method' do expect(results).to eq(12000000) end end describe '#each' do context 'when no options are provided' do it 'iterates over the documents in the result' do map_reduce.each do |document| expect(document[:value]).to_not be_nil end end end context 'when provided a session' do let(:options) do { session: session } end let(:operation) do map_reduce.to_a end let(:client) do 
authorized_client end it_behaves_like 'an operation using a session' end context 'when out is in the options' do after do authorized_client['output_collection'].delete_many end context 'when out is a string' do let(:options) do { :out => 'output_collection' } end it 'iterates over the documents in the result' do map_reduce.each do |document| expect(document[:value]).to_not be_nil end end end context 'when out is a document' do let(:options) do { :out => { replace: 'output_collection' } } end it 'iterates over the documents in the result' do map_reduce.each do |document| expect(document[:value]).to_not be_nil end end end end context 'when out is inline' do let(:new_map_reduce) do map_reduce.out(inline: 1) end it 'iterates over the documents in the result' do new_map_reduce.each do |document| expect(document[:value]).to_not be_nil end end end context 'when out is a collection' do after do authorized_client['output_collection'].delete_many end context 'when #each is called without a block' do let(:new_map_reduce) do map_reduce.out(replace: 'output_collection') end before do new_map_reduce.each end it 'executes the map reduce' do expect(map_reduce.to_a).to eq(new_map_reduce.to_a) end end context 'when the option is to replace' do let(:new_map_reduce) do map_reduce.out(replace: 'output_collection') end it 'iterates over the documents in the result' do new_map_reduce.each do |document| expect(document[:value]).to_not be_nil end end it 'fetches the results from the collection' do expect(new_map_reduce.count).to eq(2) end context 'when provided a session' do let(:options) do { session: session } end let(:operation) do new_map_reduce.to_a end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when the output collection is iterated' do let(:options) do { session: session } end let(:session) do client.start_session end let(:view) do Mongo::Collection::View.new(client[TEST_COLL], selector, view_options) end let(:client) do subscribed_client end let(:find_command) do EventSubscriber.started_events[-1].command end before do begin; client[TEST_COLL].create; rescue; end begin; client.use('another-db')[TEST_COLL].create; rescue; end end it 'uses the session when iterating over the output collection', if: test_sessions? do new_map_reduce.to_a expect(find_command["lsid"]).to eq(BSON::Document.new(session.session_id)) end end context 'when another db is specified', if: (sessions_enabled? && !sharded? && !auth_enabled?) do let(:new_map_reduce) do map_reduce.out(db: 'another-db', replace: 'output_collection') end it 'iterates over the documents in the result', if: (sessions_enabled? && !sharded? && !auth_enabled?) do new_map_reduce.each do |document| expect(document[:value]).to_not be_nil end end it 'fetches the results from the collection', if: (sessions_enabled? && !sharded? && !auth_enabled?) do expect(new_map_reduce.count).to eq(2) end end end context 'when the option is to merge' do let(:new_map_reduce) do map_reduce.out(merge: 'output_collection') end it 'iterates over the documents in the result' do new_map_reduce.each do |document| expect(document[:value]).to_not be_nil end end it 'fetches the results from the collection' do expect(new_map_reduce.count).to eq(2) end context 'when another db is specified', if: (!auth_enabled? && !sharded? && list_command_enabled?) 
do let(:new_map_reduce) do map_reduce.out(db: 'another-db', merge: 'output_collection') end it 'iterates over the documents in the result' do new_map_reduce.each do |document| expect(document[:value]).to_not be_nil end end it 'fetches the results from the collection' do expect(new_map_reduce.count).to eq(2) end end end context 'when the option is to reduce' do let(:new_map_reduce) do map_reduce.out(reduce: 'output_collection') end it 'iterates over the documents in the result' do new_map_reduce.each do |document| expect(document[:value]).to_not be_nil end end it 'fetches the results from the collection' do expect(new_map_reduce.count).to eq(2) end context 'when another db is specified', if: (!auth_enabled? && list_command_enabled? && !sharded?) do let(:new_map_reduce) do map_reduce.out(db: 'another-db', reduce: 'output_collection') end it 'iterates over the documents in the result' do new_map_reduce.each do |document| expect(document[:value]).to_not be_nil end end it 'fetches the results from the collection' do expect(new_map_reduce.count).to eq(2) end end end context 'when the option is a collection name' do let(:new_map_reduce) do map_reduce.out('output_collection') end it 'fetches the results from the collection' do expect(new_map_reduce.count).to eq(2) end end end context 'when the view has a selector' do context 'when the selector is basic' do let(:selector) do { 'name' => 'Berlin' } end it 'applies the selector to the map/reduce' do map_reduce.each do |document| expect(document[:_id]).to eq('Berlin') end end it 'includes the selector in the operation spec' do expect(map_reduce_spec[:selector][:query]).to eq(selector) end end context 'when the selector is advanced' do let(:selector) do { :$query => { 'name' => 'Berlin' }} end it 'applies the selector to the map/reduce' do map_reduce.each do |document| expect(document[:_id]).to eq('Berlin') end end it 'includes the selector in the operation spec' do expect(map_reduce_spec[:selector][:query]).to eq(selector[:$query]) end end end context 'when the view has a limit' do let(:view_options) do { limit: 1 } end it 'applies the limit to the map/reduce' do map_reduce.each do |document| expect(document[:_id]).to eq('Berlin') end end end end describe '#execute' do context 'when output is to a collection' do let(:options) do { out: 'output_collection' } end let!(:result) do map_reduce.execute end it 'executes the map reduce' do expect(authorized_client['output_collection'].count).to eq(2) end it 'returns a result object' do expect(result).to be_a(Mongo::Operation::Result) end end context 'when there is no output' do let(:result) do map_reduce.execute end it 'executes the map reduce' do expect(result.documents.size).to eq(2) end it 'returns a result object' do expect(result).to be_a(Mongo::Operation::Result) end end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:options) do { session: session } end let(:operation) do map_reduce.execute end let(:failed_operation) do described_class.new(view, '$invalid', reduce, options).execute end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end end describe '#finalize' do let(:finalize) do %Q{ function(key, value) { value.testing = test; return value; }} end let(:new_map_reduce) do map_reduce.finalize(finalize) end it 'sets the finalize function' do expect(new_map_reduce.finalize).to eq(finalize) end it 'includes the finalize function in the operation spec' do 
expect(new_map_reduce.send(:map_reduce_spec, double('session'))[:selector][:finalize]).to eq(finalize) end end describe '#js_mode' do let(:new_map_reduce) do map_reduce.js_mode(true) end it 'sets the js mode value' do expect(new_map_reduce.js_mode).to be true end it 'includes the js mode value in the operation spec' do expect(new_map_reduce.send(:map_reduce_spec, double('session'))[:selector][:jsMode]).to be(true) end end describe '#out' do let(:location) do { 'replace' => 'testing' } end let(:new_map_reduce) do map_reduce.out(location) end it 'sets the out value' do expect(new_map_reduce.out).to eq(location) end it 'includes the out value in the operation spec' do expect(new_map_reduce.send(:map_reduce_spec, double('session'))[:selector][:out]).to eq(location) end context 'when out is not defined' do it 'defaults to inline' do expect(map_reduce_spec[:selector][:out]).to eq('inline' => 1) end end context 'when out is specified in the options' do let(:location) do { 'replace' => 'testing' } end let(:options) do { :out => location } end it 'sets the out value' do expect(map_reduce.out).to eq(location) end it 'includes the out value in the operation spec' do expect(map_reduce_spec[:selector][:out]).to eq(location) end end context 'when out is not inline' do let(:location) do { 'replace' => 'testing' } end let(:options) do { :out => location } end it 'does not allow the operation on a secondary' do expect(map_reduce.send(:secondary_ok?)).to be false end context 'when the server is not valid for writing' do it 'reroutes the operation to a primary' do allow(map_reduce).to receive(:valid_server?).and_return(false) expect(Mongo::Logger.logger).to receive(:warn?).and_call_original map_reduce.to_a end context 'when the view has a write concern' do let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view) do Mongo::Collection::View.new(collection, selector, view_options) end shared_examples_for 'map reduce that writes accepting write concern' do context 'when the server supports write concern on the mapReduce command', if: (collation_enabled? && standalone?) do it 'uses the write concern' do expect { map_reduce.to_a }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not support write concern on the mapReduce command', unless: collation_enabled? do it 'does not apply the write concern' do expect(map_reduce.to_a.size).to eq(2) end end end context 'when out is a String' do let(:options) do { :out => 'new-collection' } end it_behaves_like 'map reduce that writes accepting write concern' end context 'when out is a document and not inline' do let(:options) do { :out => { merge: 'existing-collection' } } end it_behaves_like 'map reduce that writes accepting write concern' end context 'when out is a document but inline is specified' do let(:options) do { :out => { inline: 1 } } end it 'does not use the write concern' do expect(map_reduce.to_a.size).to eq(2) end end end end context 'when the server is valid for writing' do it 'does not reroute the operation to a primary' do expect(Mongo::Logger.logger).not_to receive(:warn?)
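# Because the output here is not inline, the map/reduce must run against a server
# that accepts writes (secondary_ok? is false above), so no rerouting warning is
# expected when the selected server is already writable. Illustrative sketch of the
# call under test, using the helpers defined above:
#
#   view.map_reduce(map, reduce).out(replace: 'output_collection').to_a
#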
map_reduce.to_a end end end end describe '#scope' do let(:object) do { 'value' => 'testing' } end let(:new_map_reduce) do map_reduce.scope(object) end it 'sets the scope object' do expect(new_map_reduce.scope).to eq(object) end it 'includes the scope object in the operation spec' do expect(new_map_reduce.send(:map_reduce_spec, double('session'))[:selector][:scope]).to eq(object) end end describe '#verbose' do let(:verbose) do false end let(:new_map_reduce) do map_reduce.verbose(verbose) end it 'sets the verbose value' do expect(new_map_reduce.verbose).to be(false) end it 'includes the verbose option in the operation spec' do expect(new_map_reduce.send(:map_reduce_spec, double('session'))[:selector][:verbose]).to eq(verbose) end end context 'when limit is set on the view' do let(:limit) do 3 end let(:view_options) do { limit: limit } end it 'includes the limit in the operation spec' do expect(map_reduce_spec[:selector][:limit]).to be(limit) end end context 'when sort is set on the view' do let(:sort) do { name: -1 } end let(:view_options) do { sort: sort } end it 'includes the sort object in the operation spec' do expect(map_reduce_spec[:selector][:sort][:name]).to eq(sort[:name]) end end context 'when the collection has a read preference' do let(:read_preference) do Mongo::ServerSelector.get(mode: :secondary) end it 'includes the read preference in the spec' do allow(authorized_collection).to receive(:read_preference).and_return(read_preference) expect(map_reduce_spec[:read]).to eq(read_preference) end end context 'when collation is specified' do let(:map) do %Q{ function() { emit(this.name, 1); }} end let(:reduce) do %Q{ function(key, values) { return Array.sum(values); }} end before do authorized_collection.insert_many([ { name: 'bang' }, { name: 'bang' }]) end let(:selector) do { name: 'BANG' } end context 'when the server selected supports collations', if: collation_enabled? do context 'when the collation key is a String' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'applies the collation' do expect(map_reduce.first['value']).to eq(2) end end context 'when the collation key is a Symbol' do let(:options) do { collation: { locale: 'en_US', strength: 2 } } end it 'applies the collation' do expect(map_reduce.first['value']).to eq(2) end end end context 'when the server selected does not support collations', unless: collation_enabled? 
do context 'when the map reduce has collation specified in its options' do let(:options) do { collation: { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { map_reduce.to_a }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { map_reduce.to_a }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end context 'when the view has collation specified in its options' do let(:view_options) do { collation: { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { map_reduce.to_a }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { map_reduce.to_a }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end end end mongo-2.5.1/spec/mongo/collection/view/change_stream_spec.rb0000644000004100000410000005311613257253113024213 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::ChangeStream, if: test_change_streams? do let(:pipeline) do [] end let(:options) do {} end let(:view_options) do {} end let(:view) do Mongo::Collection::View.new(authorized_collection, {}, view_options) end let(:change_stream) do described_class.new(view, pipeline, options) end let(:change_stream_document) do change_stream.send(:pipeline)[0]['$changeStream'] end let!(:sample_resume_token) do stream = authorized_collection.watch authorized_collection.insert_one(a: 1) doc = stream.to_enum.next stream.close doc[:_id] end let(:command_selector) do command_spec[:selector] end let(:command_spec) do change_stream.send(:aggregate_spec, double('session')) end let(:cursor) do change_stream.instance_variable_get(:@cursor) end let(:error) do begin change_stream rescue => e e end end after do authorized_collection.delete_many begin; change_stream.close; rescue; end end describe '#initialize' do it 'sets the view' do expect(change_stream.view).to be(view) end it 'sets the options' do expect(change_stream.options).to eq(options) end context 'when full_document is provided' do context "when the value is 'default'" do let(:options) do { full_document: 'default' } end it 'sets the fullDocument value to default' do expect(change_stream_document[:fullDocument]).to eq('default') end end context "when the value is 'updateLookup'" do let(:options) do { full_document: 'updateLookup' } end it 'sets the fullDocument value to updateLookup' do expect(change_stream_document[:fullDocument]).to eq('updateLookup') end end end context 'when full_document is not provided' do it "defaults to use the 'default' value" do expect(change_stream_document[:fullDocument]).to eq('default') end end context 'when resume_after is provided' do let(:options) do { resume_after: sample_resume_token } end it 'sets the resumeAfter value to the provided document' do expect(change_stream_document[:resumeAfter]).to eq(sample_resume_token) end end context 'when max_await_time_ms is provided' do let(:options) do { max_await_time_ms: 10 } end it 'sets the maxTimeMS value to the provided document' do expect(command_selector[:maxTimeMS]).to eq(10) end end context 'when batch_size is provided' do let(:options) do { batch_size: 5 } end it 'sets the batchSize value to the provided document' do expect(command_selector[:cursor][:batchSize]).to eq(5) end end context 'when collation is provided' do 
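# The collation given in the change stream options is expected to be forwarded
# verbatim as the 'collation' field of the aggregate command built for the
# $changeStream pipeline. Hedged usage sketch (assuming Collection#watch passes
# its options through to this view):
#
#   collection.watch([], collation: { locale: 'en_US', strength: 2 })
#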
let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'sets the collation value to the provided document' do expect(command_selector['collation']).to eq(BSON::Document.new(options['collation'])) end end context 'when a changeStream operator is provided by the user as well' do let(:pipeline) do [ { '$changeStream' => { fullDocument: 'default' } }] end it 'raises the error from the server' do expect(error).to be_a(Mongo::Error::OperationFailure) expect(error.message).to include('$changeStream is only valid as the first stage in a pipeline') end end context 'when the collection has a readConcern' do let(:collection) do authorized_collection.with(read_concern: { level: 'majority' }) end let(:view) do Mongo::Collection::View.new(collection, {}, options) end it 'uses the read concern of the collection' do expect(command_selector[:readConcern]).to eq('level' => 'majority') end end context 'when no pipeline is supplied' do it 'uses an empty pipeline' do expect(command_selector[:pipeline][0].keys).to eq(['$changeStream']) end end context 'when other pipeline operators are supplied' do context 'when the other pipeline operators are supported' do let(:pipeline) do [{ '$project' => { '_id' => 0 }}] end it 'uses the pipeline operators' do expect(command_selector[:pipeline][1]).to eq(pipeline[0]) end end context 'when the other pipeline operators are not supported' do let(:pipeline) do [{ '$unwind' => '$test' }] end it 'sends the pipeline to the server without a custom error' do expect { change_stream }.to raise_exception(Mongo::Error::OperationFailure) end context 'when the operation fails', if: test_change_streams? do let!(:before_last_use) do session.instance_variable_get(:@server_session).last_use end let!(:before_operation_time) do (session.operation_time || 0) end let(:pipeline) do [ { '$invalid' => '$test' }] end let(:options) do { session: session } end let!(:operation_result) do begin; change_stream; rescue => e; e; end end let(:session) do client.start_session end let(:client) do authorized_client end it 'raises an error' do expect(operation_result.class).to eq(Mongo::Error::OperationFailure) end it 'updates the last use value' do expect(session.instance_variable_get(:@server_session).last_use).not_to eq(before_last_use) end it 'updates the operation time value' do expect(session.operation_time).not_to eq(before_operation_time) end end end end context 'when the initial batch is empty' do before do change_stream end it 'does not close the cursor' do expect(cursor).to be_a(Mongo::Cursor) end end context 'when provided a session', if: sessions_enabled? && test_change_streams? 
do let(:options) do { session: session } end let(:operation) do change_stream authorized_collection.insert_one(a: 1) change_stream.to_enum.next end let(:client) do authorized_client end context 'when the session is created from the same client used for the operation' do let(:session) do client.start_session end let(:server_session) do session.instance_variable_get(:@server_session) end let!(:before_last_use) do server_session.last_use end let!(:before_operation_time) do (session.operation_time || 0) end let!(:operation_result) do operation end it 'updates the last use value' do expect(server_session.last_use).not_to eq(before_last_use) end it 'updates the operation time value' do expect(session.operation_time).not_to eq(before_operation_time) end it 'does not close the session when the operation completes' do expect(session.ended?).to be(false) end end context 'when a session from another client is provided' do let(:session) do authorized_client_with_retry_writes.start_session end let(:operation_result) do operation end it 'raises an exception' do expect { operation_result }.to raise_exception(Mongo::Error::InvalidSession) end end context 'when the session is ended before it is used' do let(:session) do client.start_session end before do session.end_session end let(:operation_result) do operation end it 'raises an exception' do expect { operation_result }.to raise_exception(Mongo::Error::InvalidSession) end end end end describe '#close' do context 'when documents have not been retrieved and the stream is closed' do before do expect(cursor).to receive(:kill_cursors) change_stream.close end it 'closes the cursor' do expect(change_stream.instance_variable_get(:@cursor)).to be(nil) expect(change_stream.closed?).to be(true) end it 'raises an error when the stream is attempted to be iterated' do expect { change_stream.to_enum.next }.to raise_exception(StopIteration) end end context 'when some documents have been retrieved and the stream is closed before sending getmore' do before do change_stream authorized_collection.insert_one(a: 1) enum.next change_stream.close end let(:enum) do change_stream.to_enum end it 'raises an error' do expect { enum.next }.to raise_exception(StopIteration) end end end describe '#closed?' do context 'when the change stream has not been closed' do it 'returns false' do expect(change_stream.closed?).to be(false) end end context 'when the change stream has been closed' do before do change_stream.close end it 'returns true' do expect(change_stream.closed?).to be(true) end end end context 'when the first response does not contain the resume token' do let(:pipeline) do [{ '$project' => { _id: 0 } }] end before do change_stream authorized_collection.insert_one(a: 1) end it 'raises an exception and closes the cursor' do expect(cursor).to receive(:kill_cursors).and_call_original expect { change_stream.to_enum.next }.to raise_exception(Mongo::Error::MissingResumeToken) end end context 'when an error is encountered the first time the command is run' do let(:primary_socket) do primary = authorized_collection.client.cluster.servers.find { |s| s.primary?
} connection = primary.pool.checkout primary.pool.checkin(connection) connection.send(:socket) end context 'when the error is a resumable error' do shared_examples_for 'a resumable change stream' do before do expect(primary_socket).to receive(:write).and_raise(error).once expect(view.send(:server_selector)).to receive(:select_server).twice.and_call_original change_stream authorized_collection.insert_one(a: 1) end let(:document) do change_stream.to_enum.next end it 'runs the command again while using the same read preference and caches the resume token' do expect(document[:fullDocument][:a]).to eq(1) expect(change_stream_document[:resumeAfter]).to eq(document[:_id]) end context 'when provided a session' do let(:options) do { session: session} end let(:session) do authorized_client.start_session end before do change_stream.to_enum.next end it 'does not close the session' do expect(session.ended?).to be(false) end end end context 'when the error is a SocketError' do let(:error) do Mongo::Error::SocketError end it_behaves_like 'a resumable change stream' end context 'when the error is a SocketTimeoutError' do let(:error) do Mongo::Error::SocketTimeoutError end it_behaves_like 'a resumable change stream' end context "when the error is a 'not master' error" do let(:error) do Mongo::Error::OperationFailure.new('not master') end it_behaves_like 'a resumable change stream' end context "when the error is a 'cursor not found (43)' error" do let(:error) do Mongo::Error::OperationFailure.new('cursor not found (43)') end it_behaves_like 'a resumable change stream' end end context 'when the error is another server error' do before do expect(primary_socket).to receive(:write).and_raise(Mongo::Error::OperationFailure) #expect twice because of kill_cursors in after block expect(view.send(:server_selector)).to receive(:select_server).twice.and_call_original end it 'does not run the command again and instead raises the error' do expect { change_stream }.to raise_exception(Mongo::Error::OperationFailure) end context 'when provided a session' do let(:options) do { session: session} end let(:session) do authorized_client.start_session end before do begin; change_stream; rescue; end end it 'does not close the session' do expect(session.ended?).to be(false) end end end end context 'when a server error is encountered during a getmore' do context 'when the error is a resumable error' do shared_examples_for 'a change stream that encounters an error from a getmore' do before do change_stream authorized_collection.insert_one(a: 1) enum.next authorized_collection.insert_one(a: 2) expect(cursor).to receive(:get_more).once.and_raise(error) expect(cursor).to receive(:kill_cursors).and_call_original expect(Mongo::Operation::Commands::Aggregate).to receive(:new).and_call_original end let(:enum) do change_stream.to_enum end let(:document) do enum.next end it 'runs the command again while using the same read preference and caching the resume token' do expect(document[:fullDocument][:a]).to eq(2) expect(change_stream_document[:resumeAfter]).to eq(document[:_id]) end context 'when provided a session' do let(:options) do { session: session} end let(:session) do authorized_client.start_session end before do enum.next end it 'does not close the session' do expect(session.ended?).to be(false) end end end context 'when the error is a SocketError' do let(:error) do Mongo::Error::SocketError end it_behaves_like 'a change stream that encounters an error from a getmore' end context 'when the error is a SocketTimeoutError' do let(:error) 
do Mongo::Error::SocketTimeoutError end it_behaves_like 'a change stream that encounters an error from a getmore' end context "when the error is a 'not master' error" do let(:error) do Mongo::Error::OperationFailure.new('not master') end it_behaves_like 'a change stream that encounters an error from a getmore' end context "when the error is a 'cursor not found' error" do let(:error) do Mongo::Error::OperationFailure.new('cursor not found (43)') end it_behaves_like 'a change stream that encounters an error from a getmore' end end context 'when the error is another server error' do before do change_stream authorized_collection.insert_one(a: 1) enum.next authorized_collection.insert_one(a: 2) expect(cursor).to receive(:get_more).and_raise(Mongo::Error::OperationFailure) expect(cursor).to receive(:kill_cursors).and_call_original expect(Mongo::Operation::Commands::Aggregate).not_to receive(:new) end let(:enum) do change_stream.to_enum end it 'does not run the command again and instead raises the error' do expect { enum.next }.to raise_exception(Mongo::Error::OperationFailure) end context 'when provided a session' do let(:options) do { session: session} end let(:session) do authorized_client.start_session end before do begin; enum.next; rescue; end end it 'does not close the session' do expect(session.ended?).to be(false) end end end end context 'when a server error is encountered during the command following an error during getmore' do context 'when the error is a resumable error' do shared_examples_for 'a change stream that sent getmores, that then encounters an error when resuming' do before do change_stream authorized_collection.insert_one(a: 1) enum.next authorized_collection.insert_one(a: 2) expect(cursor).to receive(:get_more).and_raise(error) expect(cursor).to receive(:kill_cursors).and_call_original expect(change_stream).to receive(:send_initial_query).and_raise(error).once.ordered end let(:enum) do change_stream.to_enum end let(:document) do enum.next end it 'raises the error' do expect { document }.to raise_exception(error) end context 'when provided a session' do let(:options) do { session: session} end let(:session) do authorized_client.start_session end before do begin; document; rescue; end end it 'does not close the session' do expect(session.ended?).to be(false) end end end context 'when the error is a SocketError' do let(:error) do Mongo::Error::SocketError end it_behaves_like 'a change stream that sent getmores, that then encounters an error when resuming' end context 'when the error is a SocketTimeoutError' do let(:error) do Mongo::Error::SocketTimeoutError end it_behaves_like 'a change stream that sent getmores, that then encounters an error when resuming' end context "when the error is a 'not master' error" do let(:error) do Mongo::Error::OperationFailure.new('not master') end it_behaves_like 'a change stream that sent getmores, that then encounters an error when resuming' end context "when the error is a 'cursor not found' error" do let(:error) do Mongo::Error::OperationFailure.new('cursor not found (43)') end it_behaves_like 'a change stream that sent getmores, that then encounters an error when resuming' end end context 'when the error is another server error' do before do change_stream authorized_collection.insert_one(a: 1) enum.next authorized_collection.insert_one(a: 2) expect(cursor).to receive(:get_more).and_raise(Mongo::Error::OperationFailure.new('not master')) expect(cursor).to receive(:kill_cursors).and_call_original expect(change_stream).to
receive(:send_initial_query).and_raise(Mongo::Error::OperationFailure).once.ordered end let(:enum) do change_stream.to_enum end it 'does not run the command again and instead raises the error' do expect { enum.next }.to raise_exception(Mongo::Error::OperationFailure) end context 'when provided a session' do let(:options) do { session: session} end let(:session) do authorized_client.start_session end before do begin; enum.next; rescue; end end it 'does not close the session' do expect(session.ended?).to be(false) end end end end describe '#inspect' do it 'includes the Ruby object_id in the formatted string' do expect(change_stream.inspect).to include(change_stream.object_id.to_s) end context 'when resume_after is provided' do let(:options) do { resume_after: sample_resume_token } end it 'includes resume_after value in the formatted string' do expect(change_stream.inspect).to include(sample_resume_token.to_s) end end context 'when max_await_time_ms is provided' do let(:options) do { max_await_time_ms: 10 } end it 'includes the max_await_time value in the formatted string' do expect(change_stream.inspect).to include({ max_await_time_ms: 10 }.to_s) end end context 'when batch_size is provided' do let(:options) do { batch_size: 5 } end it 'includes the batch_size value in the formatted string' do expect(change_stream.inspect).to include({ batch_size: 5 }.to_s) end end context 'when collation is provided' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'includes the collation value in the formatted string' do expect(change_stream.inspect).to include({ 'collation' => { locale: 'en_US', strength: 2 } }.to_s) end end context 'when pipeline operators are provided' do let(:pipeline) do [{ '$project' => { '_id' => 0 }}] end it 'includes the filters in the formatted string' do expect(change_stream.inspect).to include([{ '$project' => { '_id' => 0 }}].to_s) end end end end mongo-2.5.1/spec/mongo/collection/view/aggregation_spec.rb0000644000004100000410000003574513257253113023712 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Aggregation do let(:pipeline) do [] end let(:view_options) do {} end let(:options) do {} end let(:selector) do {} end let(:view) do Mongo::Collection::View.new(authorized_collection, selector, view_options) end let(:aggregation) do described_class.new(view, pipeline, options) end let(:aggregation_spec) do aggregation.send(:aggregate_spec, double('session')) end after do authorized_collection.delete_many end describe '#allow_disk_use' do let(:new_agg) do aggregation.allow_disk_use(true) end it 'sets the value in the options' do expect(new_agg.allow_disk_use).to be true end end describe '#each' do let(:documents) do [ { city: "Berlin", pop: 18913, neighborhood: "Kreuzberg" }, { city: "Berlin", pop: 84143, neighborhood: "Mitte" }, { city: "New York", pop: 40270, neighborhood: "Brooklyn" } ] end let(:pipeline) do [{ "$group" => { "_id" => "$city", "totalpop" => { "$sum" => "$pop" } } }] end before do authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end context 'when provided a session' do let(:options) do { session: session } end let(:operation) do aggregation.to_a end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when a block is provided' do context 'when no batch size is provided' do it 'yields to each document' do aggregation.each do |doc| expect(doc[:totalpop]).to_not be_nil end end end context 'when a batch size of 0 is 
provided' do let(:aggregation) do described_class.new(view.batch_size(0), pipeline, options) end it 'yields to each document' do aggregation.each do |doc| expect(doc[:totalpop]).to_not be_nil end end end context 'when a batch size of greater than zero is provided' do let(:aggregation) do described_class.new(view.batch_size(5), pipeline, options) end it 'yields to each document' do aggregation.each do |doc| expect(doc[:totalpop]).to_not be_nil end end end end context 'when no block is provided' do it 'returns an enumerated cursor' do expect(aggregation.each).to be_a(Enumerator) end end context 'when an invalid pipeline operator is provided' do let(:pipeline) do [{ '$invalid' => 'operator' }] end it 'raises an OperationFailure' do expect { aggregation.to_a }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the initial response has no results but an active cursor', if: find_command_enabled? do let(:documents) do [ { city: 'a'*6000000 }, { city: 'b'*6000000 } ] end let(:options) do { :use_cursor => true } end let(:pipeline) do [{ '$sample' => { 'size' => 2 } }] end it 'iterates over the result documents' do expect(aggregation.to_a.size).to eq(2) end end context 'when the view has a write concern' do let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view) do Mongo::Collection::View.new(collection, selector, view_options) end context 'when the server supports write concern on the aggregate command', if: collation_enabled? do it 'does not apply the write concern' do expect(aggregation.to_a.size).to eq(2) end end context 'when the server does not support write concern on the aggregation command', unless: collation_enabled? do it 'does not apply the write concern' do expect(aggregation.to_a.size).to eq(2) end end end end describe '#initialize' do let(:options) do { :cursor => true } end it 'sets the view' do expect(aggregation.view).to eq(view) end it 'sets the pipeline' do expect(aggregation.pipeline).to eq(pipeline) end it 'sets the options' do expect(aggregation.options).to eq(BSON::Document.new(options)) end it 'dups the options' do expect(aggregation.options).not_to be(options) end end describe '#explain' do it 'executes an explain' do expect(aggregation.explain).to_not be_empty end context 'session id', if: test_sessions? do let(:options) do { session: session } end let(:client) do subscribed_client end let(:session) do client.start_session end let(:view) do Mongo::Collection::View.new(client[TEST_COLL], selector, view_options) end let(:command) do aggregation.explain EventSubscriber.started_events.find { |c| c.command_name == 'aggregate'}.command end it 'sends the session id' do expect(command['lsid']).to eq(session.session_id) end end context 'when a collation is specified' do before do authorized_collection.insert_many([ { name: 'bang' }, { name: 'bang' }]) end let(:pipeline) do [{ "$match" => { "name" => "BANG" } }] end let(:result) do aggregation.explain['$cursor']['queryPlanner']['collation']['locale'] end context 'when the server selected supports collations', if: collation_enabled? 
do context 'when the collation key is a String' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'applies the collation' do expect(result).to eq('en_US') end end context 'when the collation key is a Symbol' do let(:options) do { collation: { locale: 'en_US', strength: 2 } } end it 'applies the collation' do expect(result).to eq('en_US') end end end context 'when the server selected does not support collations', unless: collation_enabled? do let(:options) do { collation: { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end end describe '#aggregate_spec' do context 'when the collection has a read preference' do let(:read_preference) do Mongo::ServerSelector.get(mode: :secondary) end it 'includes the read preference in the spec' do allow(authorized_collection).to receive(:read_preference).and_return(read_preference) expect(aggregation_spec[:read]).to eq(read_preference) end end context 'when allow_disk_use is set' do let(:aggregation) do described_class.new(view, pipeline, options).allow_disk_use(true) end it 'includes the option in the spec' do expect(aggregation_spec[:selector][:allowDiskUse]).to eq(true) end context 'when allow_disk_use is specified as an option' do let(:options) do { :allow_disk_use => true } end let(:aggregation) do described_class.new(view, pipeline, options) end it 'includes the option in the spec' do expect(aggregation_spec[:selector][:allowDiskUse]).to eq(true) end context 'when #allow_disk_use is also called' do let(:options) do { :allow_disk_use => true } end let(:aggregation) do described_class.new(view, pipeline, options).allow_disk_use(false) end it 'overrides the first option with the second' do expect(aggregation_spec[:selector][:allowDiskUse]).to eq(false) end end end end context 'when max_time_ms is an option' do let(:options) do { :max_time_ms => 100 } end it 'includes the option in the spec' do expect(aggregation_spec[:selector][:maxTimeMS]).to eq(options[:max_time_ms]) end end context 'when comment is an option' do let(:options) do { :comment => 'testing' } end it 'includes the option in the spec' do expect(aggregation_spec[:selector][:comment]).to eq(options[:comment]) end end context 'when batch_size is set' do context 'when batch_size is set on the view' do let(:view_options) do { :batch_size => 10 } end it 'uses the batch_size on the view' do expect(aggregation_spec[:selector][:cursor][:batchSize]).to eq(view_options[:batch_size]) end end context 'when batch_size is provided in the options' do let(:options) do { :batch_size => 20 } end it 'includes the option in the spec' do expect(aggregation_spec[:selector][:cursor][:batchSize]).to eq(options[:batch_size]) end context 'when batch_size is also set on the view' do let(:view_options) do { :batch_size => 10 } end it 'overrides the view batch_size with the option batch_size' do expect(aggregation_spec[:selector][:cursor][:batchSize]).to eq(options[:batch_size]) end end end end context 'when a hint is specified' do let(:options) do { 'hint' => { 'y' => 1 } } end it 'includes the option in the spec' do expect(aggregation_spec[:selector][:hint]).to eq(options['hint']) end end context 'when use_cursor is set' do context 'when use_cursor is true' do context 'when 
batch_size is set' do let(:options) do { :use_cursor => true, :batch_size => 10 } end it 'sets a batch size document in the spec' do expect(aggregation_spec[:selector][:cursor][:batchSize]).to eq(options[:batch_size]) end end context 'when batch_size is not set' do let(:options) do { :use_cursor => true } end it 'sets an empty document in the spec' do expect(aggregation_spec[:selector][:cursor]).to eq({}) end end end context 'when use_cursor is false' do let(:options) do { :use_cursor => false } end context 'when batch_size is set' do it 'does not set the cursor option in the spec' do expect(aggregation_spec[:selector][:cursor]).to be_nil end end end end end context 'when the aggregation has a collation defined' do before do authorized_collection.insert_many([ { name: 'bang' }, { name: 'bang' }]) end let(:pipeline) do [{ "$match" => { "name" => "BANG" } }] end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end let(:result) do aggregation.collect { |doc| doc['name']} end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result).to eq(['bang', 'bang']) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when $out is in the pipeline' do let(:pipeline) do [{ "$group" => { "_id" => "$city", "totalpop" => { "$sum" => "$pop" } } }, { '$out' => 'output_collection' } ] end after do authorized_client['output_collection'].delete_many end context 'when $out is a string' do it 'does not allow the operation on a secondary' do expect(aggregation.send(:secondary_ok?)).to be(false) end end context 'when $out is a symbol' do let(:pipeline) do [{ "$group" => { "_id" => "$city", "totalpop" => { "$sum" => "$pop" } } }, { :$out => 'output_collection' } ] end it 'does not allow the operation on a secondary' do expect(aggregation.send(:secondary_ok?)).to be(false) end end context 'when the server is not valid for writing' do it 'reroutes the operation to a primary' do allow(aggregation).to receive(:valid_server?).and_return(false) expect(Mongo::Logger.logger).to receive(:warn?).and_call_original aggregation.to_a end end context 'when the server is valid for writing' do it 'does not reroute the operation to a primary' do expect(Mongo::Logger.logger).not_to receive(:warn?) aggregation.to_a end context 'when the view has a write concern' do let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view) do Mongo::Collection::View.new(collection, selector, view_options) end context 'when the server supports write concern on the aggregate command', if: collation_enabled? do it 'uses the write concern' do expect { aggregation.to_a }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not support write concern on the aggregation command', unless: collation_enabled?
do let(:documents) do [ { city: "Berlin", pop: 18913, neighborhood: "Kreuzberg" }, { city: "Berlin", pop: 84143, neighborhood: "Mitte" }, { city: "New York", pop: 40270, neighborhood: "Brooklyn" } ] end before do authorized_collection.insert_many(documents) aggregation.to_a end it 'does not apply the write concern' do expect(authorized_client['output_collection'].find.count).to eq(2) end end end end end end mongo-2.5.1/spec/mongo/collection/view/immutable_spec.rb0000644000004100000410000000205113257253113023362 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Immutable do let(:selector) do {} end let(:options) do {} end let(:view) do Mongo::Collection::View.new(authorized_collection, selector, options) end after do authorized_collection.delete_many end describe '#configure' do context 'when the options have modifiers' do let(:options) do { :max_time_ms => 500 } end let(:new_view) do view.projection(_id: 1) end it 'returns a new view' do expect(view).not_to be(new_view) end it 'creates a new options hash' do expect(view.options).not_to be(new_view.options) end it 'keeps the modifier fields already in the options hash' do expect(new_view.modifiers[:$maxTimeMS]).to eq(500) end it 'sets the option' do expect(new_view.projection).to eq('_id' => 1) end it 'creates a new modifiers document' do expect(view.modifiers).not_to be(new_view.modifiers) end end end end mongo-2.5.1/spec/mongo/collection/view/readable_spec.rb0000644000004100000410000007613713257253113023162 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Readable do let(:selector) do {} end let(:options) do {} end let(:view) do Mongo::Collection::View.new(authorized_collection, selector, options) end after do authorized_collection.delete_many end shared_examples_for 'a read concern aware operation' do context 'when a read concern is provided', if: find_command_enabled? 
do let(:new_view) do Mongo::Collection::View.new(new_collection, selector, options) end context 'when the read concern is valid' do let(:new_collection) do authorized_collection.with(read_concern: { level: 'local' }) end it 'sends the read concern' do expect { result }.to_not raise_error end end context 'when the read concern is not valid' do let(:new_collection) do authorized_collection.with(read_concern: { level: 'na' }) end it 'raises an exception' do expect { result }.to raise_error(Mongo::Error::OperationFailure) end end end end describe '#allow_partial_results' do let(:new_view) do view.allow_partial_results end it 'sets the flag' do expect(new_view.options[:allow_partial_results]).to be true end it 'returns a new View' do expect(new_view).not_to be(view) end end describe '#aggregate' do let(:documents) do [ { city: "Berlin", pop: 18913, neighborhood: "Kreuzberg" }, { city: "Berlin", pop: 84143, neighborhood: "Mitte" }, { city: "New York", pop: 40270, neighborhood: "Brooklyn" } ] end let(:pipeline) do [{ "$group" => { "_id" => "$city", "totalpop" => { "$sum" => "$pop" } } }] end before do authorized_collection.insert_many(documents) end let(:aggregation) do view.aggregate(pipeline) end context 'when incorporating read concern' do let(:result) do new_view.aggregate(pipeline, options).to_a end it_behaves_like 'a read concern aware operation' end context 'when not iterating the aggregation' do it 'returns the aggregation object' do expect(aggregation).to be_a(Mongo::Collection::View::Aggregation) end end context 'when iterating the aggregation' do it 'yields to each document' do aggregation.each do |doc| expect(doc[:totalpop]).to_not be_nil end end end context 'when options are specified' do let(:agg_options) do { :max_time_ms => 500 } end let(:aggregation) do view.aggregate(pipeline, agg_options) end it 'passes the option to the Aggregation object' do expect(aggregation.options[:max_time_ms]).to eq(agg_options[:max_time_ms]) end end end describe '#map_reduce' do let(:map) do %Q{ function() { emit(this.name, { population: this.population }); }} end let(:reduce) do %Q{ function(key, values) { var result = { population: 0 }; values.forEach(function(value) { result.population += value.population; }); return result; }} end let(:documents) do [ { name: 'Berlin', population: 3000000 }, { name: 'London', population: 9000000 } ] end before do authorized_collection.insert_many(documents) end let(:map_reduce) do view.map_reduce(map, reduce) end context 'when incorporating read concern' do let(:result) do new_view.map_reduce(map, reduce, options).to_a end it_behaves_like 'a read concern aware operation' end context 'when a session supporting causal consistency is used' do let(:view) do Mongo::Collection::View.new(collection, selector, session: session) end let(:operation) do begin; view.map_reduce(map, reduce).to_a; rescue; end end let(:command) do operation EventSubscriber.started_events.find { |cmd| cmd.command_name == 'mapreduce' }.command end it_behaves_like 'an operation supporting causally consistent reads' end context 'when not iterating the map/reduce' do it 'returns the map/reduce object' do expect(map_reduce).to be_a(Mongo::Collection::View::MapReduce) end end context 'when iterating the map/reduce' do it 'yields to each document' do map_reduce.each do |doc| expect(doc[:_id]).to_not be_nil end end end end describe '#batch_size' do let(:options) do { :batch_size => 13 } end context 'when a batch size is specified' do let(:new_batch_size) do 15 end it 'sets the batch size' do new_view = 
view.batch_size(new_batch_size) expect(new_view.batch_size).to eq(new_batch_size) end it 'returns a new View' do expect(view.batch_size(new_batch_size)).not_to be(view) end end context 'when a batch size is not specified' do it 'returns the batch_size' do expect(view.batch_size).to eq(options[:batch_size]) end end end describe '#comment' do let(:options) do { :comment => 'test1' } end context 'when a comment is specified' do let(:new_comment) do 'test2' end it 'sets the comment' do new_view = view.comment(new_comment) expect(new_view.comment).to eq(new_comment) end it 'returns a new View' do expect(view.comment(new_comment)).not_to be(view) end end context 'when a comment is not specified' do it 'returns the comment' do expect(view.comment).to eq(options[:comment]) end end end describe '#count' do let(:documents) do (1..10).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end let(:result) do view.count(options) end context 'when incorporating read concern' do let(:result) do new_view.count(options) end it_behaves_like 'a read concern aware operation' end context 'when a selector is provided' do let(:selector) do { field: 'test1' } end it 'returns the count of matching documents' do expect(view.count).to eq(1) end it 'returns an integer' do expect(view.count).to be_a(Integer) end end context 'when no selector is provided' do it 'returns the count of matching documents' do expect(view.count).to eq(10) end end it 'takes a read preference option', unless: sharded? do expect(view.count(read: { mode: :secondary })).to eq(10) end context 'when a read preference is set on the view', unless: sharded? do let(:client) do # Set a timeout otherwise, the test will hang for 30 seconds. authorized_client.with(server_selection_timeout: 1) end let(:collection) do client[authorized_collection.name] end before do allow(client.cluster).to receive(:single?).and_return(false) end let(:view) do Mongo::Collection::View.new(collection, selector, options) end let(:view_with_read_pref) do view.read(:mode => :secondary, :tag_sets => [{ 'non' => 'existent' }]) end let(:result) do view_with_read_pref.count end it 'uses the read preference setting on the view' do expect { result }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when the collection has a read preference set' do after do client.close end let(:client) do # Set a timeout in case the collection read_preference does get used. # Otherwise, the test will hang for 30 seconds. authorized_client.with(server_selection_timeout: 1) end let(:read_preference) do { :mode => :secondary, :tag_sets => [{ 'non' => 'existent' }] } end let(:collection) do client[authorized_collection.name, read: read_preference] end let(:view) do Mongo::Collection::View.new(collection, selector, options) end context 'when a read preference argument is provided' do let(:result) do view.count(read: { mode: :primary }) end it 'uses the read preference passed to the method' do expect(result).to eq(10) end end context 'when a read preference is set on the view' do let(:view_with_read_pref) do view.read(mode: :primary) end let(:result) do view_with_read_pref.count end it 'uses the read preference of the view' do expect(result).to eq(10) end end context 'when no read preference argument is provided', unless: sharded? 
do before do allow(view.collection.client.cluster).to receive(:single?).and_return(false) end let(:result) do view.count end it 'uses the read preference of the collection' do expect { result }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when the collection does not have a read preference set', unless: sharded? do after do client.close end let(:client) do authorized_client.with(server_selection_timeout: 1) end before do allow(view.collection.client.cluster).to receive(:single?).and_return(false) end let(:collection) do client[authorized_collection.name] end let(:view) do Mongo::Collection::View.new(collection, selector, options) end let(:result) do read_preference = { :mode => :secondary, :tag_sets => [{ 'non' => 'existent' }] } view.count(read: read_preference) end it 'uses the read preference passed to the method' do expect { result }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when a read preference is set on the view' do let(:view_with_read_pref) do view.read(:mode => :primary) end let(:result) do view_with_read_pref.count end it 'uses the read preference passed to the method' do expect(result).to eq(10) end end end it 'takes a max_time_ms option' do expect { view.count(max_time_ms: 0.1) }.to raise_error(Mongo::Error::OperationFailure) end it 'sets the max_time_ms option on the command' do expect(view.count(max_time_ms: 100)).to eq(10) end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.count end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation to the count' do expect(result).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is specified in the method options' do let(:selector) do { name: 'BANG' } end let(:result) do view.count(count_options) end before do authorized_collection.insert_one(name: 'bang') end let(:count_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation to the count' do expect(result).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:count_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end end describe '#distinct' do context 'when incorporating read concern' do let(:result) do new_view.distinct(:field, options) end it_behaves_like 'a read concern aware operation' end context 'when a selector is provided' do let(:selector) do { field: 'test' } end let(:documents) do (1..3).map{ |i| { field: "test" }} end before do authorized_collection.insert_many(documents) end context 'when the field is a symbol' do let(:distinct) do view.distinct(:field) end it 'returns the distinct values' do expect(distinct).to eq([ 'test' ]) end end context 'when the field is a string' do let(:distinct) do view.distinct('field') end it 'returns the distinct values' do expect(distinct).to eq([ 'test' ]) end end context 'when the field is nil' do let(:distinct) do view.distinct(nil) end it 'returns an empty array' do expect(distinct).to be_empty end end context 'when the field does not exist' do let(:distinct) do view.distinct(:doesnotexist) end it 'returns an empty array' do expect(distinct).to be_empty end end end context 'when no selector is provided' do let(:documents) do (1..3).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end context 'when the field is a symbol' do let(:distinct) do view.distinct(:field) end it 'returns the distinct values' do expect(distinct.sort).to eq([ 'test1', 'test2', 'test3' ]) end end context 'when the field is a string' do let(:distinct) do view.distinct('field') end it 'returns the distinct values' do expect(distinct.sort).to eq([ 'test1', 'test2', 'test3' ]) end end context 'when the field is nil' do let(:distinct) do view.distinct(nil) end it 'returns an empty array' do expect(distinct).to be_empty end end end context 'when a read preference is set on the view', unless: sharded? do let(:client) do # Set a timeout otherwise, the test will hang for 30 seconds. authorized_client.with(server_selection_timeout: 1) end let(:collection) do client[authorized_collection.name] end before do allow(client.cluster).to receive(:single?).and_return(false) end let(:view) do Mongo::Collection::View.new(collection, selector, options) end let(:view_with_read_pref) do view.read(:mode => :secondary, :tag_sets => [{ 'non' => 'existent' }]) end let(:result) do view_with_read_pref.distinct(:field) end it 'uses the read preference setting on the view' do expect { result }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when the collection has a read preference set' do let(:documents) do (1..3).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end after do client.close end let(:client) do # Set a timeout in case the collection read_preference does get used. # Otherwise, the test will hang for 30 seconds. 
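# The collection-level read preference below uses a tag set no server carries, so
# any example that falls back to it expects Mongo::Error::NoServerAvailable after
# server_selection_timeout; the one-second timeout keeps that expected failure fast.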
authorized_client.with(server_selection_timeout: 1) end let(:read_preference) do { :mode => :secondary, :tag_sets => [{ 'non' => 'existent' }] } end let(:collection) do client[authorized_collection.name, read: read_preference] end let(:view) do Mongo::Collection::View.new(collection, selector, options) end context 'when a read preference argument is provided' do let(:distinct) do view.distinct(:field, read: { mode: :primary }) end it 'uses the read preference passed to the method' do expect(distinct.sort).to eq([ 'test1', 'test2', 'test3' ]) end end context 'when no read preference argument is provided', unless: sharded? do before do allow(view.collection.client.cluster).to receive(:single?).and_return(false) end let(:distinct) do view.distinct(:field) end it 'uses the read preference of the collection' do expect { distinct }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when the collection does not have a read preference set', unless: sharded? do let(:documents) do (1..3).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) allow(view.collection.client.cluster).to receive(:single?).and_return(false) end after do client.close end let(:client) do authorized_client.with(server_selection_timeout: 1) end let(:collection) do client[authorized_collection.name] end let(:view) do Mongo::Collection::View.new(collection, selector, options) end let(:distinct) do read_preference = { :mode => :secondary, :tag_sets => [{ 'non' => 'existent' }] } view.distinct(:field, read: read_preference) end it 'uses the read preference passed to the method' do expect { distinct }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when a read preference is set on the view' do let(:view_with_read_pref) do view.read(:mode => :secondary, :tag_sets => [{ 'non' => 'existent' }]) end let(:distinct) do view_with_read_pref.distinct(:field, read: { mode: :primary }) end it 'uses the read preference passed to the method' do expect(distinct.sort).to eq([ 'test1', 'test2', 'test3' ]) end end end context 'when a max_time_ms is specified' do let(:documents) do (1..3).map{ |i| { field: "test" }} end before do authorized_collection.insert_many(documents) end it 'sets the max_time_ms option on the command' do expect { view.distinct(:field, max_time_ms: 0.1) }.to raise_error(Mongo::Error::OperationFailure) end it 'sets the max_time_ms option on the command' do expect(view.distinct(:field, max_time_ms: 100)).to eq([ 'test' ]) end end context 'when the field does not exist' do it 'returns an empty array' do expect(view.distinct(:nofieldexists)).to be_empty end end context 'when a collation is specified on the view' do let(:result) do view.distinct(:name) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'BANG') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation to the distinct' do expect(result).to eq(['bang']) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is specified in the method options' do let(:result) do view.distinct(:name, distinct_options) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'BANG') end let(:distinct_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation to the distinct' do expect(result).to eq(['bang']) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:distinct_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:result) do view.distinct(:name) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'BANG') end it 'does not apply the collation to the distinct' do expect(result).to match_array(['bang', 'BANG']) end end end describe '#hint' do context 'when a hint is specified' do let(:options) do { :hint => { 'x' => Mongo::Index::ASCENDING } } end let(:new_hint) do { 'x' => Mongo::Index::DESCENDING } end it 'sets the hint' do new_view = view.hint(new_hint) expect(new_view.hint).to eq(new_hint) end it 'returns a new View' do expect(view.hint(new_hint)).not_to be(view) end end context 'when a hint is not specified' do let(:options) do { :hint => 'x' } end it 'returns the hint' do expect(view.hint).to eq(options[:hint]) end end end describe '#limit' do context 'when a limit is specified' do let(:options) do { :limit => 5 } end let(:new_limit) do 10 end it 'sets the limit' do new_view = view.limit(new_limit) expect(new_view.limit).to eq(new_limit) end it 'returns a new View' do expect(view.limit(new_limit)).not_to be(view) end end context 'when a limit is not specified' do let(:options) do { :limit => 5 } end it 'returns the limit' do expect(view.limit).to eq(options[:limit]) end end end describe '#max_scan' do let(:new_view) do view.max_scan(10) end it 'sets the value in the options' do expect(new_view.max_scan).to eq(10) end end describe '#max_value' do let(:new_view) do view.max_value(_id: 1) end it 'sets the value in the options' do expect(new_view.max_value).to eq('_id' => 1) end end describe '#min_value' do let(:new_view) do view.min_value(_id: 1) end it 'sets the value in the options' do expect(new_view.min_value).to eq('_id' => 1) end end describe '#no_cursor_timeout' do let(:new_view) do view.no_cursor_timeout end it 'sets the flag' do expect(new_view.options[:no_cursor_timeout]).to be true end it 'returns a new View' do expect(new_view).not_to be(view) end end describe '#projection' do let(:options) do { :projection => { 'x' => 1 } } end context 'when projection are specified' do let(:new_projection) do { 'y' => 1 } end before do authorized_collection.insert_one(y: 'value', a: 'other_value') end it 'sets the projection' do new_view = view.projection(new_projection) 
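# Projection, like the other View builders in this spec, returns a new immutable
# view rather than mutating the receiver; the examples below check both the copied
# option and that only the projected field (plus _id) comes back from the server,
# e.g. view.projection('y' => 1).first.keys #=> ['_id', 'y']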
expect(new_view.projection).to eq(new_projection) end it 'returns a new View' do expect(view.projection(new_projection)).not_to be(view) end it 'returns only that field on the collection' do expect(view.projection(new_projection).first.keys).to match_array(['_id', 'y']) end end context 'when projection is not specified' do it 'returns the projection' do expect(view.projection).to eq(options[:projection]) end end context 'when projection is not a document' do let(:new_projection) do 'y' end it 'raises an error' do expect do view.projection(new_projection) end.to raise_error(Mongo::Error::InvalidDocument) end end end describe '#read' do context 'when a read pref is specified' do let(:options) do { :read => { :mode => :secondary } } end let(:new_read) do { :mode => :secondary_preferred } end it 'sets the read preference' do new_view = view.read(new_read) expect(new_view.read).to eq(BSON::Document.new(new_read)) end it 'returns a new View' do expect(view.read(new_read)).not_to be(view) end end context 'when a read pref is not specified' do let(:options) do { :read => Mongo::ServerSelector.get(:mode => :secondary) } end it 'returns the read preference' do expect(view.read).to eq(options[:read]) end context 'when no read pref is set on initialization' do let(:options) do {} end it 'returns the collection read preference' do expect(view.read).to eq(authorized_collection.read_preference) end end end end describe '#show_disk_loc' do let(:options) do { :show_disk_loc => true } end context 'when show_disk_loc is specified' do let(:new_show_disk_loc) do false end it 'sets the show_disk_loc value' do new_view = view.show_disk_loc(new_show_disk_loc) expect(new_view.show_disk_loc).to eq(new_show_disk_loc) end it 'returns a new View' do expect(view.show_disk_loc(new_show_disk_loc)).not_to be(view) end end context 'when show_disk_loc is not specified' do it 'returns the show_disk_loc value' do expect(view.show_disk_loc).to eq(options[:show_disk_loc]) end end end describe '#modifiers' do let(:options) do { :modifiers => { '$orderby' => 1 } } end context 'when a modifiers document is specified' do let(:new_modifiers) do { '$orderby' => -1 } end it 'sets the modifiers document' do new_view = view.modifiers(new_modifiers) expect(new_view.modifiers).to eq(new_modifiers) end it 'returns a new View' do expect(view.modifiers(new_modifiers)).not_to be(view) end end context 'when a modifiers document is not specified' do it 'returns the modifiers value' do expect(view.modifiers).to eq(options[:modifiers]) end end end describe '#max_time_ms' do let(:options) do { :max_time_ms => 200 } end context 'when max_time_ms is specified' do let(:new_max_time_ms) do 300 end it 'sets the max_time_ms value' do new_view = view.max_time_ms(new_max_time_ms) expect(new_view.max_time_ms).to eq(new_max_time_ms) end it 'returns a new View' do expect(view.max_time_ms(new_max_time_ms)).not_to be(view) end end context 'when max_time_ms is not specified' do it 'returns the max_time_ms value' do expect(view.max_time_ms).to eq(options[:max_time_ms]) end end end describe '#cursor_type' do let(:options) do { :cursor_type => :tailable } end context 'when cursor_type is specified' do let(:new_cursor_type) do :tailable_await end it 'sets the cursor_type value' do new_view = view.cursor_type(new_cursor_type) expect(new_view.cursor_type).to eq(new_cursor_type) end it 'returns a new View' do expect(view.cursor_type(new_cursor_type)).not_to be(view) end end context 'when cursor_type is not specified' do it 'returns the cursor_type value' do
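# :tailable and :tailable_await are the cursor types exercised here; both are only
# meaningful on capped collections, where the cursor stays open and waits for new
# documents. Illustrative sketch: view.cursor_type(:tailable_await)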
expect(view.cursor_type).to eq(options[:cursor_type]) end end end describe '#skip' do context 'when a skip is specified' do let(:options) do { :skip => 5 } end let(:new_skip) do 10 end it 'sets the skip value' do new_view = view.skip(new_skip) expect(new_view.skip).to eq(new_skip) end it 'returns a new View' do expect(view.skip(new_skip)).not_to be(view) end end context 'when a skip is not specified' do let(:options) do { :skip => 5 } end it 'returns the skip value' do expect(view.skip).to eq(options[:skip]) end end end describe '#snapshot' do let(:new_view) do view.snapshot(true) end it 'sets the value in the options' do expect(new_view.snapshot).to be true end end describe '#sort' do context 'when a sort is specified' do let(:options) do { :sort => { 'x' => Mongo::Index::ASCENDING }} end let(:new_sort) do { 'x' => Mongo::Index::DESCENDING } end it 'sets the sort option' do new_view = view.sort(new_sort) expect(new_view.sort).to eq(new_sort) end it 'returns a new View' do expect(view.sort(new_sort)).not_to be(view) end end context 'when a sort is not specified' do let(:options) do { :sort => { 'x' => Mongo::Index::ASCENDING }} end it 'returns the sort' do expect(view.sort).to eq(options[:sort]) end end end end mongo-2.5.1/spec/mongo/collection/view/writable_spec.rb0000644000004100000410000011514713257253113023227 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View::Writable do let(:selector) do {} end let(:options) do {} end let(:view) do Mongo::Collection::View.new(authorized_collection, selector, options) end after do authorized_collection.delete_many end describe '#find_one_and_delete' do before do authorized_collection.insert_many([{ field: 'test1' }]) end context 'when a matching document is found' do let(:selector) do { field: 'test1' } end context 'when no options are provided' do let!(:document) do view.find_one_and_delete end it 'deletes the document from the database' do expect(view.to_a).to be_empty end it 'returns the document' do expect(document['field']).to eq('test1') end end context 'when a projection is provided' do let!(:document) do view.projection(_id: 1).find_one_and_delete end it 'deletes the document from the database' do expect(view.to_a).to be_empty end it 'returns the document with limited fields' do expect(document['field']).to be_nil expect(document['_id']).to_not be_nil end end context 'when a sort is provided' do let!(:document) do view.sort(field: 1).find_one_and_delete end it 'deletes the document from the database' do expect(view.to_a).to be_empty end it 'returns the document with limited fields' do expect(document['field']).to eq('test1') end end context 'when collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_delete end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_delete end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result).to be_nil end end context 'when collation is specified as a method option' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_delete(method_options) end before do authorized_collection.insert_one(name: 'bang') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end end context 'when no matching document is found' do let(:selector) do { field: 'test5' } end let!(:document) do view.find_one_and_delete end it 'returns nil' do expect(document).to be_nil end end end describe '#find_one_and_replace' do before do authorized_collection.insert_many([{ field: 'test1', other: 'sth' }]) end context 'when a matching document is found' do let(:selector) do { field: 'test1' } end context 'when no options are provided' do let(:document) do view.find_one_and_replace({ field: 'testing' }) end it 'returns the original document' do expect(document['field']).to eq('test1') end end context 'when return_document options are provided' do let(:document) do view.find_one_and_replace({ field: 'testing' }, :return_document => :after) end it 'returns the new document' do expect(document['field']).to eq('testing') end it 'replaces the document' do expect(document['other']).to be_nil end end context 'when a projection is provided' do let(:document) do view.projection(_id: 1).find_one_and_replace({ field: 'testing' }) end it 'returns the document with limited fields' do expect(document['field']).to be_nil expect(document['_id']).to_not be_nil end end context 'when a sort is provided' do let(:document) do view.sort(field: 1).find_one_and_replace({ field: 'testing' }) end it 'returns the original document' do expect(document['field']).to eq('test1') end end context 'when collation is provided' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_replace(name: 'doink') end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') expect(authorized_collection.find({ name: 'doink' }, limit: -1).first['name']).to eq('doink') end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is provided as a method option' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_replace({ name: 'doink' }, method_options) end before do authorized_collection.insert_one(name: 'bang') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') expect(authorized_collection.find({ name: 'doink' }, limit: -1).first['name']).to eq('doink') end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is not provided' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_replace(name: 'doink') end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result).to be_nil end end end context 'when no matching document is found' do context 'when no upsert options are provided' do let(:selector) do { field: 'test5' } end let(:document) do view.find_one_and_replace({ field: 'testing' }) end it 'returns nil' do expect(document).to be_nil end end context 'when upsert options are provided' do let(:selector) do { field: 'test5' } end let(:document) do view.find_one_and_replace({ field: 'testing' }, :upsert => true, :return_document => :after) end it 'returns the new document' do expect(document['field']).to eq('testing') end end end end describe '#find_one_and_update' do before do authorized_collection.insert_many([{ field: 'test1' }]) end context 'when a matching document is found' do let(:selector) do { field: 'test1' } end context 'when no options are provided' do let(:document) do view.find_one_and_update({ '$set' => { field: 'testing' }}) end it 'returns the original document' do expect(document['field']).to eq('test1') end end context 'when return_document options are provided' do let(:document) do view.find_one_and_update({ '$set' => { field: 'testing' }}, :return_document => :after) end it 'returns the new document' do expect(document['field']).to eq('testing') end end context 'when a projection is provided' do let(:document) do view.projection(_id: 1).find_one_and_update({ '$set' => { field: 'testing' }}) end it 'returns the document with limited fields' do expect(document['field']).to be_nil expect(document['_id']).to_not be_nil end end context 'when a sort is provided' do let(:document) do view.sort(field: 1).find_one_and_update({ '$set' => { field: 'testing' } }) end it 'returns the original document' do expect(document['field']).to eq('test1') end end end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_update({ '$set' => { other: 'doink' } }) end before do authorized_collection.insert_one(name: 'bang') end 
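# --- Illustrative usage sketch (added by the editor; not part of the original mongo-2.5.1 spec) ---
# As the surrounding examples show, a collation can be given either in the view
# options or directly as a method option on the findAndModify helpers, and
# :return_document controls whether the pre- or post-update document is returned.
# A hedged sketch, assuming a local mongod new enough to support collations (3.4+)
# and a hypothetical 'people' collection:
require 'mongo'
client = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')
people = client[:people]
people.insert_one(name: 'bang')
updated = people.find(name: 'BANG').find_one_and_update(
  { '$set' => { other: 'doink' } },
  collation: { locale: 'en_US', strength: 2 },   # match case-insensitively
  return_document: :after                        # return the modified document
)
updated['other']   # => "doink" when the server honours the collation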
let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') expect(authorized_collection.find({ name: 'bang' }, limit: -1).first['other']).to eq('doink') end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is specified as a method option' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_update({ '$set' => { other: 'doink' } }, method_options) end before do authorized_collection.insert_one(name: 'bang') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') expect(authorized_collection.find({ name: 'bang' }, limit: -1).first['other']).to eq('doink') end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when no collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.find_one_and_update({ '$set' => { other: 'doink' } }) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result).to be_nil end end context 'when no matching document is found' do let(:selector) do { field: 'test5' } end let(:document) do view.find_one_and_update({ '$set' => { field: 'testing' }}) end it 'returns nil' do expect(document).to be_nil end end end describe '#delete_many' do context 'when a selector was provided' do let(:selector) do { field: 'test1' } end before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let(:response) do view.delete_many end it 'deletes the matching documents in the collection' do expect(response.written_count).to eq(1) end end context 'when no selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let(:response) do view.delete_many end it 'deletes all the documents in the collection' do expect(response.written_count).to eq(2) end end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.delete_many end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(2) expect(authorized_collection.find(name: 'bang').to_a.size).to eq(0) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is specified as a method option' do let(:selector) do { name: 'BANG' } end let(:result) do view.delete_many(method_options) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(2) expect(authorized_collection.find(name: 'bang').to_a.size).to eq(0) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.delete_many end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) expect(authorized_collection.find(name: 'bang').to_a.size).to eq(2) end end end describe '#delete_one' do context 'when a selector was provided' do let(:selector) do { field: 'test1' } end before do authorized_collection.insert_many([ { field: 'test1' }, { field: 'test1' }, { field: 'test1' } ]) end let(:response) do view.delete_one end it 'deletes the first matching document in the collection' do expect(response.written_count).to eq(1) end end context 'when no selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let(:response) do view.delete_one end it 'deletes the first document in the collection' do expect(response.written_count).to eq(1) end end context 'when a collation is provided' do let(:selector) do { name: 'BANG' } end let(:result) do view.delete_one end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(name: 'bang').to_a.size).to eq(0) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is provided as a method_option' do let(:selector) do { name: 'BANG' } end let(:result) do view.delete_one(method_options) end before do authorized_collection.insert_one(name: 'bang') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(name: 'bang').to_a.size).to eq(0) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:selector) do {name: 'BANG'} end let(:result) do view.delete_one end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) expect(authorized_collection.find(name: 'bang').to_a.size).to eq(1) end end end describe '#replace_one' do context 'when a selector was provided' do let(:selector) do { field: 'test1' } end before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test1' }]) end let!(:response) do view.replace_one({ field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').first end it 'updates the first matching document in the collection' do expect(response.written_count).to eq(1) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when no selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let!(:response) do view.replace_one({ field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').first end it 'updates the first document in the collection' do expect(response.written_count).to eq(1) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is false' do let!(:response) do view.replace_one({ field: 'test1' }, upsert: false) end let(:updated) do authorized_collection.find(field: 'test1').to_a end it 'reports that no documents were written' do expect(response.written_count).to eq(0) end it 'does not insert the document' do expect(updated).to be_empty end end context 'when upsert is true' do let!(:response) do view.replace_one({ field: 'test1' }, upsert: true) end let(:updated) do authorized_collection.find(field: 'test1').first end it 'reports that a document was written' do expect(response.written_count).to eq(1) end it 'inserts the document' do expect(updated[:field]).to eq('test1') end end context 'when upsert is not specified' do let!(:response) do view.replace_one({ field: 'test1' }) end let(:updated) do authorized_collection.find(field: 'test1').to_a end it 'reports that no documents were written' do 
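# --- Illustrative usage sketch (added by the editor; not part of the original mongo-2.5.1 spec) ---
# The examples around this point show that replace_one only inserts a document for
# a non-matching filter when upsert: true is passed; with the default (or an
# explicit upsert: false) the write is a no-op. A hedged sketch, assuming a local
# mongod and a hypothetical 'users' collection:
require 'mongo'
client = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')
users  = client[:users]
result = users.find(field: 'missing').replace_one({ field: 'fresh' }, upsert: true)
result.written_count               # => 1 -- an upsert took place
users.find(field: 'fresh').count   # => 1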
expect(response.written_count).to eq(0) end it 'does not insert the document' do expect(updated).to be_empty end end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.replace_one({ name: 'doink' }) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(name: 'doink').to_a.size).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is specified as method option' do let(:selector) do { name: 'BANG' } end let(:result) do view.replace_one({ name: 'doink' }, method_options) end before do authorized_collection.insert_one(name: 'bang') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(name: 'doink').to_a.size).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.replace_one(name: 'doink') end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) expect(authorized_collection.find(name: 'bang').to_a.size).to eq(1) end end end describe '#update_many' do context 'when a selector was provided' do let(:selector) do { field: 'test' } end before do authorized_collection.insert_many([{ field: 'test' }, { field: 'test' }]) end let!(:response) do view.update_many('$set'=> { field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').first end it 'returns the number updated' do expect(response.written_count).to eq(2) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when no selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let!(:response) do view.update_many('$set'=> { field: 'testing' }) end let(:updated) do authorized_collection.find end it 'returns the number updated' do expect(response.written_count).to eq(2) end it 'updates all the documents in the collection' do updated.each do |doc| expect(doc[:field]).to eq('testing') end end end context 'when upsert is false' do let(:response) do view.update_many({ '$set'=> { field: 'testing' } }, upsert: false) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' 
do expect(response.written_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when upsert is true' do let!(:response) do view.update_many({ '$set'=> { field: 'testing' } }, upsert: true) end let(:updated) do authorized_collection.find.first end it 'reports that a document was written' do expect(response.written_count).to eq(1) end it 'inserts a document into the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is not specified' do let(:response) do view.update_many({ '$set'=> { field: 'testing' } }) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' do expect(response.written_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.update_many({ '$set' => { other: 'doink' } }) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'baNG') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(2) expect(authorized_collection.find(other: 'doink').to_a.size).to eq(2) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is specified as a method option' do let(:selector) do { name: 'BANG' } end let(:result) do view.update_many({ '$set' => { other: 'doink' } }, method_options) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'baNG') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(2) expect(authorized_collection.find(other: 'doink').to_a.size).to eq(2) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is not specified' do let(:selector) do {name: 'BANG'} end let(:result) do view.update_many('$set' => {other: 'doink'}) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'baNG') end it 'does not apply the collation' do expect(result.written_count).to eq(0) end end end describe '#update_one' do context 'when a selector was provided' do let(:selector) do { field: 'test1' } end before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test1' }]) end let!(:response) do view.update_one('$set'=> { field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').first end it 'updates the first matching document in the collection' do expect(response.written_count).to eq(1) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when no selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let!(:response) do view.update_one('$set'=> { field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').first end it 'updates the first document in the collection' do expect(response.written_count).to eq(1) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is false' do let(:response) do view.update_one({ '$set'=> { field: 'testing' } }, upsert: false) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' do expect(response.written_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when upsert is true' do let!(:response) do view.update_one({ '$set'=> { field: 'testing' } }, upsert: true) end let(:updated) do authorized_collection.find.first end it 'reports that a document was written' do expect(response.written_count).to eq(1) end it 'inserts a document into the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is not specified' do let(:response) do view.update_one({ '$set'=> { field: 'testing' } }) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' do expect(response.written_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when there is a collation specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.update_one({ '$set' => { other: 'doink' } }) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(other: 'doink').to_a.size).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when there is a collation specified as a method option' do let(:selector) do { name: 'BANG' } end let(:result) do view.update_one({ '$set' => { other: 'doink' } }, method_options) end before do authorized_collection.insert_one(name: 'bang') end let(:method_options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(other: 'doink').to_a.size).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:method_options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do view.update_one('$set' => { other: 'doink' }) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) end end end end mongo-2.5.1/spec/mongo/collection/view_spec.rb0000644000004100000410000002202113257253113021402 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection::View do let(:filter) do {} end let(:options) do {} end let(:view) do described_class.new(authorized_collection, filter, options) end after do authorized_collection.delete_many end describe '#==' do context 'when the other object is not a collection view' do let(:other) { 'test' } it 'returns false' do expect(view).to_not eq(other) end end context 'when the views have the same collection, filter, and options' do let(:other) do described_class.new(authorized_collection, filter, options) end it 'returns true' do expect(view).to eq(other) end end context 'when two views have a different collection' do let(:other_collection) do authorized_client[:other] end let(:other) do described_class.new(other_collection, filter, options) end it 'returns false' do expect(view).not_to eq(other) end end context 'when two views have a different filter' do let(:other_filter) do { 'name' => 'Emily' } end let(:other) do described_class.new(authorized_collection, other_filter, options) end it 'returns false' do expect(view).not_to eq(other) end end context 'when two views have different options' do let(:other_options) do { 'limit' => 20 } end let(:other) do described_class.new(authorized_collection, filter, other_options) end it 'returns false' do expect(view).not_to eq(other) end end end describe 'copy' do let(:view_clone) do view.clone end it 'dups the options' do expect(view.options).not_to be(view_clone.options) end it 'dups the filter' do expect(view.filter).not_to be(view_clone.filter) end it 'references the same collection' do expect(view.collection).to be(view_clone.collection) end end describe '#each' do let(:documents) do (1..10).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end after do 
authorized_collection.delete_many end context 'when a block is not provided' do let(:enumerator) do view.each end it 'returns an enumerator' do enumerator.each do |doc| expect(doc).to have_key('field') end end end describe '#close_query' do let(:options) do { :batch_size => 1 } end let(:cursor) do view.instance_variable_get(:@cursor) end before do view.to_enum.next cursor.instance_variable_set(:@cursor_id, 1) unless find_command_enabled? end it 'sends a kill cursors command for the cursor' do expect(cursor).to receive(:kill_cursors).and_call_original view.close_query end end describe 'collation' do context 'when the view has a collation set' do let(:options) do { collation: { locale: 'en_US', strength: 2 } } end let(:filter) do { name: 'BANG' } end before do authorized_collection.insert_one(name: 'bang') end let(:result) do view.limit(-1).first end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the view does not have a collation set' do let(:filter) do { name: 'BANG' } end before do authorized_collection.insert_one(name: 'bang') end let(:result) do view.limit(-1).first end it 'does not apply the collation' do expect(result).to be_nil end end end end describe '#hash' do let(:other) do described_class.new(authorized_collection, filter, options) end it 'returns a unique value based on collection, filter, options' do expect(view.hash).to eq(other.hash) end context 'when two views only have different collections' do let(:other_collection) do authorized_client[:other] end let(:other) do described_class.new(other_collection, filter, options) end it 'returns different hash values' do expect(view.hash).not_to eq(other.hash) end end context 'when two views only have different filter' do let(:other_filter) do { 'name' => 'Emily' } end let(:other) do described_class.new(authorized_collection, other_filter, options) end it 'returns different hash values' do expect(view.hash).not_to eq(other.hash) end end context 'when two views only have different options' do let(:other_options) do { 'limit' => 20 } end let(:other) do described_class.new(authorized_collection, filter, other_options) end it 'returns different hash values' do expect(view.hash).not_to eq(other.hash) end end end describe '#initialize' do context 'when the filter is not a valid document' do let(:filter) do 'y' end let(:options) do { limit: 5 } end it 'raises an error' do expect do view end.to raise_error(Mongo::Error::InvalidDocument) end end context 'when the filter and options are standard' do let(:filter) do { 'name' => 'test' } end let(:options) do { 'sort' => { 'name' => 1 }} end it 'parses a standard filter' do expect(view.filter).to eq(filter) end it 'parses standard options' do expect(view.options).to eq(options) end it 'only freezes the view filter, not the user filter' do expect(view.filter.frozen?).to be(true) expect(filter.frozen?).to be(false) end it 'only freezes the view options, not the user options' do expect(view.options.frozen?).to be(true) expect(options.frozen?).to be(false) end 
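# --- Illustrative usage sketch (added by the editor; not part of the original mongo-2.5.1 spec) ---
# A view can also be constructed directly, as these examples do; the constructor
# copies and freezes the filter and options it receives, leaving the caller's
# hashes untouched. A hedged sketch with hypothetical names, assuming a local
# mongod on 127.0.0.1:27017:
require 'mongo'
collection = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')[:items]
filter  = { 'name' => 'test' }
options = { 'sort' => { 'name' => 1 } }
view = Mongo::Collection::View.new(collection, filter, options)
view.filter.frozen?   # => true  -- the view's own copy is frozen
filter.frozen?        # => false -- the user-supplied hash is not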
end context 'when the filter contains modifiers' do let(:filter) do { :$query => { :name => 'test' }, :$comment => 'testing' } end let(:options) do { :sort => { name: 1 }} end it 'parses a standard filter' do expect(view.filter).to eq('name' => 'test') end it 'parses standard options' do expect(view.options).to eq('sort' => { 'name' => 1 }, 'comment' => 'testing') end end context 'when the options contain modifiers' do let(:filter) do { 'name' => 'test' } end let(:options) do { :sort => { name: 1 }, :modifiers => { :$comment => 'testing'}} end it 'parses a standard filter' do expect(view.filter).to eq('name' => 'test') end it 'parses standard options' do expect(view.options).to eq('sort' => { 'name' => 1 }, 'comment' => 'testing') end end context 'when the filter and options both contain modifiers' do let(:filter) do { :$query => { 'name' => 'test' }, :$hint => { name: 1 }} end let(:options) do { :sort => { name: 1 }, :modifiers => { :$comment => 'testing' }} end it 'parses a standard filter' do expect(view.filter).to eq('name' => 'test') end it 'parses standard options' do expect(view.options).to eq( 'sort' => { 'name' => 1 }, 'comment' => 'testing', 'hint' => { 'name' => 1 } ) end end end describe '#inspect' do context 'when there is a namespace, filter, and options' do let(:options) do { 'limit' => 5 } end let(:filter) do { 'name' => 'Emily' } end it 'returns a string' do expect(view.inspect).to be_a(String) end it 'returns a string containing the collection namespace' do expect(view.inspect).to match(/.*#{authorized_collection.namespace}.*/) end it 'returns a string containing the filter' do expect(view.inspect).to match(/.*#{filter.inspect}.*/) end it 'returns a string containing the options' do expect(view.inspect).to match(/.*#{options.inspect}.*/) end end end end mongo-2.5.1/spec/mongo/protocol/0000755000004100000410000000000013257253113016602 5ustar www-datawww-datamongo-2.5.1/spec/mongo/protocol/update_spec.rb0000644000004100000410000001110013257253113021414 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Protocol::Update do let(:opcode) { 2001 } let(:db) { TEST_DB } let(:coll) { TEST_COLL } let(:ns) { "#{db}.#{coll}" } let(:selector) { { :name => 'Tyler' } } let(:update_doc) { { :name => 'Bob' } } let(:options) { Hash.new } let(:message) do described_class.new(db, coll, selector, update_doc, options) end describe '#initialize' do it 'sets the namespace' do expect(message.namespace).to eq(ns) end it 'sets the selector' do expect(message.selector).to eq(selector) end it 'sets the update document' do expect(message.update).to eq(update_doc) end end describe '#==' do context 'when the other is an update' do context 'when the fields are equal' do let(:other) do described_class.new(db, coll, selector, update_doc, options) end it 'returns true' do expect(message).to eq(other) end end context 'when the database is not equal' do let(:other) do described_class.new('tyler', coll, selector, update_doc, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the collection is not equal' do let(:other) do described_class.new(db, 'tyler', selector, update_doc, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the selector is not equal' do let(:other) do described_class.new(db, coll, { :a => 1 }, update_doc, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the update document is not equal' do let(:other) do described_class.new(db, coll, selector, { :a => 1 }, options) 
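# --- Illustrative sketch (added by the editor; not part of the original mongo-2.5.1 spec) ---
# Kept entirely in comment form because this point falls inside a `let` block.
# The OP_UPDATE message under test is built from a database name, collection name,
# selector, update document and options; the serialize examples below show that the
# :upsert flag sets the first bit of the flags field (1) and :multi_update the
# second (2). A hedged construction sketch with hypothetical names:
#
#   update = Mongo::Protocol::Update.new(
#     'test_db', 'users',
#     { name: 'Tyler' },                 # selector
#     { '$set' => { name: 'Bob' } },     # update document
#     flags: [:upsert, :multi_update]    # serialized flags value: 3
#   )
#   update.serialize                     # => BSON::ByteBuffer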
end it 'returns false' do expect(message).not_to eq(other) end end context 'when the options are not equal' do let(:other) do described_class.new(db, coll, selector, update_doc, :flags => :upsert) end it 'returns false' do expect(message).not_to eq(other) end end end context 'when the other is not a query' do let(:other) do expect(message).not_to eq('test') end end end describe '#hash' do let(:values) do message.send(:fields).map do |field| message.instance_variable_get(field[:name]) end end it 'returns a hash of the field values' do expect(message.hash).to eq(values.hash) end end describe '#replyable?' do it 'returns false' do expect(message).to_not be_replyable end end describe '#serialize' do let(:bytes) { message.serialize } include_examples 'message with a header' describe 'zero' do let(:field) { bytes.to_s[16..19] } it 'serializes a zero' do expect(field).to be_int32(0) end end describe 'namespace' do let(:field) { bytes.to_s[20..36] } it 'serializes the namespace' do expect(field).to be_cstring(ns) end end describe 'flags' do let(:field) { bytes.to_s[37..40] } context 'when no flags are provided' do it 'does not set any bits' do expect(field).to be_int32(0) end end context 'when flags are provided' do let(:options) { { :flags => flags } } context 'upsert flag' do let(:flags) { [:upsert] } it 'sets the first bit' do expect(field).to be_int32(1) end end context 'multi update' do let(:flags) { [:multi_update] } it 'sets the second bit' do expect(field).to be_int32(2) end end end end describe 'selector' do let(:field) { bytes.to_s[41..61] } it 'serializes the selector' do expect(field).to be_bson(selector) end end describe 'update' do let(:field) { bytes.to_s[62..80] } it 'serializes the update' do expect(field).to be_bson(update_doc) end end end describe '#registry' do context 'when the class is loaded' do it 'registers the op code in the Protocol Registry' do expect(Mongo::Protocol::Registry.get(described_class::OP_CODE)).to be(described_class) end it 'creates an #op_code instance method' do expect(message.op_code).to eq(described_class::OP_CODE) end end end end mongo-2.5.1/spec/mongo/protocol/compressed_spec.rb0000644000004100000410000000315413257253113022310 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Protocol::Compressed do let(:original_message) { Mongo::Protocol::Query.new(TEST_DB, TEST_COLL, { ping: 1 }) } let(:compressor) { 'zlib' } let(:level) { nil } let(:message) do described_class.new(original_message, compressor, level) end describe '#serialize' do context 'when zlib compression level is not provided' do let(:original_message_bytes) do buf = BSON::ByteBuffer.new original_message.send(:serialize_fields, buf) buf.to_s end it 'does not set a compression level' do expect(Zlib::Deflate).to receive(:deflate).with(original_message_bytes, nil).and_call_original message.serialize end end context 'when zlib compression level is provided' do let(:level) { 1 } let(:original_message_bytes) do buf = BSON::ByteBuffer.new original_message.send(:serialize_fields, buf) buf.to_s end it 'uses the compression level' do expect(Zlib::Deflate).to receive(:deflate).with(original_message_bytes, 1).and_call_original message.serialize end end end describe '#replyable?' do context 'when the original message is replyable' do it 'returns true' do expect(message.replyable?).to be(true) end end context 'when the original message is not replyable', if: op_msg_enabled? 
do let(:original_message) do Mongo::Protocol::Msg.new([:more_to_come], {}, { ping: 1 }) end it 'returns false' do expect(message.replyable?).to be(false) end end end end mongo-2.5.1/spec/mongo/protocol/query_spec.rb0000644000004100000410000002002113257253113021301 0ustar www-datawww-data# encoding: UTF-8 require 'spec_helper' describe Mongo::Protocol::Query do let(:opcode) { 2004 } let(:db) { TEST_DB } let(:coll) { TEST_COLL } let(:ns) { "#{db}.#{coll}" } let(:selector) { { :name => 'Tyler' } } let(:options) { Hash.new } let(:message) do described_class.new(db, coll, selector, options) end describe '#initialize' do it 'sets the namespace' do expect(message.namespace).to eq(ns) end it 'sets the selector' do expect(message.selector).to eq(selector) end context 'when options are provided' do context 'when flags are provided' do let(:options) { { :flags => [:slave_ok] } } it 'sets the flags' do expect(message.flags).to eq(options[:flags]) end end context 'when a limit is provided' do let(:options) { { :limit => 5 } } it 'sets the limit' do expect(message.limit).to eq(options[:limit]) end end context 'when a skip is provided' do let(:options) { { :skip => 13 } } it 'sets the flags' do expect(message.skip).to eq(options[:skip]) end end context 'when a projection is provided' do let(:options) { { :project => { :_id => 0 } } } it 'sets the projection' do expect(message.project).to eq(options[:project]) end end end end describe '#==' do context 'when the other is a query' do context 'when the fields are equal' do let(:other) do described_class.new(db, coll, selector, options) end it 'returns true' do expect(message).to eq(other) end end context 'when the database is not equal' do let(:other) do described_class.new('tyler', coll, selector, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the collection is not equal' do let(:other) do described_class.new(db, 'tyler', selector, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the selector is not equal' do let(:other) do described_class.new(db, coll, { :a => 1 }, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the options are not equal' do let(:other) do described_class.new(db, coll, selector, :skip => 2) end it 'returns false' do expect(message).not_to eq(other) end end end context 'when the other is not a query' do let(:other) do expect(message).not_to eq('test') end end end describe '#hash' do let(:values) do message.send(:fields).map do |field| message.instance_variable_get(field[:name]) end end it 'returns a hash of the field values' do expect(message.hash).to eq(values.hash) end end describe '#replyable?' 
do it 'returns true' do expect(message).to be_replyable end end describe '#serialize' do let(:bytes) { message.serialize } include_examples 'message with a header' describe 'flags' do let(:field) { bytes.to_s[16..19] } context 'when no flags are provided' do it 'does not set any bits' do expect(field).to be_int32(0) end end context 'when flags are provided' do let(:options) { { :flags => flags } } context 'tailable cursor flag' do let(:flags) { [:tailable_cursor] } it 'sets the second bit' do expect(field).to be_int32(2) end end context 'slave ok flag' do let(:flags) { [:slave_ok] } it 'sets the third bit' do expect(field).to be_int32(4) end end context 'oplog replay flag' do let(:flags) { [:oplog_replay] } it 'sets the fourth bit' do expect(field).to be_int32(8) end end context 'no cursor timeout flag' do let(:flags) { [:no_cursor_timeout] } it 'sets the fifth bit' do expect(field).to be_int32(16) end end context 'await data flag' do let(:flags) { [:await_data] } it 'sets the sixth bit' do expect(field).to be_int32(32) end end context 'exhaust flag' do let(:flags) { [:exhaust] } it 'sets the seventh bit' do expect(field).to be_int32(64) end end context 'partial flag' do let(:flags) { [:partial] } it 'sets the eighth bit' do expect(field).to be_int32(128) end end context 'multiple flags' do let(:flags) { [:await_data, :slave_ok] } it 'sets the correct bits' do expect(field).to be_int32(36) end end end end describe 'namespace' do let(:field) { bytes.to_s[20..36] } it 'serializes the namespace' do expect(field).to be_cstring(ns) end context 'when the namespace contains unicode characters' do let(:field) { bytes.to_s[20..40] } let(:coll) do 'områder' end it 'serializes the namespace' do expect(field).to be_cstring(ns) end end end describe 'skip' do let(:field) { bytes.to_s[37..40] } context 'when no skip is provided' do it 'serializes a zero' do expect(field).to be_int32(0) end end context 'when skip is provided' do let(:options) { { :skip => 5 } } it 'serializes the skip' do expect(field).to be_int32(options[:skip]) end end end describe 'limit' do let(:field) { bytes.to_s[41..44] } context 'when no limit is provided' do it 'serializes a zero' do expect(field).to be_int32(0) end end context 'when limit is provided' do let(:options) { { :limit => 123 } } it 'serializes the limit' do expect(field).to be_int32(options[:limit]) end end end describe 'selector' do let(:field) { bytes.to_s[45..65] } it 'serializes the selector' do expect(field).to be_bson(selector) end end describe 'project' do let(:field) { bytes.to_s[66..-1] } context 'when no projection is provided' do it 'does not serialize a projection' do expect(field).to be_empty end end context 'when projection is provided' do let(:options) { { :project => projection } } let(:projection) { { :_id => 0 } } it 'serializes the projection' do expect(field).to be_bson(projection) end end end end describe '#registry' do context 'when the class is loaded' do it 'registers the op code in the Protocol Registry' do expect(Mongo::Protocol::Registry.get(described_class::OP_CODE)).to be(described_class) end it 'creates an #op_code instance method' do expect(message.op_code).to eq(described_class::OP_CODE) end end end describe '#compress' do context 'when the selector represents a command that can be compressed' do let(:selector) do { ping: 1 } end it 'returns a compressed message' do expect(message.compress!('zlib')).to be_a(Mongo::Protocol::Compressed) end end context 'when the selector represents a command for which compression is not allowed' do
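# --- Illustrative sketch (added by the editor; not part of the original mongo-2.5.1 spec) ---
# compress! wraps an OP_QUERY command in Mongo::Protocol::Compressed, except for
# the security-sensitive commands listed in
# Mongo::Monitoring::Event::Secure::REDACTED_COMMANDS (iterated just below), which
# are returned unchanged and sent uncompressed. A hedged sketch, assuming a
# hypothetical command document and that 'saslStart' is among the redacted commands:
require 'mongo'
ping = Mongo::Protocol::Query.new('admin', '$cmd', { ping: 1 }, limit: -1)
ping.compress!('zlib').class            # => Mongo::Protocol::Compressed
sasl = Mongo::Protocol::Query.new('admin', '$cmd', { 'saslStart' => 1 }, limit: -1)
sasl.compress!('zlib').equal?(sasl)     # => true -- left uncompressed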
Mongo::Monitoring::Event::Secure::REDACTED_COMMANDS.each do |command| let(:selector) do { command => 1 } end context "when the command is #{command}" do it 'does not allow compression for the command' do expect(message.compress!('zlib')).to be(message) end end end end end end mongo-2.5.1/spec/mongo/protocol/get_more_spec.rb0000644000004100000410000000676613257253113021761 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Protocol::GetMore do let(:opcode) { 2005 } let(:db) { TEST_DB } let(:coll) { TEST_COLL } let(:ns) { "#{db}.#{coll}" } let(:limit) { 25 } let(:cursor_id) { 12345 } let(:message) do described_class.new(db, coll, limit, cursor_id) end describe '#initialize' do it 'sets the namepsace' do expect(message.namespace).to eq(ns) end it 'sets the number to return' do expect(message.number_to_return).to eq(limit) end it 'sets the cursor id' do expect(message.cursor_id).to eq(cursor_id) end end describe '#==' do context 'when the other is a getmore' do context 'when the fields are equal' do let(:other) do described_class.new(db, coll, limit, cursor_id) end it 'returns true' do expect(message).to eq(other) end end context 'when the database is not equal' do let(:other) do described_class.new('tyler', coll, limit, cursor_id) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the collection is not equal' do let(:other) do described_class.new(db, 'tyler', limit, cursor_id) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the limit is not equal' do let(:other) do described_class.new(db, coll, 123, cursor_id) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the cursor id is not equal' do let(:other) do described_class.new(db, coll, limit, 7777) end it 'returns false' do expect(message).not_to eq(other) end end end context 'when the other is not a getmore' do let(:other) do expect(message).not_to eq('test') end end end describe '#hash' do let(:values) do message.send(:fields).map do |field| message.instance_variable_get(field[:name]) end end it 'returns a hash of the field values' do expect(message.hash).to eq(values.hash) end end describe '#replyable?' 
do it 'returns true' do expect(message).to be_replyable end end describe '#serialize' do let(:bytes) { message.serialize } include_examples 'message with a header' describe 'zero' do let(:field) { bytes.to_s[16..19] } it 'does not set any bits' do expect(field).to be_int32(0) end end describe 'namespace' do let(:field) { bytes.to_s[20..36] } it 'serializes the namespace' do expect(field).to be_cstring(ns) end end describe 'number to return' do let(:field) { bytes.to_s[37..40] } it 'serializes the number to return' do expect(field).to be_int32(limit) end end describe 'cursor id' do let(:field) { bytes.to_s[41..48] } it 'serializes the cursor id' do expect(field).to be_int64(cursor_id) end end end describe '#registry' do context 'when the class is loaded' do it 'registers the op code in the Protocol Registry' do expect(Mongo::Protocol::Registry.get(described_class::OP_CODE)).to be(described_class) end it 'creates an #op_code instance method' do expect(message.op_code).to eq(described_class::OP_CODE) end end end end mongo-2.5.1/spec/mongo/protocol/reply_spec.rb0000644000004100000410000001166513257253113021305 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Protocol::Reply do let(:length) { 78 } let(:request_id) { 0 } let(:response_to) { 0 } let(:op_code) { 1 } let(:flags) { 0 } let(:start) { 0 } let(:n_returned) { 2 } let(:cursor_id) { 999_999 } let(:doc) { { 'name' => 'Tyler' } } let(:documents) { [doc] * 2 } let(:header) do [length, request_id, response_to, op_code].pack('l 'Tyler' } } let(:options) { Hash.new } let(:message) do described_class.new(db, coll, selector, options) end describe '#initialize' do it 'sets the namepsace' do expect(message.namespace).to eq(ns) end it 'sets the selector' do expect(message.selector).to eq(selector) end context 'when options are provided' do context 'when flags are provided' do let(:options) { { :flags => [:single_remove] } } it 'sets the flags' do expect(message.flags).to eq(options[:flags]) end end end end describe '#==' do context 'when the other is a delete' do context 'when the fields are equal' do let(:other) do described_class.new(db, coll, selector, options) end it 'returns true' do expect(message).to eq(other) end end context 'when the database is not equal' do let(:other) do described_class.new('tyler', coll, selector, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the collection is not equal' do let(:other) do described_class.new(db, 'tyler', selector, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the selector is not equal' do let(:other) do described_class.new(db, coll, { :a => 1 }, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the options are not equal' do let(:other) do described_class.new(db, coll, selector, :flags => [:single_remove]) end it 'returns false' do expect(message).not_to eq(other) end end end context 'when the other is not a delete' do let(:other) do expect(message).not_to eq('test') end end end describe '#hash' do let(:values) do message.send(:fields).map do |field| message.instance_variable_get(field[:name]) end end it 'returns a hash of the field values' do expect(message.hash).to eq(values.hash) end end describe '#replyable?' 
do it 'returns false' do expect(message).to_not be_replyable end end describe '#serialize' do let(:bytes) { message.serialize } include_examples 'message with a header' describe 'zero' do let(:field) { bytes.to_s[16..19] } it 'serializes a zero' do expect(field).to be_int32(0) end end describe 'namespace' do let(:field) { bytes.to_s[20..36] } it 'serializes the namespace' do expect(field).to be_cstring(ns) end end describe 'flags' do let(:field) { bytes.to_s[37..40] } context 'when no flags are provided' do it 'does not set any bits' do expect(field).to be_int32(0) end end context 'when flags are provided' do let(:options) { { :flags => flags } } context 'single remove flag' do let(:flags) { [:single_remove] } it 'sets the first bit' do expect(field).to be_int32(1) end end end end describe 'selector' do let(:field) { bytes.to_s[41..-1] } it 'serializes the selector' do expect(field).to be_bson(selector) end end end describe '#registry' do context 'when the class is loaded' do it 'registers the op code in the Protocol Registry' do expect(Mongo::Protocol::Registry.get(described_class::OP_CODE)).to be(described_class) end it 'creates an #op_code instance method' do expect(message.op_code).to eq(described_class::OP_CODE) end end end end mongo-2.5.1/spec/mongo/protocol/msg_spec.rb0000644000004100000410000003035713257253113020737 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Protocol::Msg do let(:opcode) { 2013 } let(:flags) { [:none] } let(:options) { {} } let(:global_args) { { '$db' => TEST_DB, ping: 1 } } let(:sections) { [ ] } let(:message) do described_class.new(flags, options, global_args, *sections) end let(:deserialized) do Mongo::Protocol::Message.deserialize(StringIO.new(message.serialize.to_s)) end describe '#initialize' do it 'adds the global_args to the sections' do expect(message.sections[0]).to eq(type: 0, payload: global_args) end context 'when flag bits are provided' do context 'when valid flags are provided' do let(:flags) { [:more_to_come] } it 'sets the flags' do expect(message.flags).to eq(flags) end end context 'when flags are not provided' do let(:flags) { nil } it 'sets the flags to [:none]' do expect(message.flags).to eq([:none]) end end context 'when an invalid flag is provided' do let(:flags) { [:checksum_present, :none] } let(:flag_bytes) { message.serialize.to_s[16..19] } it 'sets the flags' do expect(message.flags).to eq([:checksum_present, :none]) end it 'only serializes the valid flags' do expect(flag_bytes).to be_int32(1) end end end end describe '#==' do context 'when the other is a msg' do context 'when the fields are equal' do let(:other) do described_class.new(flags, options, global_args) end it 'returns true' do expect(message).to eq(other) end end context 'when the flags are not equal' do let(:other) do described_class.new([:more_to_come], options, global_args) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the global_args are not equal' do let(:other) do described_class.new(flags, nil, { '$db'=> TEST_DB, ismaster: 1 }) end it 'returns false' do expect(message).not_to eq(other) end end end context 'when the other is not a msg' do let(:other) do expect(message).not_to eq('test') end end end describe '#hash' do let(:values) do message.send(:fields).map do |field| message.instance_variable_get(field[:name]) end end it 'returns a hash of the field values' do expect(message.hash).to eq(values.hash) end end describe '#replyable?' 
do context 'when the :more_to_come flag is set' do let(:flags) { [:more_to_come] } it 'returns false' do expect(message).to_not be_replyable end end context 'when the :more_to_come flag is not set' do it 'returns true' do expect(message).to be_replyable end end end describe '#serialize' do let(:bytes) do message.serialize end let(:flag_bytes) { bytes.to_s[16..19] } let(:payload_type) { bytes.to_s[20] } let(:payload_bytes) { bytes.to_s[21..-1] } let(:global_args) { { ping: 1 } } include_examples 'message with a header' context 'when flags are provided' do context 'when checksum_present is provided' do let(:flags) do [:checksum_present] end it 'sets the flag bits' do expect(flag_bytes).to be_int32(1) end end context 'when more_to_come is provided' do let(:flags) do [:more_to_come] end it 'sets the flag bits' do expect(flag_bytes).to be_int32(2) end end end context 'when no flag is provided' do let(:flags) do nil end it 'sets the flag bits to 0' do expect(flag_bytes).to be_int32(0) end end context 'when global args are provided' do it 'sets the payload type' do expect(payload_type).to eq(0.chr) end it 'serializes the global arguments' do expect(payload_bytes).to be_bson(global_args) end end context 'when additional sections are provided' do let(:sections) do [ section ] end context 'when an invalid payload type is specified' do let(:section) do { type: 2, payload: { identifier: 'documents', sequence: [ { a: 1 } ] } } end it 'raises an exception' do expect { message.serialize }.to raise_exception(Mongo::Error::UnknownPayloadType) end end context 'when a 0 payload type is specified' do let(:section) do { type: 0, payload: { ismaster: 1 } } end let(:section_payload_type) { bytes.to_s[36] } let(:section_bytes) { bytes.to_s[37..-1] } it 'sets the payload type' do expect(section_payload_type).to eq(0.chr) end it 'serializes the section' do expect(section_bytes).to be_bson(section[:payload]) end end context 'when a no payload type is specified' do let(:section) do { payload: { ismaster: 1 } } end let(:section_payload_type) { bytes.to_s[36] } let(:section_bytes) { bytes.to_s[37..-1] } it 'sets the payload type as 0' do expect(section_payload_type).to eq(0.chr) end it 'serializes the section' do expect(section_bytes).to be_bson(section[:payload]) end end context 'when a 1 payload type is specified' do let(:section) do { type: 1, payload: { identifier: 'documents', sequence: [ { a: 1 } ] } } end let(:section_payload_type) { bytes.to_s[36] } let(:section_size) { bytes.to_s[37..40] } let(:section_identifier) { bytes.to_s[41..50] } let(:section_bytes) { bytes.to_s[51..-1] } it 'sets the payload type' do expect(section_payload_type).to eq(1.chr) end it 'sets the section size' do expect(section_size).to be_int32(26) end it 'serializes the section identifier' do expect(section_identifier).to eq("documents#{BSON::NULL_BYTE}") end it 'serializes the section bytes' do expect(section_bytes).to be_bson({ a: 1 }) end context 'when two sections are specified' do let(:sections) do [ section1, section2 ] end let(:section1) do { type: 1, payload: { identifier: 'documents', sequence: [ { a: 1 } ] } } end let(:section2) do { type: 1, payload: { identifier: 'updates', sequence: [ {:q => { :bar => 1 }, :u => { :$set => { :bar => 2 } }, :multi => true, :upsert => false } ] } } end let(:section1_payload_type) { bytes.to_s[36] } let(:section1_size) { bytes.to_s[37..40] } let(:section1_identifier) { bytes.to_s[41..50] } let(:section1_bytes) { bytes.to_s[51..62] } it 'sets the first payload type' do 
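# The offsets asserted below follow the serialized OP_MSG layout exercised by
# this spec: a one-byte payload type at offset 36 (16-byte header, 4 flag
# bytes, the type 0 payload byte and the 15-byte BSON encoding of { ping: 1 }),
# followed by an int32 section size, a NULL-terminated identifier such as
# "documents", and then the BSON document sequence itself.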
expect(section1_payload_type).to eq(1.chr) end it 'sets the first section size' do expect(section1_size).to be_int32(26) end it 'serializes the first section identifier' do expect(section1_identifier).to eq("documents#{BSON::NULL_BYTE}") end it 'serializes the first section bytes' do expect(section1_bytes).to be_bson({ a: 1 }) end let(:section2_payload_type) { bytes.to_s[63] } let(:section2_size) { bytes.to_s[64..67] } let(:section2_identifier) { bytes.to_s[68..75] } let(:section2_bytes) { bytes.to_s[76..-1] } it 'sets the second payload type' do expect(section2_payload_type).to eq(1.chr) end it 'sets the second section size' do expect(section2_size).to be_int32(79) end it 'serializes the second section identifier' do expect(section2_identifier).to eq("updates#{BSON::NULL_BYTE}") end it 'serializes the second section bytes' do expect(section2_bytes).to be_bson(section2[:payload][:sequence][0]) end end end context 'when the sections are mixed types and payload type 1 comes before type 0' do let(:section1) do { type: 1, payload: { identifier: 'documents', sequence: [ { 'a' => 1 }]}} end let(:section2) do { type: 0, payload: { 'b' => 2 } } end let(:sections) do [ section1, section2 ] end it 'serializes all sections' do expect(deserialized.documents).to eq([ BSON::Document.new(global_args), { 'a' => 1 }, { 'b' => 2 }]) end end end context 'when the validating_keys option is true with payload 1' do let(:sections) do [ section ] end let(:section) do { type: 1, payload: { identifier: 'documents', sequence: [ { '$b' => 2 } ] } } end let(:options) do { validating_keys: true } end it 'checks the sequence document keys' do expect { message.serialize }.to raise_exception(BSON::String::IllegalKey) end end context 'when the validating_keys option is false with payload 1' do let(:sections) do [ section ] end let(:section) do { type: 1, payload: { identifier: 'documents', sequence: [ { '$b' => 2 } ] } } end let(:options) do { validating_keys: false } end it 'does not check the sequence document keys' do expect(message.serialize).to be_a(BSON::ByteBuffer) end end end describe '#deserialize' do context 'when the payload type is valid' do it 'deserializes the message' do expect(deserialized.documents).to eq([ BSON::Document.new(global_args) ]) end end context 'when the payload type is not valid' do let(:invalid_payload_message) do message.serialize.to_s.tap do |s| s[20] = 5.chr end end it 'raises an exception' do expect { Mongo::Protocol::Message.deserialize(StringIO.new(invalid_payload_message)) }.to raise_exception(Mongo::Error::UnknownPayloadType) end end end describe '#payload' do context 'when the msg only contains a payload type 0' do it 'creates a payload with the command' do expect(message.payload[:command_name]).to eq(:ping) expect(message.payload[:database_name]).to eq(TEST_DB) expect(message.payload[:command]).to eq('ping' => 1) expect(message.payload[:request_id]).to eq(message.request_id) end end context 'when the msg contains a payload type 1' do let(:section) do { type: 1, payload: { identifier: 'documents', sequence: [ { a: 1 } ] } } end let(:global_args) do { '$db' => TEST_DB, 'insert' => TEST_COLL, 'ordered' => true } end let(:sections) do [ section ] end let(:expected_command_doc) do { 'insert' => TEST_COLL, 'documents' => [{ 'a' => 1 }], 'ordered' => true } end it 'creates a payload with the command' do expect(message.payload[:command_name]).to eq('insert') expect(message.payload[:database_name]).to eq(TEST_DB) expect(message.payload[:command]).to eq(expected_command_doc)
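# As the expectation above shows, #payload folds the type 1 section back into
# the command document under its identifier key ('documents'), merged with the
# type 0 global arguments ('insert', 'ordered'), presumably so that command
# monitoring sees a single logical command document.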
expect(message.payload[:request_id]).to eq(message.request_id) end end end describe '#registry' do context 'when the class is loaded' do it 'registers the op code in the Protocol Registry' do expect(Mongo::Protocol::Registry.get(described_class::OP_CODE)).to be(described_class) end it 'creates an #op_code instance method' do expect(message.op_code).to eq(described_class::OP_CODE) end end end end mongo-2.5.1/spec/mongo/protocol/registry_spec.rb0000644000004100000410000000127213257253113022013 0ustar www-datawww-datarequire "spec_helper" describe Mongo::Protocol::Registry do describe ".get" do context "when the type has a corresponding class" do before do described_class.register(Mongo::Protocol::Query::OP_CODE, Mongo::Protocol::Query) end let(:klass) do described_class.get(Mongo::Protocol::Query::OP_CODE, "message") end it "returns the class" do expect(klass).to eq(Mongo::Protocol::Query) end end context "when the type has no corresponding class" do it "raises an error" do expect { described_class.get(-100) }.to raise_error(Mongo::Error::UnsupportedMessageType) end end end end mongo-2.5.1/spec/mongo/protocol/insert_spec.rb0000644000004100000410000000766613257253113021460 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Protocol::Insert do let(:opcode) { 2002 } let(:db) { TEST_DB } let(:coll) { TEST_COLL } let(:ns) { "#{db}.#{coll}" } let(:doc1) { { :name => 'Tyler' } } let(:doc2) { { :name => 'Brandon' } } let(:docs) { [doc1, doc2] } let(:options) { Hash.new } let(:message) do described_class.new(db, coll, docs, options) end describe '#initialize' do it 'sets the namespace' do expect(message.namespace).to eq(ns) end it 'sets the documents' do expect(message.documents).to eq(docs) end context 'when options are provided' do context 'when flags are provided' do let(:options) { { :flags => [:continue_on_error] } } it 'sets the flags' do expect(message.flags).to eq(options[:flags]) end end end end describe '#==' do context 'when the other is an insert' do context 'when the fields are equal' do let(:other) do described_class.new(db, coll, docs, options) end it 'returns true' do expect(message).to eq(other) end end context 'when the database is not equal' do let(:other) do described_class.new('tyler', coll, docs, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the collection is not equal' do let(:other) do described_class.new(db, 'tyler', docs, options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the documents are not equal' do let(:other) do described_class.new(db, coll, docs[1..1], options) end it 'returns false' do expect(message).not_to eq(other) end end context 'when the options are not equal' do let(:other) do described_class.new(db, coll, docs, :flags => [:continue_on_error]) end it 'returns false' do expect(message).not_to eq(other) end end end context 'when the other is not an insert' do let(:other) do expect(message).not_to eq('test') end end end describe '#hash' do let(:values) do message.send(:fields).map do |field| message.instance_variable_get(field[:name]) end end it 'returns a hash of the field values' do expect(message.hash).to eq(values.hash) end end describe '#replyable?'
do it 'returns false' do expect(message).to_not be_replyable end end describe '#serialize' do let(:bytes) { message.serialize } include_examples 'message with a header' describe 'flags' do let(:field) { bytes.to_s[16..19] } context 'when no flags are provided' do it 'does not set any bits' do expect(field).to be_int32(0) end end context 'when flags are provided' do let(:options) { { :flags => flags } } context 'continue on error flag' do let(:flags) { [:continue_on_error] } it 'sets the first bit' do expect(field).to be_int32(1) end end end end describe 'namespace' do let(:field) { bytes.to_s[20..36] } it 'serializes the namespace' do expect(field).to be_cstring(ns) end end describe 'documents' do let(:field) { bytes.to_s[37..-1] } it 'serializes the documents' do expect(field).to be_bson_sequence(docs) end end end describe '#registry' do context 'when the class is loaded' do it 'registers the op code in the Protocol Registry' do expect(Mongo::Protocol::Registry.get(described_class::OP_CODE)).to be(described_class) end it 'creates an #op_code instance method' do expect(message.op_code).to eq(described_class::OP_CODE) end end end end mongo-2.5.1/spec/mongo/protocol/kill_cursors_spec.rb0000644000004100000410000000510413257253113022654 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Protocol::KillCursors do let(:opcode) { 2007 } let(:cursor_ids) { [123, 456, 789] } let(:id_count) { cursor_ids.size } let(:collection) { TEST_COLL } let(:database) { TEST_DB } let(:message) do described_class.new(collection, database, cursor_ids) end describe '#initialize' do it 'sets the cursor ids' do expect(message.cursor_ids).to eq(cursor_ids) end it 'sets the count' do expect(message.id_count).to eq(id_count) end end describe '#==' do context 'when the other is a killcursors' do context 'when the cursor ids are equal' do let(:other) do described_class.new(collection, database, cursor_ids) end it 'returns true' do expect(message).to eq(other) end end context 'when the cursor ids are not equal' do let(:other) do described_class.new(collection, database, [123, 456]) end it 'returns false' do expect(message).not_to eq(other) end end end context 'when the other is not a killcursors' do let(:other) do expect(message).not_to eq('test') end end end describe '#hash' do let(:values) do message.send(:fields).map do |field| message.instance_variable_get(field[:name]) end end it 'returns a hash of the field values' do expect(message.hash).to eq(values.hash) end end describe '#replyable?' 
do it 'returns false' do expect(message).to_not be_replyable end end describe '#serialize' do let(:bytes) { message.serialize } include_examples 'message with a header' describe 'zero' do let(:field) { bytes.to_s[16..19] } it 'serializes a zero' do expect(field).to be_int32(0) end end describe 'number of cursors' do let(:field) { bytes.to_s[20..23] } it 'serializes the cursor count' do expect(field).to be_int32(id_count) end end describe 'cursor ids' do let(:field) { bytes.to_s[24..-1] } it 'serializes the cursor ids' do expect(field).to be_int64_sequence(cursor_ids) end end end describe '#registry' do context 'when the class is loaded' do it 'registers the op code in the Protocol Registry' do expect(Mongo::Protocol::Registry.get(described_class::OP_CODE)).to be(described_class) end it 'creates an #op_code instance method' do expect(message.op_code).to eq(described_class::OP_CODE) end end end end mongo-2.5.1/spec/mongo/retryable_writes_spec.rb0000644000004100000410000005334213257253113021675 0ustar www-datawww-datarequire 'spec_helper' describe 'Retryable Writes' do RETRYABLE_WRITES_TESTS.each do |file| spec = Mongo::CRUD::Spec.new(file) context(spec.description) do spec.tests.each do |test| context(test.description) do let(:collection) do client[TEST_COLL] end let(:client) do authorized_client_with_retry_writes end before do test.setup_test(collection) end after do test.clear_fail_point(collection) collection.delete_many end let(:results) do if test.error? error = nil begin; test.run(collection); rescue => e; error = e; end error else test.run(collection) end end it 'has the correct data in the collection', if: (sessions_enabled? && replica_set? && test.outcome_collection_data) do skip 'Test cannot be run on this server version' unless spec.server_version_satisfied?(client) results expect(collection.find.to_a).to match_collection_data(test) end if test.error? it 'raises an error', if: sessions_enabled? && replica_set? do skip 'Test cannot be run on this server version' unless spec.server_version_satisfied?(client) expect(results).to be_a(Mongo::Error) end else it 'returns the correct result', if: sessions_enabled? && replica_set? do skip 'Test cannot be run on this server version' unless spec.server_version_satisfied?(client) expect(results).to match_operation_result(test) end end end end end end describe 'Retryable writes integration tests' do let(:primary) do primary = client.cluster.next_primary end let(:primary_connection) do connection = primary.pool.checkout connection.connect! primary.pool.checkin(connection) connection end let(:primary_socket) do primary_connection.send(:socket) end after do authorized_collection.delete_many end shared_examples_for 'an operation that is retried' do context 'when the operation fails on the first attempt' do before do # Note that for writes, server.connectable? is called, refreshing the socket allow(primary).to receive(:connectable?).and_return(true) expect(primary_socket).to receive(:write).and_raise(error) end context 'when the error is retryable' do before do expect(Mongo::Logger.logger).to receive(:warn).once.and_call_original expect(client.cluster).to receive(:scan!)
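# These two message expectations capture the retry contract exercised by this
# shared example: a retryable first failure is expected to log exactly one
# warning and trigger a cluster rescan (scan!) before the write is attempted
# a second time.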
end context 'when the error is a SocketError' do let(:error) do Mongo::Error::SocketError end it 'retries writes' do operation expect(expectation).to eq(successful_retry_value) end end context 'when the error is a SocketTimeoutError' do let(:error) do Mongo::Error::SocketTimeoutError end it 'retries writes' do operation expect(expectation).to eq(successful_retry_value) end end context 'when the error is a retryable OperationFailure' do let(:error) do Mongo::Error::OperationFailure.new('not master') end it 'retries writes' do operation expect(expectation).to eq(successful_retry_value) end end end context 'when the error is not retryable' do context 'when the error is a non-retryable OperationFailure' do let(:error) do Mongo::Error::OperationFailure.new('other error') end it 'does not retry writes' do expect { operation }.to raise_error(error) expect(expectation).to eq(unsuccessful_retry_value) end end end end context 'when the operation fails on the first attempt and again on the second attempt' do before do # Note that for writes, server.connectable? is called, refreshing the socket allow(primary).to receive(:connectable?).and_return(true) allow(primary_socket).to receive(:write).and_raise(error) end context 'when the selected server does not support retryable writes' do before do legacy_primary = double('legacy primary', :'retry_writes?' => false) allow(client.cluster).to receive(:next_primary).and_return(primary, legacy_primary) expect(primary_socket).to receive(:write).and_raise(error) end context 'when the error is a SocketError' do let(:error) do Mongo::Error::SocketError end it 'does not retry writes and raises the original error' do expect { operation }.to raise_error(error) expect(expectation).to eq(unsuccessful_retry_value) end end context 'when the error is a SocketTimeoutError' do let(:error) do Mongo::Error::SocketTimeoutError end it 'does not retry writes and raises the original error' do expect { operation }.to raise_error(error) expect(expectation).to eq(unsuccessful_retry_value) end end context 'when the error is a retryable OperationFailure' do let(:error) do Mongo::Error::OperationFailure.new('not master') end it 'does not retry writes and raises the original error' do expect { operation }.to raise_error(error) expect(expectation).to eq(unsuccessful_retry_value) end end end [Mongo::Error::SocketError, Mongo::Error::SocketTimeoutError, Mongo::Error::OperationFailure.new('not master')].each do |retryable_error| context "when the first error is a #{retryable_error}" do let(:error) do retryable_error end before do bad_socket = primary_connection.address.socket(primary_connection.socket_timeout, primary_connection.send(:ssl_options)) good_socket = primary_connection.address.socket(primary_connection.socket_timeout, primary_connection.send(:ssl_options)) allow(bad_socket).to receive(:write).and_raise(second_error) allow(primary_connection.address).to receive(:socket).and_return(bad_socket, good_socket) end context 'when the second error is a SocketError' do let(:second_error) do Mongo::Error::SocketError end before do expect(client.cluster).to receive(:scan!).twice end it 'does not retry writes and raises the second error' do expect { operation }.to raise_error(second_error) expect(expectation).to eq(unsuccessful_retry_value) end end context 'when the second error is a SocketTimeoutError' do before do expect(client.cluster).to receive(:scan!).twice end let(:second_error) do Mongo::Error::SocketTimeoutError end it 'does not retry writes and raises the second error' do expect { 
operation }.to raise_error(second_error) expect(expectation).to eq(unsuccessful_retry_value) end end context 'when the second error is a retryable OperationFailure' do before do expect(client.cluster).to receive(:scan!).twice end let(:second_error) do Mongo::Error::OperationFailure.new('not master') end it 'does not retry writes and raises the second error' do expect { operation }.to raise_error(second_error) expect(expectation).to eq(unsuccessful_retry_value) end end context 'when the second error is a non-retryable OperationFailure' do before do expect(client.cluster).to receive(:scan!).once end let(:second_error) do Mongo::Error::OperationFailure.new('other error') end it 'does not retry writes and raises the first error' do expect { operation }.to raise_error(error) expect(expectation).to eq(unsuccessful_retry_value) end end context 'when the second error is a another error' do let(:second_error) do StandardError end it 'does not retry writes and raises the first error' do expect { operation }.to raise_error(error) expect(expectation).to eq(unsuccessful_retry_value) end end end end end end shared_examples_for 'an operation that is not retried' do before do # Note that for writes, server.connectable? is called, refreshing the socket allow(primary).to receive(:connectable?).and_return(true) expect(primary_socket).to receive(:write).and_raise(Mongo::Error::SocketError) expect(client.cluster).not_to receive(:scan!) end it 'does not retry writes' do expect { operation }.to raise_error(Mongo::Error::SocketError) expect(expectation).to eq(unsuccessful_retry_value) end end shared_examples_for 'an operation that does not support retryable writes' do let!(:client) do authorized_client_with_retry_writes end let!(:collection) do client[TEST_COLL, write: WRITE_CONCERN] end before do # Note that for writes, server.connectable? is called, refreshing the socket allow(primary).to receive(:connectable?).and_return(true) expect(primary_socket).to receive(:write).and_raise(Mongo::Error::SocketError) expect(client.cluster).not_to receive(:scan!) end it 'does not retry writes' do expect { operation }.to raise_error(Mongo::Error::SocketError) expect(expectation).to eq(unsuccessful_retry_value) end end shared_examples_for 'supported retryable writes' do context 'when the client has retry_writes set to true' do let!(:client) do authorized_client_with_retry_writes end context 'when the collection has write concern acknowledged' do let!(:collection) do client[TEST_COLL, write: WRITE_CONCERN] end context 'when the server supports retryable writes' do before do allow(primary).to receive(:retry_writes?).and_return(true) end if standalone? && sessions_enabled? it_behaves_like 'an operation that is not retried' elsif sessions_enabled? it_behaves_like 'an operation that is retried' end end context 'when the server does not support retryable writes' do before do allow(primary).to receive(:retry_writes?).and_return(false) end it_behaves_like 'an operation that is not retried' end end context 'when the collection has write concern unacknowledged' do let!(:collection) do client[TEST_COLL, write: { w: 0 }] end it_behaves_like 'an operation that is not retried' end context 'when the collection has write concern not set' do let!(:collection) do client[TEST_COLL] end context 'when the server supports retryable writes' do before do allow(primary).to receive(:retry_writes?).and_return(true) end if standalone? && sessions_enabled? it_behaves_like 'an operation that is not retried' elsif sessions_enabled? 
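# Writes are only expected to be retried when sessions are enabled and the
# deployment is not a standalone server; the standalone branch above therefore
# reuses the 'not retried' shared examples.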
it_behaves_like 'an operation that is retried' end end context 'when the server does not support retryable writes' do before do allow(primary).to receive(:retry_writes?).and_return(false) end it_behaves_like 'an operation that is not retried' end end end context 'when the client has retry_writes set to false' do let!(:client) do authorized_client.with(retry_writes: false) end after do client.close end context 'when the collection has write concern acknowledged' do let!(:collection) do client[TEST_COLL, write: WRITE_CONCERN] end it_behaves_like 'an operation that is not retried' end context 'when the collection has write concern unacknowledged' do let!(:collection) do client[TEST_COLL, write: { w: 0 }] end it_behaves_like 'an operation that is not retried' end context 'when the collection has write concern not set' do let!(:collection) do client[TEST_COLL] end it_behaves_like 'an operation that is not retried' end end context 'when the client has retry_writes not set' do let!(:client) do authorized_client end context 'when the collection has write concern acknowledged' do let!(:collection) do client[TEST_COLL, write: WRITE_CONCERN] end it_behaves_like 'an operation that is not retried' end context 'when the collection has write concern unacknowledged' do let!(:collection) do client[TEST_COLL, write: { w: 0 }] end it_behaves_like 'an operation that is not retried' end context 'when the collection has write concern not set' do let!(:collection) do client[TEST_COLL] end it_behaves_like 'an operation that is not retried' end end end context 'when the operation is insert_one' do let(:operation) do collection.insert_one(a:1) end let(:expectation) do collection.find(a: 1).count end let(:successful_retry_value) do 1 end let(:unsuccessful_retry_value) do 0 end it_behaves_like 'supported retryable writes' end context 'when the operation is update_one' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:0) end let(:operation) do collection.update_one({ a: 0 }, { '$set' => { a: 1 } }) end let(:expectation) do collection.find(a: 1).count end let(:successful_retry_value) do 1 end let(:unsuccessful_retry_value) do 0 end it_behaves_like 'supported retryable writes' end context 'when the operation is replace_one' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:0) end let(:operation) do collection.replace_one({ a: 0 }, { a: 1 }) end let(:expectation) do collection.find(a: 1).count end let(:successful_retry_value) do 1 end let(:unsuccessful_retry_value) do 0 end it_behaves_like 'supported retryable writes' end context 'when the operation is delete_one' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:1) end let(:operation) do collection.delete_one(a:1) end let(:expectation) do collection.find(a: 1).count end let(:successful_retry_value) do 0 end let(:unsuccessful_retry_value) do 1 end it_behaves_like 'supported retryable writes' end context 'when the operation is find_one_and_update' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. 
authorized_collection.insert_one(a:0) end let(:operation) do collection.find_one_and_update({ a: 0 }, { '$set' => { a: 1 } }) end let(:expectation) do collection.find(a: 1).count end let(:successful_retry_value) do 1 end let(:unsuccessful_retry_value) do 0 end it_behaves_like 'supported retryable writes' end context 'when the operation is find_one_and_replace' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:0) end let(:operation) do collection.find_one_and_replace({ a: 0 }, { a: 3 }) end let(:expectation) do collection.find(a: 3).count end let(:successful_retry_value) do 1 end let(:unsuccessful_retry_value) do 0 end it_behaves_like 'supported retryable writes' end context 'when the operation is find_one_and_delete' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:1) end let(:operation) do collection.find_one_and_delete({ a: 1 }) end let(:expectation) do collection.find(a: 1).count end let(:successful_retry_value) do 0 end let(:unsuccessful_retry_value) do 1 end it_behaves_like 'supported retryable writes' end context 'when the operation is update_many' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:0) authorized_collection.insert_one(a:0) end let(:operation) do collection.update_many({ a: 0 }, { '$set' => { a: 1 } }) end let(:expectation) do collection.find(a: 1).count end let(:unsuccessful_retry_value) do 0 end it_behaves_like 'an operation that does not support retryable writes' end context 'when the operation is delete_many' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:1) authorized_collection.insert_one(a:1) end let(:operation) do collection.delete_many(a: 1) end let(:expectation) do collection.find(a: 1).count end let(:unsuccessful_retry_value) do 2 end it_behaves_like 'an operation that does not support retryable writes' end context 'when the operation is a bulk write' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a: 1) end let(:operation) do collection.bulk_write([{ delete_one: { filter: { a: 1 } } }, { insert_one: { a: 1 } }, { insert_one: { a: 1 } }]) end let(:expectation) do collection.find(a: 1).count end let(:successful_retry_value) do 2 end let(:unsuccessful_retry_value) do 1 end it_behaves_like 'supported retryable writes' end context 'when the operation is bulk write including delete_many' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. authorized_collection.insert_one(a:1) authorized_collection.insert_one(a:1) end let(:operation) do collection.bulk_write([{ delete_many: { filter: { a: 1 } } }]) end let(:expectation) do collection.find(a: 1).count end let(:unsuccessful_retry_value) do 2 end it_behaves_like 'an operation that does not support retryable writes' end context 'when the operation is bulk write including update_many' do before do # Account for when the collection has unacknowledged write concern and use authorized_collection here. 
authorized_collection.insert_one(a:0) authorized_collection.insert_one(a:0) end let(:operation) do collection.bulk_write([{ update_many: { filter: { a: 0 }, update: { a: 1 } } }]) end let(:expectation) do collection.find(a: 1).count end let(:unsuccessful_retry_value) do 0 end it_behaves_like 'an operation that does not support retryable writes' end end end mongo-2.5.1/spec/mongo/write_concern_spec.rb0000644000004100000410000000732213257253113021145 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::WriteConcern do describe '#get' do context 'when no options are set' do let(:options) do { } end it 'returns an Acknowledged write concern object' do expect(Mongo::WriteConcern.get(options)).to be_a(Mongo::WriteConcern::Acknowledged) end end context 'when the value is a WriteConcern object' do let(:value) do Mongo::WriteConcern.get({}) end it 'returns the object' do expect(Mongo::WriteConcern.get(value)).to be(value) end end context 'when the value is nil' do it 'returns nil' do expect(Mongo::WriteConcern.get(nil)).to be(nil) end end context 'when w is 0' do context 'when no other options are provided' do let(:options) do { w: 0 } end it 'returns an Unacknowledged write concern object' do expect(Mongo::WriteConcern.get(options)).to be_a(Mongo::WriteConcern::Unacknowledged) end end context 'when j is also provided' do context 'when j is false' do let(:options) do { w: 0, j: false } end it 'returns an Unacknowledged write concern object' do expect(Mongo::WriteConcern.get(options)).to be_a(Mongo::WriteConcern::Unacknowledged) end end context 'when j is true' do let(:options) do { w: 0, j: true } end it 'raises an exception' do expect { Mongo::WriteConcern.get(options) }.to raise_error(Mongo::Error::InvalidWriteConcern) end end context 'when fsync is true' do let(:options) do { w: 0, fsync: true } end it 'raises an exception' do expect { Mongo::WriteConcern.get(options) }.to raise_error(Mongo::Error::InvalidWriteConcern) end end end context 'when wtimeout is also provided' do let(:options) do { w: 0, wtimeout: 100 } end it 'returns an Unacknowledged write concern object' do expect(Mongo::WriteConcern.get(options)).to be_a(Mongo::WriteConcern::Unacknowledged) end end end context 'when w is less than 0' do let(:options) do { w: -1 } end it 'raises an exception' do expect { Mongo::WriteConcern.get(options) }.to raise_error(Mongo::Error::InvalidWriteConcern) end end context 'when w is greater than 0' do let(:options) do { w: 2, journal: true } end it 'returns an Acknowledged write concern object' do expect(Mongo::WriteConcern.get(options)).to be_a(Mongo::WriteConcern::Acknowledged) end it 'sets the options' do expect(Mongo::WriteConcern.get(options).options).to eq(options) end end context 'when w is a string' do let(:options) do { w: 'majority', journal: true } end it 'returns an Acknowledged write concern object' do expect(Mongo::WriteConcern.get(options)).to be_a(Mongo::WriteConcern::Acknowledged) end it 'sets the options' do expect(Mongo::WriteConcern.get(options).options).to eq(options) end end context 'when w is a symbol' do let(:options) do { w: :majority, journal: true } end it 'returns an Acknowledged write concern object' do expect(Mongo::WriteConcern.get(options)).to be_a(Mongo::WriteConcern::Acknowledged) end it 'sets w to a string' do expect(Mongo::WriteConcern.get(options).options[:w]).to eq('majority') end end end end mongo-2.5.1/spec/mongo/cluster/0000755000004100000410000000000013257253113016422 5ustar
www-datawww-datamongo-2.5.1/spec/mongo/cluster/topology/0000755000004100000410000000000013257253113020276 5ustar www-datawww-datamongo-2.5.1/spec/mongo/cluster/topology/replica_set_spec.rb0000644000004100000410000003726213257253113024141 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::Topology::ReplicaSet do let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:listeners) do Mongo::Event::Listeners.new end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) end end describe '#servers' do let(:mongos) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:standalone) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:replica_set) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:replica_set_two) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:mongos_description) do Mongo::Server::Description.new(address, { 'msg' => 'isdbgrid' }) end let(:standalone_description) do Mongo::Server::Description.new(address, { 'ismaster' => true }) end let(:replica_set_description) do Mongo::Server::Description.new(address, { 'ismaster' => true, 'setName' => 'testing' }) end let(:replica_set_two_description) do Mongo::Server::Description.new(address, { 'ismaster' => true, 'setName' => 'test' }) end before do mongos.monitor.instance_variable_set(:@description, mongos_description) standalone.monitor.instance_variable_set(:@description, standalone_description) replica_set.monitor.instance_variable_set(:@description, replica_set_description) replica_set_two.monitor.instance_variable_set(:@description, replica_set_two_description) end context 'when no replica set name is provided' do let(:topology) do described_class.new({}, monitoring, []) end let(:servers) do topology.servers([ mongos, standalone, replica_set, replica_set_two ]) end it 'returns only replica set members' do expect(servers).to eq([ replica_set, replica_set_two ]) end end context 'when a replica set name is provided' do let(:topology) do described_class.new({ :replica_set => 'testing' }, monitoring) end let(:servers) do topology.servers([ mongos, standalone, replica_set, replica_set_two ]) end it 'returns only replica set members in the provided set' do expect(servers).to eq([ replica_set ]) end end end describe '.replica_set?' do it 'returns true' do expect(described_class.new({}, monitoring)).to be_replica_set end end describe '.sharded?' do it 'returns false' do expect(described_class.new({}, monitoring)).to_not be_sharded end end describe '.single?' do it 'returns false' do expect(described_class.new({}, monitoring)).to_not be_single end end describe '#has_readable_servers?'
do let(:topology) do described_class.new({}, monitoring, []) end let(:cluster) do double('cluster', servers: servers, single?: false, sharded?: false, unknown?: false) end context 'when the read preference is primary' do let(:selector) do Mongo::ServerSelector.get(:mode => :primary) end context 'when a primary exists' do let(:servers) do [ double('server', primary?: true) ] end it 'returns true' do expect(topology).to have_readable_server(cluster, selector) end end context 'when a primary does not exist' do let(:servers) do [ double('server', primary?: false) ] end it 'returns false' do expect(topology).to_not have_readable_server(cluster, selector) end end end context 'when the read preference is primary preferred' do let(:selector) do Mongo::ServerSelector.get(:mode => :primary_preferred) end context 'when a primary exists' do let(:servers) do [ double('server', primary?: true, secondary?: false) ] end it 'returns true' do expect(topology).to have_readable_server(cluster, selector) end end context 'when a primary does not exist' do let(:servers) do [ double('server', primary?: false, secondary?: true, average_round_trip_time: 0.01) ] end it 'returns true' do expect(topology).to have_readable_server(cluster, selector) end end end context 'when the read preference is secondary' do let(:selector) do Mongo::ServerSelector.get(:mode => :secondary) end context 'when a secondary exists' do let(:servers) do [ double('server', primary?: false, secondary?: true, average_round_trip_time: 0.01) ] end it 'returns true' do expect(topology).to have_readable_server(cluster, selector) end end context 'when a secondary does not exist' do let(:servers) do [ double('server', primary?: true, secondary?: false) ] end it 'returns false' do expect(topology).to_not have_readable_server(cluster, selector) end end end context 'when the read preference is secondary preferred' do let(:selector) do Mongo::ServerSelector.get(:mode => :secondary_preferred) end context 'when a secondary exists' do let(:servers) do [ double('server', primary?: false, secondary?: true, average_round_trip_time: 0.01) ] end it 'returns true' do expect(topology).to have_readable_server(cluster, selector) end end context 'when a secondary does not exist' do let(:servers) do [ double('server', secondary?: false, primary?: true) ] end it 'returns true' do expect(topology).to have_readable_server(cluster, selector) end end end context 'when the read preference is nearest' do let(:selector) do Mongo::ServerSelector.get(:mode => :nearest) end let(:servers) do [ double('server', primary?: false, secondary?: true, average_round_trip_time: 0.01) ] end it 'returns true' do expect(topology).to have_readable_server(cluster, selector) end end context 'when the read preference is not provided' do context 'when a primary exists' do let(:servers) do [ double('server', primary?: true, secondary?: false) ] end it 'returns true' do expect(topology).to have_readable_server(cluster) end end context 'when a primary does not exist' do let(:servers) do [ double('server', primary?: false, secondary?: true, average_round_trip_time: 0.01) ] end it 'returns false' do expect(topology).to_not have_readable_server(cluster) end end end end describe '#has_writable_servers?' do let(:topology) do described_class.new({}, monitoring, []) end context 'when a primary server exists' do let(:primary) do double('server', :primary? => true) end let(:secondary) do double('server', :primary? 
=> false) end let(:cluster) do double('cluster', servers: [ primary, secondary ]) end it 'returns true' do expect(topology).to have_writable_server(cluster) end end context 'when no primary server exists' do let(:server) do double('server', :primary? => false) end let(:cluster) do double('cluster', servers: [ server ]) end it 'returns false' do expect(topology).to_not have_writable_server(cluster) end end end describe '#add_hosts?' do let(:primary) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:secondary) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:primary_description) do Mongo::Server::Description.new(address, { 'ismaster' => true, 'setName' => 'testing' }) end let(:secondary_description) do Mongo::Server::Description.new(address, { 'ismaster' => false, 'secondary' => true, 'setName' => 'testing' }) end let(:topology) do described_class.new({ :replica_set => 'testing' }, monitoring) end before do primary.monitor.instance_variable_set(:@description, primary_description) secondary.monitor.instance_variable_set(:@description, secondary_description) end context 'when the list of servers does not include a primary' do let(:servers) do [ secondary ] end context 'when the description is a member of the replica set' do let(:description) do double('description').tap do |d| allow(d).to receive(:replica_set_member?).and_return(true) allow(d).to receive(:replica_set_name).and_return('testing') end end it 'returns true' do expect(topology.add_hosts?(description, servers)).to eq(true) end end context 'when the description is not a member of the replica set' do let(:description) do double('description').tap do |d| allow(d).to receive(:replica_set_member?).and_return(false) allow(d).to receive(:primary?).and_return(false) end end it 'returns false' do expect(topology.add_hosts?(description, servers)).to eq(false) end end end context 'when the list of servers has a primary' do let(:servers) do [ primary, secondary ] end let(:description) do double('description').tap do |d| allow(d).to receive(:replica_set_member?).and_return(true) allow(d).to receive(:replica_set_name).and_return('testing') allow(d).to receive(:primary?).and_return(false) end end it 'returns false' do expect(topology.add_hosts?(description, servers)).to eq(false) end end end describe '#remove_hosts?' 
do let(:primary) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:primary_description) do Mongo::Server::Description.new(address, { 'ismaster' => true, 'setName' => 'testing' }) end let(:topology) do described_class.new({ :replica_set => 'testing' }, monitoring) end before do primary.monitor.instance_variable_set(:@description, primary_description) end context 'when the description has an empty config' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({}) end end it 'returns false' do expect(topology.remove_hosts?(description)).to eq(false) end end context 'when the description is from a primary' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({ 'ismaster' => true }) allow(d).to receive(:primary?).and_return(true) end end it 'returns true' do expect(topology.remove_hosts?(description)).to eq(true) end end context 'when the description has an empty hosts list' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({ 'ismaster' => true }) allow(d).to receive(:primary?).and_return(false) allow(d).to receive(:me_mismatch?).and_return(false) allow(d).to receive(:hosts).and_return([]) end end it 'returns true' do expect(topology.remove_hosts?(description)).to eq(true) end end context 'when the description is not from the replica set' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({ 'ismaster' => true }) allow(d).to receive(:primary?).and_return(false) allow(d).to receive(:hosts).and_return([ primary ]) allow(d).to receive(:replica_set_name).and_return('test') allow(d).to receive(:replica_set_member?).and_return(true) allow(d).to receive(:me_mismatch?).and_return(false) end end it 'returns true' do expect(topology.remove_hosts?(description)).to eq(true) end end end describe '#remove_server?' 
do let(:secondary) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:secondary_description) do Mongo::Server::Description.new(address, { 'ismaster' => false, 'secondary' => true, 'setName' => 'test' }) end let(:topology) do described_class.new({ :replica_set => 'testing' }, monitoring) end before do secondary.monitor.instance_variable_set(:@description, secondary_description) end context 'when the description is from a server that should itself be removed' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({ 'setName' => 'test' }) allow(d).to receive(:replica_set_member?).and_return(true) allow(d).to receive(:replica_set_name).and_return('test') allow(d).to receive(:is_server?).and_return(true) allow(d).to receive(:ghost?).and_return(false) end end it 'returns true' do expect(topology.remove_server?(description, secondary)).to eq(true) end end context 'when the description is a member of the replica set' do context 'when the description includes the server in question' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({ 'setName' => 'testing' }) allow(d).to receive(:replica_set_member?).and_return(true) allow(d).to receive(:replica_set_name).and_return('testing') allow(d).to receive(:lists_server?).and_return(true) allow(d).to receive(:servers).and_return([double('server')]) end end it 'returns false' do expect(topology.remove_server?(description, secondary)).to eq(false) end end context 'when the description does not include the server in question' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({ 'setName' => 'testing' }) allow(d).to receive(:replica_set_member?).and_return(true) allow(d).to receive(:replica_set_name).and_return('testing') allow(d).to receive(:is_server?).and_return(false) allow(d).to receive(:lists_server?).and_return(false) allow(d).to receive(:servers).and_return([double('server')]) end end it 'returns true' do expect(topology.remove_server?(description, secondary)).to eq(true) end end end context 'when the description is not a member of the replica set' do let(:description) do double('description').tap do |d| allow(d).to receive(:config).and_return({ 'setName' => 'test' }) allow(d).to receive(:replica_set_member?).and_return(true) allow(d).to receive(:replica_set_name).and_return('test') allow(d).to receive(:is_server?).and_return(false) end end it 'returns false' do expect(topology.remove_server?(description, secondary)).to eq(false) end end end end mongo-2.5.1/spec/mongo/cluster/topology/unknown_spec.rb0000644000004100000410000001031713257253113023336 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::Topology::Unknown do let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:topology) do described_class.new({}, monitoring) end describe '.servers' do let(:servers) do topology.servers([ double('mongos'), double('standalone') ]) end it 'returns an empty array' do expect(servers).to eq([ ]) end end describe '.replica_set?' do it 'returns false' do expect(topology).to_not be_replica_set end end describe '.sharded?' do it 'returns false' do expect(topology).not_to be_sharded end end describe '.single?' do it 'returns false' do expect(topology).not_to be_single end end describe '.unknown?' do it 'returns true' do expect(topology.unknown?).to be(true) end end describe '#has_readable_servers?' 
do it 'returns false' do expect(topology).to_not have_readable_server(nil, nil) end end describe '#has_writable_servers?' do it 'returns false' do expect(topology).to_not have_writable_server(nil) end end describe '#add_hosts?' do context 'when the description is from an unknown server' do let(:description) do double('description').tap do |d| allow(d).to receive(:unknown?).and_return(true) end end it 'returns false' do expect(topology.add_hosts?(description, [])).to be(false) end end context 'when the description is from a ghost server' do let(:description) do double('description').tap do |d| allow(d).to receive(:unknown?).and_return(false) allow(d).to receive(:ghost?).and_return(true) end end it 'returns false' do expect(topology.add_hosts?(description, [])).to be(false) end end context 'when the description is not from an unknown or ghost' do let(:description) do double('description').tap do |d| allow(d).to receive(:unknown?).and_return(false) allow(d).to receive(:ghost?).and_return(false) end end it 'returns true' do expect(topology.add_hosts?(description, [])).to be(true) end end end describe '#remove_hosts?' do context 'when the description is from a standalone' do let(:description) do double('description').tap do |d| allow(d).to receive(:standalone?).and_return(true) end end it 'returns true' do expect(topology.remove_hosts?(description)).to be(true) end end context 'when the description is not from a standalone' do let(:description) do double('description').tap do |d| allow(d).to receive(:standalone?).and_return(false) end end it 'returns true' do expect(topology.remove_hosts?(description)).to be(false) end end end describe '#remove_server?' do context 'when the description is from a standalone' do let(:description) do double('description').tap do |d| allow(d).to receive(:standalone?).and_return(true) allow(d).to receive(:is_server?).and_return(true) end end context 'when the description is from the server in question' do it 'returns true' do expect(topology.remove_server?(description, double('server'))).to be(true) end end context 'when the description is not from the server in question' do let(:description) do double('description').tap do |d| allow(d).to receive(:standalone?).and_return(true) allow(d).to receive(:is_server?).and_return(false) end end it 'returns false' do expect(topology.remove_server?(description, double('server'))).to be(false) end end end context 'when the description is not from a standalone' do let(:description) do double('description').tap do |d| allow(d).to receive(:standalone?).and_return(false) end end it 'returns false' do expect(topology.remove_server?(description, double('server'))).to be(false) end end end end mongo-2.5.1/spec/mongo/cluster/topology/sharded_spec.rb0000644000004100000410000000711213257253113023250 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::Topology::Sharded do let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:topology) do described_class.new({}, monitoring) end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) end end let(:mongos) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:standalone) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:replica_set) do Mongo::Server.new(address, cluster, monitoring, 
listeners, TEST_OPTIONS) end let(:mongos_description) do Mongo::Server::Description.new(address, { 'msg' => 'isdbgrid' }) end let(:standalone_description) do Mongo::Server::Description.new(address, { 'ismaster' => true }) end let(:replica_set_description) do Mongo::Server::Description.new(address, { 'ismaster' => true, 'setName' => 'testing', 'ok' => 1 }) end describe '.servers' do before do mongos.monitor.instance_variable_set(:@description, mongos_description) standalone.monitor.instance_variable_set(:@description, standalone_description) replica_set.monitor.instance_variable_set(:@description, replica_set_description) end let(:servers) do topology.servers([ mongos, standalone, replica_set ]) end it 'returns only mongos servers' do expect(servers).to eq([ mongos ]) end end describe '.replica_set?' do it 'returns false' do expect(topology).to_not be_replica_set end end describe '.sharded?' do it 'returns true' do expect(topology).to be_sharded end end describe '.single?' do it 'returns false' do expect(topology).to_not be_single end end describe '#has_readable_servers?' do it 'returns true' do expect(topology).to have_readable_server(nil, nil) end end describe '#has_writable_servers?' do it 'returns true' do expect(topology).to have_writable_server(nil) end end describe '#add_hosts?' do it 'returns false' do expect(topology.add_hosts?(double('description'), [])).to eq(false) end end describe '#remove_hosts?' do it 'returns true' do expect(topology.remove_hosts?(double('description'))).to eq(true) end end describe '#remove_server?' do before do mongos.monitor.instance_variable_set(:@description, mongos_description) replica_set.monitor.instance_variable_set(:@description, replica_set_description) end context 'when the server itself should be removed' do let(:description) do double('description').tap do |d| allow(d).to receive(:mongos?).and_return(false) allow(d).to receive(:unknown?).and_return(false) allow(d).to receive(:is_server?).and_return(true) end end it 'returns true' do expect(topology.remove_server?(description, mongos)).to eq(true) end end context 'when the server is neither a mongos nor an unknown' do let(:description) do double('description').tap do |d| allow(d).to receive(:mongos?).and_return(true) allow(d).to receive(:is_server?).and_return(false) end end it 'returns true' do expect(topology.remove_server?(description, replica_set)).to eq(true) end end end end mongo-2.5.1/spec/mongo/cluster/topology/single_spec.rb0000644000004100000410000000572013257253113023122 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::Topology::Single do let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:topology) do described_class.new({}, monitoring) end let(:listeners) do Mongo::Event::Listeners.new end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:app_metadata).and_return(app_metadata) allow(cl).to receive(:topology).and_return(topology) end end describe '.servers' do let(:mongos) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:standalone) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:standalone_two) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:replica_set) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:mongos_description) do Mongo::Server::Description.new(address, { 'msg' => 'isdbgrid' }) end let(:standalone_description) do 
Mongo::Server::Description.new(address, { 'ismaster' => true, 'ok' => 1 }) end let(:replica_set_description) do Mongo::Server::Description.new(address, { 'ismaster' => true, 'setName' => 'testing' }) end before do mongos.monitor.instance_variable_set(:@description, mongos_description) standalone.monitor.instance_variable_set(:@description, standalone_description) standalone_two.monitor.instance_variable_set(:@description, standalone_description) replica_set.monitor.instance_variable_set(:@description, replica_set_description) end let(:servers) do topology.servers([ mongos, standalone, standalone_two, replica_set ]) end it 'returns only the first standalone server' do expect(servers).to eq([ standalone ]) end end describe '.replica_set?' do it 'returns false' do expect(topology).to_not be_replica_set end end describe '.sharded?' do it 'returns false' do expect(topology).to_not be_sharded end end describe '.single?' do it 'returns true' do expect(topology).to be_single end end describe '#has_readable_servers?' do it 'returns true' do expect(topology).to have_readable_server(nil, nil) end end describe '#has_writable_servers?' do it 'returns true' do expect(topology).to have_writable_server(nil) end end describe '#add_hosts?' do it 'returns false' do expect(topology.add_hosts?(double('description'), [])).to eq(false) end end describe '#remove_hosts?' do it 'returns false' do expect(topology.remove_hosts?(double('description'))).to eq(false) end end describe '#remove_server?' do it 'returns false' do expect(topology.remove_server?(double('description'), double('server'))).to eq(false) end end end mongo-2.5.1/spec/mongo/cluster/app_metadata_spec.rb0000644000004100000410000000534513257253113022410 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::AppMetadata do let(:app_metadata) do described_class.new(cluster) end let(:cluster) do authorized_client.cluster end describe '#initialize' do context 'when the cluster has an app name option set' do let(:cluster) do authorized_client.with(app_name: :reports).cluster end it 'sets the app name' do expect(app_metadata.send(:full_client_document)[:application][:name]).to eq(:reports) end context 'when the app name exceeds the max length of 128' do let(:cluster) do authorized_client.with(app_name: "\u3042"*43).cluster end it 'raises an error' do expect { app_metadata.send(:validate!) 
}.to raise_exception(Mongo::Error::InvalidApplicationName) end end end context 'when the cluster does not have an app name option set' do it 'does not set the app name' do expect(app_metadata.send(:full_client_document)[:application]).to be(nil) end end context 'when the client document exceeds the max of 512 bytes' do context 'when the os.type length is too long' do before do allow(app_metadata).to receive(:type).and_return('x'*500) end it 'truncates the document' do expect(app_metadata.send(:ismaster_bytes)).to be_a(String) end end context 'when the os.name length is too long' do before do allow(app_metadata).to receive(:name).and_return('x'*500) end it 'truncates the document' do expect(app_metadata.send(:ismaster_bytes)).to be_a(String) end end context 'when the os.architecture length is too long' do before do allow(app_metadata).to receive(:architecture).and_return('x'*500) end it 'truncates the document' do expect(app_metadata.send(:ismaster_bytes)).to be_a(String) end end context 'when the platform length is too long' do before do allow(app_metadata).to receive(:platform).and_return('x'*500) end it 'truncates the document to be just an ismaster command' do expect(app_metadata.send(:ismaster_bytes)).to be_a(String) end end context 'when the driver info is too long' do before do allow(app_metadata).to receive(:driver_doc).and_return('x'*500) end it 'truncates the document to be just an ismaster command and the compressors', unless: compression_enabled? do expect(app_metadata.ismaster_bytes.length).to eq(Mongo::Server::Monitor::Connection::ISMASTER_BYTES.length + 26) end end end end endmongo-2.5.1/spec/mongo/cluster/topology_spec.rb0000644000004100000410000000664113257253113021644 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::Topology do let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end describe '.initial' do context 'when provided a replica set option' do let(:topology) do described_class.initial([ 'a' ], monitoring, connect: :replica_set) end it 'returns a replica set topology' do expect(topology).to be_a(Mongo::Cluster::Topology::ReplicaSet) end context 'when the option is a String (due to YAML parsing)' do let(:topology) do described_class.initial([ 'a' ], monitoring, connect: 'replica_set') end it 'returns a replica set topology' do expect(topology).to be_a(Mongo::Cluster::Topology::ReplicaSet) end end end context 'when provided a single option' do let(:topology) do described_class.initial([ 'a' ], monitoring, connect: :direct) end it 'returns a single topology' do expect(topology).to be_a(Mongo::Cluster::Topology::Single) end it 'sets the seed on the topology' do expect(topology.seed).to eq('a') end context 'when the option is a String (due to YAML parsing)' do let(:topology) do described_class.initial([ 'a' ], monitoring, connect: 'direct') end it 'returns a single topology' do expect(topology).to be_a(Mongo::Cluster::Topology::Single) end it 'sets the seed on the topology' do expect(topology.seed).to eq('a') end end end context 'when provided a sharded option' do let(:topology) do described_class.initial([ 'a' ], monitoring, connect: :sharded) end it 'returns a sharded topology' do expect(topology).to be_a(Mongo::Cluster::Topology::Sharded) end context 'when the option is a String (due to YAML parsing)' do let(:topology) do described_class.initial([ 'a' ], monitoring, connect: 'sharded') end it 'returns a sharded topology' do expect(topology).to be_a(Mongo::Cluster::Topology::Sharded) end end end context 'when provided no option' do 
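# Illustrative sketch, not part of the original spec: the '.initial' contexts above and
# below exercise how the connect/replica_set options pick a topology class. Roughly:
#
#   monitoring = Mongo::Monitoring.new(monitoring: false)
#   Mongo::Cluster::Topology.initial([ '127.0.0.1:27017' ], monitoring, connect: :sharded)
#   # => an instance of Mongo::Cluster::Topology::Sharded
#   Mongo::Cluster::Topology.initial([ '127.0.0.1:27017' ], monitoring, replica_set: 'rs0')
#   # => an instance of Mongo::Cluster::Topology::ReplicaSet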
context 'when a set name is in the options' do let(:topology) do described_class.initial([], monitoring, replica_set: 'testing') end it 'returns a replica set topology' do expect(topology).to be_a(Mongo::Cluster::Topology::ReplicaSet) end end context 'when no set name is in the options' do let(:topology) do described_class.initial([], monitoring, {}) end it 'returns an unknown topology' do expect(topology).to be_a(Mongo::Cluster::Topology::Unknown) end end context 'when provided a single mongos', if: single_mongos? do let(:topology) do described_class.initial(ADDRESSES, monitoring, TEST_OPTIONS) end it 'returns a sharded topology' do expect(topology).to be_a(Mongo::Cluster::Topology::Sharded) end end context 'when provided a single replica set member', if: single_rs_member? do let(:topology) do described_class.initial(ADDRESSES, monitoring, TEST_OPTIONS) end it 'returns a single topology' do expect(topology).to be_a(Mongo::Cluster::Topology::Single) end end end end end mongo-2.5.1/spec/mongo/cluster/socket_reaper_spec.rb0000644000004100000410000000112413257253113022605 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::SocketReaper do let(:cluster) do authorized_client.cluster end let(:reaper) do described_class.new(cluster) end describe '#initialize' do it 'takes a cluster as an argument' do expect(reaper).to be_a(described_class) end end describe '#execute' do before do cluster.servers.each do |s| expect(s.pool).to receive(:close_stale_sockets!).and_call_original end end it 'calls close_stale_sockets on each connection pool in the cluster' do reaper.execute end end end mongo-2.5.1/spec/mongo/cluster/cursor_reaper_spec.rb0000644000004100000410000000660313257253113022641 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster::CursorReaper do after do authorized_collection.delete_many end let(:reaper) do described_class.new end let(:active_cursors) do reaper.instance_variable_get(:@active_cursors) end describe '#initialize' do it 'initializes a hash for servers and their kill cursors ops' do expect(reaper.instance_variable_get(:@to_kill)).to be_a(Hash) end it 'initializes a set for the list of active cursors' do expect(reaper.instance_variable_get(:@active_cursors)).to be_a(Set) end end describe '#schedule_kill_cursor' do let(:server) { double('server') } let(:cursor_id) { 1 } let(:op_spec_1) { double('op_spec_1') } let(:op_spec_2) { double('op_spec_2') } let(:to_kill) { reaper.instance_variable_get(:@to_kill) } context 'when the cursor is on the list of active cursors' do before do reaper.register_cursor(cursor_id) end context 'when there is not a list already for the server' do before do reaper.schedule_kill_cursor(cursor_id, op_spec_1, server) end it 'initializes the list of op specs to a set' do expect(to_kill.keys).to eq([ server ]) expect(to_kill[server]).to eq(Set.new([op_spec_1])) end end context 'when there is a list of ops already for the server' do before do reaper.schedule_kill_cursor(cursor_id, op_spec_1, server) reaper.schedule_kill_cursor(cursor_id, op_spec_2, server) end it 'adds the op to the server list' do expect(to_kill.keys).to eq([ server ]) expect(to_kill[server]).to contain_exactly(op_spec_1, op_spec_2) end context 'when the same op is added more than once' do before do reaper.schedule_kill_cursor(cursor_id, op_spec_2, server) end it 'does not allow duplicate ops for a server' do expect(to_kill.keys).to eq([ server ]) expect(to_kill[server]).to contain_exactly(op_spec_1, op_spec_2) end end end end context 'when the cursor is not on the
list of active cursors' do before do reaper.schedule_kill_cursor(cursor_id, op_spec_1, server) end it 'does not add the kill cursors op spec to the list' do expect(to_kill).to eq({}) end end end describe '#register_cursor' do before do reaper.register_cursor(cursor_id) end context 'when the cursor id is nil' do let(:cursor_id) do nil end it 'does not register the cursor' do expect(active_cursors.size).to be(0) end end context 'when the cursor id is 0' do let(:cursor_id) do 0 end it 'does not register the cursor' do expect(active_cursors.size).to be(0) end end context 'when the cursor id is a valid id' do let(:cursor_id) do 2 end it 'registers the cursor id as active' do expect(active_cursors).to eq(Set.new([2])) end end end describe '#unregister_cursor' do context 'when the cursor id is in the active cursors list' do before do reaper.register_cursor(2) reaper.unregister_cursor(2) end it 'removes the cursor id' do expect(active_cursors.size).to eq(0) end end end end mongo-2.5.1/spec/mongo/monitoring_spec.rb0000644000004100000410000000637213257253113020475 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Monitoring do describe '#dup' do let(:monitoring) do described_class.new end let(:copy) do monitoring.dup end it 'dups the subscribers' do expect(monitoring.subscribers).to_not equal(copy.subscribers) end it 'keeps the same subscriber instances' do expect(monitoring.subscribers).to eq(copy.subscribers) end context 'when adding to the copy' do let(:subscriber) do double('subscriber') end before do copy.subscribe('topic', subscriber) end it 'does not modify the original subscribers' do expect(monitoring.subscribers).to_not eq(copy.subscribers) end end end describe '#initialize' do context 'when no monitoring options provided' do let(:monitoring) do described_class.new end it 'includes the global subscribers' do expect(monitoring.subscribers.size).to eq(6) end end context 'when monitoring options provided' do context 'when monitoring is true' do let(:monitoring) do described_class.new(monitoring: true) end it 'includes the global subscribers' do expect(monitoring.subscribers.size).to eq(6) end end context 'when monitoring is false' do let(:monitoring) do described_class.new(monitoring: false) end it 'does not include the global subscribers' do expect(monitoring.subscribers).to be_empty end end end end describe '#subscribe' do let(:monitoring) do described_class.new(monitoring: false) end let(:subscriber) do double('subscriber') end before do monitoring.subscribe('topic', subscriber) end it 'subscribes to the topic' do expect(monitoring.subscribers['topic']).to eq([ subscriber ]) end end describe '#started' do let(:monitoring) do described_class.new(monitoring: false) end let(:subscriber) do double('subscriber') end let(:event) do double('event') end before do monitoring.subscribe('topic', subscriber) end it 'calls the started method on each subscriber' do expect(subscriber).to receive(:started).with(event) monitoring.started('topic', event) end end describe '#succeeded' do let(:monitoring) do described_class.new(monitoring: false) end let(:subscriber) do double('subscriber') end let(:event) do double('event') end before do monitoring.subscribe('topic', subscriber) end it 'calls the succeeded method on each subscriber' do expect(subscriber).to receive(:succeeded).with(event) monitoring.succeeded('topic', event) end end describe '#failed' do let(:monitoring) do described_class.new(monitoring: false) end let(:subscriber) do double('subscriber') end let(:event) do double('event') end 
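# Illustrative sketch, not part of the original spec: outside of these tests a
# subscriber is simply any object that responds to #started, #succeeded and
# #failed. The class name below is made up for illustration; Mongo::Monitoring::COMMAND
# is the driver's command topic constant.
#
#   class LoggingSubscriber
#     def started(event);   Mongo::Logger.logger.debug("started: #{event.inspect}")   end
#     def succeeded(event); Mongo::Logger.logger.debug("succeeded: #{event.inspect}") end
#     def failed(event);    Mongo::Logger.logger.debug("failed: #{event.inspect}")    end
#   end
#
#   monitoring.subscribe(Mongo::Monitoring::COMMAND, LoggingSubscriber.new)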
before do monitoring.subscribe('topic', subscriber) end it 'calls the failed method on each subscriber' do expect(subscriber).to receive(:failed).with(event) monitoring.failed('topic', event) end end end mongo-2.5.1/spec/mongo/dns_seedlist_discovery_spec.rb0000644000004100000410000000474313257253113023057 0ustar www-datawww-datarequire 'spec_helper' describe 'DNS Seedlist Discovery' do if test_connecting_externally? include Mongo::ConnectionString before(:all) do module Mongo class Server # The constructor keeps the same API, but does not instantiate a # monitor and run it. alias :original_initialize :initialize def initialize(address, cluster, monitoring, event_listeners, options = {}) @address = address @cluster = cluster @monitoring = monitoring @options = options.freeze @monitor = Monitor.new(address, event_listeners, options) end # Disconnect simply needs to return true since we have no monitor and # no connection. alias :original_disconnect! :disconnect! def disconnect!; true; end end end end after(:all) do module Mongo class Server alias :initialize :original_initialize remove_method(:original_initialize) alias :disconnect! :original_disconnect! remove_method(:original_disconnect!) end end end DNS_SEEDLIST_DISCOVERY_TESTS.each do |file_name| file = File.new(file_name) spec = YAML.load(ERB.new(file.read).result) file.close test = Mongo::ConnectionString::Test.new(spec) context(File.basename(file_name), if: test_connecting_externally?) do context 'when the uri is invalid', if: test.raise_error? do let(:valid_errors) do [ Mongo::Error::InvalidTXTRecord, Mongo::Error::NoSRVRecords, Mongo::Error::InvalidURI, Mongo::Error::MismatchedDomain, ] end let(:error) do e = nil begin; test.uri; rescue => ex; e = ex; end e end it 'raises an error' do expect(valid_errors).to include(error.class) end end context 'when the uri is valid', unless: test.raise_error? do it 'does not raise an exception' do expect(test.uri).to be_a(Mongo::URI::SRVProtocol) end it 'creates a client with the correct hosts' do expect(test.client).to have_hosts(test) end it 'creates a client with the correct options' do expect(test.client).to match_options(test) end end end end end end mongo-2.5.1/spec/mongo/session_spec.rb0000644000004100000410000001372613257253113017774 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Session, if: test_sessions? 
do let(:session) do authorized_client.start_session(options) end let(:options) do {} end describe '#initialize' do context 'when options are provided' do it 'duplicates and freezes the options' do expect(session.options).not_to be(options) expect(session.options.frozen?).to be(true) end end it 'sets a server session with an id' do expect(session.session_id).to be_a(BSON::Document) end it 'sets the cluster time to nil' do expect(session.cluster_time).to be(nil) end it 'sets the cluster' do expect(session.cluster).to be(authorized_client.cluster) end end describe '#inspect' do it 'includes the Ruby object_id in the formatted string' do expect(session.inspect).to include(session.object_id.to_s) end it 'includes the session_id in the formatted string' do expect(session.inspect).to include(session.session_id.to_s) end context 'when options are provided' do let(:options) do { causal_consistency: true } end it 'includes the options in the formatted string' do expect(session.inspect).to include({ implicit: false, causal_consistency: true }.to_s) end end end describe '#advance_cluster_time' do let(:new_cluster_time) do { 'clusterTime' => BSON::Timestamp.new(0, 5) } end context 'when the session does not have a cluster time' do before do session.advance_cluster_time(new_cluster_time) end it 'sets the new cluster time' do expect(session.cluster_time).to eq(new_cluster_time) end end context 'when the session already has a cluster time' do context 'when the original cluster time is less than the new cluster time' do let(:original_cluster_time) do { 'clusterTime' => BSON::Timestamp.new(0, 1) } end before do session.instance_variable_set(:@cluster_time, original_cluster_time) session.advance_cluster_time(new_cluster_time) end it 'sets the new cluster time' do expect(session.cluster_time).to eq(new_cluster_time) end end context 'when the original cluster time is equal or greater than the new cluster time' do let(:original_cluster_time) do { 'clusterTime' => BSON::Timestamp.new(0, 6) } end before do session.instance_variable_set(:@cluster_time, original_cluster_time) session.advance_cluster_time(new_cluster_time) end it 'does not update the cluster time' do expect(session.cluster_time).to eq(original_cluster_time) end end end end describe '#advance_operation_time' do let(:new_operation_time) do BSON::Timestamp.new(0, 5) end context 'when the session does not have an operation time' do before do session.advance_operation_time(new_operation_time) end it 'sets the new operation time' do expect(session.operation_time).to eq(new_operation_time) end end context 'when the session already has an operation time' do context 'when the original operation time is less than the new operation time' do let(:original_operation_time) do BSON::Timestamp.new(0, 1) end before do session.instance_variable_set(:@operation_time, original_operation_time) session.advance_operation_time(new_operation_time) end it 'sets the new operation time' do expect(session.operation_time).to eq(new_operation_time) end end context 'when the original operation time is equal or greater than the new operation time' do let(:original_operation_time) do BSON::Timestamp.new(0, 6) end before do session.instance_variable_set(:@operation_time, original_operation_time) session.advance_operation_time(new_operation_time) end it 'does not update the operation time' do expect(session.operation_time).to eq(original_operation_time) end end end end describe 'ended?' 
do context 'when the session has not been ended' do it 'returns false' do expect(session.ended?).to be(false) end end context 'when the session has been ended' do before do session.end_session end it 'returns true' do expect(session.ended?).to be(true) end end end describe 'end_session' do let!(:server_session) do session.instance_variable_get(:@server_session) end let(:cluster_session_pool) do session.cluster.session_pool end it 'returns the server session to the cluster session pool' do session.end_session expect(cluster_session_pool.instance_variable_get(:@queue)).to include(server_session) end context 'when #end_session is called multiple times' do before do session.end_session end it 'returns nil' do expect(session.end_session).to be_nil end end end describe '#retry_writes?', if: test_sessions? do context 'when the option is set to true' do let(:client) do authorized_client_with_retry_writes end it 'returns true' do expect(client.start_session.retry_writes?).to be(true) end end context 'when the option is set to false' do let(:client) do authorized_client.with(retry_writes: false) end after do client.close end it 'returns false' do expect(client.start_session.retry_writes?).to be(false) end end context 'when the option is not defined' do let(:client) do authorized_client end it 'returns false' do expect(client.start_session.retry_writes?).to be(false) end end end end mongo-2.5.1/spec/mongo/auth/0000755000004100000410000000000013257253113015702 5ustar www-datawww-datamongo-2.5.1/spec/mongo/auth/cr_spec.rb0000644000004100000410000000365213257253113017653 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::CR do let(:address) do default_address end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) allow(cl).to receive(:cluster_time).and_return(nil) allow(cl).to receive(:update_cluster_time) end end let(:topology) do double('topology') end let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:connection) do Mongo::Server::Connection.new(server, TEST_OPTIONS) end describe '#login' do context 'when the user is not authorized' do let(:user) do Mongo::Auth::User.new( database: 'driver', user: 'notauser', password: 'password' ) end let(:cr) do described_class.new(user) end let(:login) do cr.login(connection).documents[0] end it 'raises an exception' do expect { cr.login(connection) }.to raise_error(Mongo::Auth::Unauthorized) end context 'when compression is used', if: testing_compression? do it 'does not compress the message' do expect(Mongo::Protocol::Compressed).not_to receive(:new) expect { cr.login(connection) }.to raise_error(Mongo::Auth::Unauthorized) end end end end context 'when the user is authorized for the database' do let(:cr) do described_class.new(root_user) end let(:login) do cr.login(connection).documents[0] end it 'logs the user into the connection', unless: scram_sha_1_enabled? 
do expect(cr.login(connection).documents[0]['ok']).to eq(1) end end end mongo-2.5.1/spec/mongo/auth/ldap_spec.rb0000644000004100000410000000240213257253113020157 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::LDAP do let(:address) do default_address end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) allow(cl).to receive(:cluster_time).and_return(nil) allow(cl).to receive(:update_cluster_time) end end let(:topology) do double('topology') end let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:connection) do Mongo::Server::Connection.new(server, TEST_OPTIONS) end let(:user) do Mongo::Auth::User.new(database: TEST_DB, user: 'driver', password: 'password') end describe '#login' do context 'when the user is not authorized for the database' do let(:cr) do described_class.new(user) end let(:login) do cr.login(connection).documents[0] end it 'logs the user into the connection' do expect { cr.login(connection) }.to raise_error(Mongo::Auth::Unauthorized) end end end end mongo-2.5.1/spec/mongo/auth/user_spec.rb0000644000004100000410000000740013257253113020220 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::User do let(:options) do { database: 'testing', user: 'user', password: 'pass' } end let(:user) do described_class.new(options) end describe '#auth_key' do let(:nonce) do end let(:expected) do Digest::MD5.hexdigest("#{nonce}#{user.name}#{user.hashed_password}") end it 'returns the users authentication key' do expect(user.auth_key(nonce)).to eq(expected) end end describe '#encoded_name' do context 'when the user name contains an =' do let(:options) do { user: 'user=' } end it 'escapes the = character to =3D' do expect(user.encoded_name).to eq('user=3D') end it 'returns a UTF-8 string' do expect(user.encoded_name.encoding.name).to eq('UTF-8') end end context 'when the user name contains a ,' do let(:options) do { user: 'user,' } end it 'escapes the , character to =2C' do expect(user.encoded_name).to eq('user=2C') end it 'returns a UTF-8 string' do expect(user.encoded_name.encoding.name).to eq('UTF-8') end end context 'when the user name contains no special characters' do it 'does not alter the user name' do expect(user.name).to eq('user') end it 'returns a UTF-8 string' do expect(user.encoded_name.encoding.name).to eq('UTF-8') end end end describe '#initialize' do it 'sets the database' do expect(user.database).to eq('testing') end it 'sets the name' do expect(user.name).to eq('user') end it 'sets the password' do expect(user.password).to eq('pass') end end describe '#hashed_password' do let(:expected) do Digest::MD5.hexdigest("user:mongo:pass") end it 'returns the hashed password' do expect(user.hashed_password).to eq(expected) end end describe '#mechanism' do context 'when the option is provided' do let(:options) do { database: 'testing', user: 'user', password: 'pass', auth_mech: :plain } end let(:user) do described_class.new(options) end it 'returns the option' do expect(user.mechanism).to eq(:plain) end end context 'when no option is provided' do let(:user) do described_class.new(options) end it 'returns the default' do expect(user.mechanism).to eq(:mongodb_cr) end end end describe '#auth_mech_properties' do context 'when the option is provided' do let(:auth_mech_properties) do { 
service_name: 'test', service_realm: 'test', canonicalize_host_name: true } end let(:options) do { database: 'testing', user: 'user', password: 'pass', auth_mech_properties: auth_mech_properties } end let(:user) do described_class.new(options) end it 'returns the option' do expect(user.auth_mech_properties).to eq(auth_mech_properties) end end context 'when no option is provided' do let(:user) do described_class.new(options) end it 'returns an empty hash' do expect(user.auth_mech_properties).to eq({}) end end end describe '#roles' do context 'when roles are provided' do let(:roles) do [ Mongo::Auth::Roles::ROOT ] end let(:user) do described_class.new(roles: roles) end it 'returns the roles' do expect(user.roles).to eq(roles) end end context 'when no roles are provided' do let(:user) do described_class.new({}) end it 'returns an empty array' do expect(user.roles).to be_empty end end end end mongo-2.5.1/spec/mongo/auth/user/0000755000004100000410000000000013257253113016660 5ustar www-datawww-datamongo-2.5.1/spec/mongo/auth/user/view_spec.rb0000644000004100000410000001712713257253113021201 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::User::View do let(:view) do described_class.new(root_authorized_client.database) end after do begin; view.remove('durran'); rescue; end end describe '#create' do context 'when a session is not used' do let!(:response) do view.create( 'durran', password: 'password', roles: [Mongo::Auth::Roles::READ_WRITE] ) end context 'when user creation was successful' do it 'saves the user in the database' do expect(response).to be_successful end context 'when compression is used', if: testing_compression? do it 'does not compress the message' do # The dropUser command message will be compressed, so expect instantiation once. expect(Mongo::Protocol::Compressed).to receive(:new).once.and_call_original expect(response).to be_successful end end end context 'when creation was not successful' do it 'raises an exception' do expect { view.create('durran', password: 'password') }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when a session is used' do let(:operation) do view.create( 'durran', password: 'password', roles: [Mongo::Auth::Roles::READ_WRITE], session: session ) end let(:session) do client.start_session end let(:client) do root_authorized_client end it_behaves_like 'an operation using a session' end end describe '#update' do before do view.create( 'durran', password: 'password', roles: [Mongo::Auth::Roles::READ_WRITE] ) end context 'when a user password is updated' do context 'when a session is not used' do let!(:response) do view.update( 'durran', password: '123', roles: [ Mongo::Auth::Roles::READ_WRITE ] ) end it 'updates the password' do expect(response).to be_successful end context 'when compression is used', if: testing_compression? do it 'does not compress the message' do # The dropUser command message will be compressed, so expect instantiation once. 
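# Sketch of an assumption, not taken from the original spec: wire compression is
# opted into when the client is built, e.g.
#
#   Mongo::Client.new([ '127.0.0.1:27017' ], compressors: ['zlib'])
#
# and the testing_compression? guard presumably reflects whether the test client
# was created with such an option.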
expect(Mongo::Protocol::Compressed).to receive(:new).once.and_call_original expect(response).to be_successful end end end context 'when a session is used' do let(:operation) do view.update( 'durran', password: '123', roles: [ Mongo::Auth::Roles::READ_WRITE ], session: session ) end let(:session) do client.start_session end let(:client) do root_authorized_client end it_behaves_like 'an operation using a session' end end context 'when the roles of a user are updated' do context 'when a session is not used' do let!(:response) do view.update( 'durran', password: 'password', roles: [ Mongo::Auth::Roles::READ ] ) end it 'updates the roles' do expect(response).to be_successful end context 'when compression is used', if: testing_compression? do it 'does not compress the message' do # The dropUser command message will be compressed, so expect instantiation once. expect(Mongo::Protocol::Compressed).to receive(:new).once.and_call_original expect(response).to be_successful end end end context 'when a session is used' do let(:operation) do view.update( 'durran', password: 'password', roles: [ Mongo::Auth::Roles::READ ], session: session ) end let(:session) do client.start_session end let(:client) do root_authorized_client end it_behaves_like 'an operation using a session' end end end describe '#remove' do context 'when a session is not used' do context 'when user removal was successful' do before do view.create( 'durran', password: 'password', roles: [ Mongo::Auth::Roles::READ_WRITE ] ) end let(:response) do view.remove('durran') end it 'removes the user from the database' do expect(response).to be_successful end end context 'when removal was not successful' do it 'raises an exception' do expect { view.remove('notauser') }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when a session is used' do context 'when user removal was successful' do before do view.create( 'durran', password: 'password', roles: [ Mongo::Auth::Roles::READ_WRITE ] ) end let(:operation) do view.remove('durran', session: session) end let(:session) do client.start_session end let(:client) do root_authorized_client end it_behaves_like 'an operation using a session' end context 'when removal was not successful' do let(:failed_operation) do view.remove('notauser', session: session) end let(:session) do client.start_session end let(:client) do root_authorized_client end it_behaves_like 'a failed operation using a session' end end end describe '#info' do context 'when a session is not used' do context 'when a user exists in the database' do before do view.create( 'emily', password: 'password' ) end after do view.remove('emily') end it 'returns information for that user' do expect(view.info('emily')).to_not be_empty end end context 'when a user does not exist in the database' do it 'returns an empty result' do expect(view.info('emily')).to be_empty end end context 'when a user is not authorized' do let(:view) do described_class.new(unauthorized_client.database) end it 'raises an OperationFailure', if: auth_enabled?
do expect{ view.info('emily') }.to raise_exception(Mongo::Error::OperationFailure) end end end context 'when a session is used' do context 'when a user exists in the database' do before do view.create( 'durran', password: 'password' ) end let(:operation) do view.info('durran', session: session) end let(:session) do client.start_session end let(:client) do root_authorized_client end it_behaves_like 'an operation using a session' end context 'when a user does not exist in the database' do let(:operation) do view.info('emily', session: session) end let(:session) do client.start_session end let(:client) do root_authorized_client end it_behaves_like 'an operation using a session' end end end end mongo-2.5.1/spec/mongo/auth/ldap/0000755000004100000410000000000013257253113016622 5ustar www-datawww-datamongo-2.5.1/spec/mongo/auth/ldap/conversation_spec.rb0000644000004100000410000000142613257253113022676 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::LDAP::Conversation do let(:user) do Mongo::Auth::User.new( database: Mongo::Database::ADMIN, user: 'user', password: 'pencil' ) end let(:conversation) do described_class.new(user) end describe '#start' do let(:query) do conversation.start end let(:selector) do query.selector end it 'sets the sasl start flag' do expect(selector[:saslStart]).to eq(1) end it 'sets the auto authorize flag' do expect(selector[:autoAuthorize]).to eq(1) end it 'sets the mechanism' do expect(selector[:mechanism]).to eq('PLAIN') end it 'sets the payload' do expect(selector[:payload].data).to eq("\x00user\x00pencil") end end end mongo-2.5.1/spec/mongo/auth/x509/0000755000004100000410000000000013257253113016407 5ustar www-datawww-datamongo-2.5.1/spec/mongo/auth/x509/conversation_spec.rb0000644000004100000410000000245313257253113022464 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::X509::Conversation do let(:user) do Mongo::Auth::User.new( database: Mongo::Database::ADMIN, user: 'user', ) end let(:conversation) do described_class.new(user) end describe '#start' do let(:query) do conversation.start end let(:selector) do query.selector end it 'sets username' do expect(selector[:user]).to eq('user') end it 'sets the mechanism' do expect(selector[:mechanism]).to eq('MONGODB-X509') end context 'when a username is not provided' do let(:user) do Mongo::Auth::User.new( database: Mongo::Database::ADMIN ) end it 'does not set the username' do expect(selector[:user]).to be_nil end it 'sets the mechanism' do expect(selector[:mechanism]).to eq('MONGODB-X509') end end context 'when the username is nil' do let(:user) do Mongo::Auth::User.new( database: Mongo::Database::ADMIN, user: nil ) end it 'does not set the username' do expect(selector.has_key?(:user)).to be(false) end it 'sets the mechanism' do expect(selector[:mechanism]).to eq('MONGODB-X509') end end end end mongo-2.5.1/spec/mongo/auth/scram/0000755000004100000410000000000013257253113017007 5ustar www-datawww-datamongo-2.5.1/spec/mongo/auth/scram/conversation_spec.rb0000644000004100000410000001054613257253113023066 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::SCRAM::Conversation do let(:user) do Mongo::Auth::User.new( database: Mongo::Database::ADMIN, user: 'user', password: 'pencil' ) end let(:conversation) do described_class.new(user) end describe '#start' do let(:query) do conversation.start end before do expect(SecureRandom).to receive(:base64).once.and_return('NDA2NzU3MDY3MDYwMTgy') end let(:selector) do query.selector end it 'sets the sasl start flag' do 
expect(selector[:saslStart]).to eq(1) end it 'sets the auto authorize flag' do expect(selector[:autoAuthorize]).to eq(1) end it 'sets the mechanism' do expect(selector[:mechanism]).to eq('SCRAM-SHA-1') end it 'sets the payload' do expect(selector[:payload].data).to eq('n,,n=user,r=NDA2NzU3MDY3MDYwMTgy') end end describe '#continue' do let(:reply) do Mongo::Protocol::Message.new end let(:documents) do [{ 'conversationId' => 1, 'done' => false, 'payload' => payload, 'ok' => 1.0 }] end before do expect(SecureRandom).to receive(:base64).once.and_return('NDA2NzU3MDY3MDYwMTgy') allow(reply).to receive(:documents).and_return(documents) end context 'when the server rnonce starts with the nonce' do let(:payload) do BSON::Binary.new( 'r=NDA2NzU3MDY3MDYwMTgyt7/+IWaw1HaZZ5NmPJUTWapLpH2Gg+d8,s=AVvQXzAbxweH2RYDICaplw==,i=10000' ) end let(:query) do conversation.continue(reply) end let(:selector) do query.selector end it 'sets the conversation id' do expect(selector[:conversationId]).to eq(1) end it 'sets the payload' do expect(selector[:payload].data).to eq( 'c=biws,r=NDA2NzU3MDY3MDYwMTgyt7/+IWaw1HaZZ5NmPJUTWapLpH2Gg+d8,p=qYUYNy6SQ9Jucq9rFA9nVgXQdbM=' ) end it 'sets the continue flag' do expect(selector[:saslContinue]).to eq(1) end end context 'when the server nonce does not start with the nonce' do let(:payload) do BSON::Binary.new( 'r=NDA2NzU4MDY3MDYwMTgyt7/+IWaw1HaZZ5NmPJUTWapLpH2Gg+d8,s=AVvQXzAbxweH2RYDICaplw==,i=10000' ) end it 'raises an error' do expect { conversation.continue(reply) }.to raise_error(Mongo::Error::InvalidNonce) end end end describe '#finalize' do let(:continue_reply) do Mongo::Protocol::Message.new end let(:continue_documents) do [{ 'conversationId' => 1, 'done' => false, 'payload' => continue_payload, 'ok' => 1.0 }] end let(:continue_payload) do BSON::Binary.new( 'r=NDA2NzU3MDY3MDYwMTgyt7/+IWaw1HaZZ5NmPJUTWapLpH2Gg+d8,s=AVvQXzAbxweH2RYDICaplw==,i=10000' ) end let(:reply) do Mongo::Protocol::Message.new end let(:documents) do [{ 'conversationId' => 1, 'done' => false, 'payload' => payload, 'ok' => 1.0 }] end before do expect(SecureRandom).to receive(:base64).once.and_return('NDA2NzU3MDY3MDYwMTgy') allow(continue_reply).to receive(:documents).and_return(continue_documents) allow(reply).to receive(:documents).and_return(documents) end context 'when the verifier matches the server signature' do let(:payload) do BSON::Binary.new('v=gwo9E8+uifshm7ixj441GvIfuUY=') end let(:query) do conversation.continue(continue_reply) conversation.finalize(reply) end let(:selector) do query.selector end it 'sets the conversation id' do expect(selector[:conversationId]).to eq(1) end it 'sets the empty payload' do expect(selector[:payload].data).to eq('') end it 'sets the continue flag' do expect(selector[:saslContinue]).to eq(1) end end context 'when the verifier does not match the server signature' do let(:payload) do BSON::Binary.new('v=LQ+8yhQeVL2a3Dh+TDJ7xHz4Srk=') end it 'raises an error' do expect { conversation.continue(continue_reply) conversation.finalize(reply) }.to raise_error(Mongo::Error::InvalidSignature) end end end end mongo-2.5.1/spec/mongo/auth/x509_spec.rb0000644000004100000410000000241013257253113017743 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::X509 do let(:address) do default_address end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to 
receive(:app_metadata).and_return(app_metadata) allow(cl).to receive(:cluster_time).and_return(nil) allow(cl).to receive(:update_cluster_time) end end let(:topology) do double('topology') end let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:connection) do Mongo::Server::Connection.new(server, TEST_OPTIONS) end let(:user) do Mongo::Auth::User.new(database: TEST_DB, user: 'driver', password: 'password') end describe '#login' do context 'when the user is not authorized for the database' do let(:x509) do described_class.new(user) end let(:login) do x509.login(connection).documents[0] end it 'logs the user into the connection' do expect { x509.login(connection) }.to raise_error(Mongo::Auth::Unauthorized) end end end end mongo-2.5.1/spec/mongo/auth/scram_spec.rb0000644000004100000410000000442213257253113020350 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Auth::SCRAM do let(:address) do default_address end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) allow(cl).to receive(:cluster_time).and_return(nil) allow(cl).to receive(:update_cluster_time) end end let(:topology) do double('topology') end let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:connection) do Mongo::Server::Connection.new(server, TEST_OPTIONS) end describe '#login' do context 'when the user is not authorized' do let(:user) do Mongo::Auth::User.new( database: 'driver', user: 'notauser', password: 'password' ) end let(:cr) do described_class.new(user) end it 'raises an exception', if: list_command_enabled? do expect { cr.login(connection) }.to raise_error(Mongo::Auth::Unauthorized) end context 'when compression is used', if: testing_compression? do it 'does not compress the message' do expect(Mongo::Protocol::Compressed).not_to receive(:new) expect { cr.login(connection) }.to raise_error(Mongo::Auth::Unauthorized) end end end end context 'when the user is authorized for the database' do let(:cr) do described_class.new(root_user) end let(:login) do cr.login(connection).documents[0] end after do root_user.instance_variable_set(:@client_key, nil) end it 'logs the user into the connection and caches the client key', if: list_command_enabled? do expect(login['ok']).to eq(1) expect(root_user.send(:client_key)).not_to be_nil end it 'raises an exception when an incorrect client key is set', if: list_command_enabled? do root_user.instance_variable_set(:@client_key, "incorrect client key") expect { cr.login(connection) }.to raise_error(Mongo::Auth::Unauthorized) end end end mongo-2.5.1/spec/mongo/uri_spec.rb0000644000004100000410000006605013257253113017106 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::URI do describe '.get' do let(:uri) { described_class.get(string) } context 'when the scheme is mongodb://' do let(:string) do 'mongodb://localhost:27017' end it 'returns a Mongo::URI object' do expect(uri).to be_a(Mongo::URI) end end context 'when the scheme is mongodb+srv://', if: test_connecting_externally? 
do let(:string) do 'mongodb+srv://test5.test.build.10gen.cc' end it 'returns a Mongo::URI::SRVProtocol object' do expect(uri).to be_a(Mongo::URI::SRVProtocol) end end context 'when the scheme is invalid' do let(:string) do 'mongo://localhost:27017' end it 'raises an exception' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end end let(:scheme) { 'mongodb://' } let(:uri) { described_class.new(string) } describe 'invalid uris' do context 'string is not uri' do let(:string) { 'tyler' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'empty string' do let(:string) { '' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongo://localhost:27017' do let(:string) { 'mongo://localhost:27017' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://' do let(:string) { 'mongodb://' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost::27017' do let(:string) { 'mongodb://localhost::27017' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost::27017/' do let(:string) { 'mongodb://localhost::27017/' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://::' do let(:string) { 'mongodb://::' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost,localhost::' do let(:string) { 'mongodb://localhost,localhost::' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost::27017,abc' do let(:string) { 'mongodb://localhost::27017,abc' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost:-1' do let(:string) { 'mongodb://localhost:-1' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost:0/' do let(:string) { 'mongodb://localhost:0/' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost:65536' do let(:string) { 'mongodb://localhost:65536' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://localhost:foo' do let(:string) { 'mongodb://localhost:foo' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://[::1]:-1' do let(:string) { 'mongodb://[::1]:-1' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://[::1]:0/' do let(:string) { 'mongodb://[::1]:0/' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://[::1]:65536' do let(:string) { 'mongodb://[::1]:65536' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://[::1]:65536/' do let(:string) { 'mongodb://[::1]:65536/' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://[::1]:foo' do let(:string) { 'mongodb://[::1]:foo' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://example.com?w=1' do let(:string) { 'mongodb://example.com?w=1' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end 
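# Illustrative sketch, not part of the original spec: the shape of the failures
# exercised in these contexts, outside of RSpec:
#
#   Mongo::URI.new('mongodb://localhost:27017') # parses successfully
#   Mongo::URI.new('mongodb://localhost:-1')    # raises Mongo::Error::InvalidURI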
context 'mongodb://example.com/?w' do let(:string) { 'mongodb://example.com/?w' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://alice:foo:bar@127.0.0.1' do let(:string) { 'mongodb://alice:foo:bar@127.0.0.1' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://alice@@127.0.0.1' do let(:string) { 'mongodb://alice@@127.0.0.1' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb://alice@foo:bar@127.0.0.1' do let(:string) { 'mongodb://alice@foo:bar@127.0.0.1' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end end describe '#initialize' do context 'string is not uri' do let(:string) { 'tyler' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end end describe '#servers' do let(:string) { "#{scheme}#{servers}" } context 'single server' do let(:servers) { 'localhost' } it 'returns an array with the parsed server' do expect(uri.servers).to eq([servers]) end end context 'single server with port' do let(:servers) { 'localhost:27017' } it 'returns an array with the parsed server' do expect(uri.servers).to eq([servers]) end end context 'numerical ipv4 server' do let(:servers) { '127.0.0.1' } it 'returns an array with the parsed server' do expect(uri.servers).to eq([servers]) end end context 'numerical ipv6 server' do let(:servers) { '[::1]:27107' } it 'returns an array with the parsed server' do expect(uri.servers).to eq([servers]) end end context 'unix socket server' do let(:servers) { '%2Ftmp%2Fmongodb-27017.sock' } it 'returns an array with the parsed server' do expect(uri.servers).to eq([URI.unescape(servers)]) end end context 'multiple servers' do let(:servers) { 'localhost,127.0.0.1' } it 'returns an array with the parsed servers' do expect(uri.servers).to eq(servers.split(',')) end end context 'multiple servers with ports' do let(:servers) { '127.0.0.1:27107,localhost:27018' } it 'returns an array with the parsed servers' do expect(uri.servers).to eq(servers.split(',')) end end end describe '#client_options' do let(:db) { TEST_DB } let(:servers) { 'localhost' } let(:string) { "#{scheme}#{credentials}@#{servers}/#{db}" } let(:user) { 'tyler' } let(:password) { 's3kr4t' } let(:credentials) { "#{user}:#{password}" } let(:options) do uri.client_options end it 'includes the database in the options' do expect(options[:database]).to eq(TEST_DB) end it 'includes the user in the options' do expect(options[:user]).to eq(user) end it 'includes the password in the options' do expect(options[:password]).to eq(password) end end describe '#credentials' do let(:servers) { 'localhost' } let(:string) { "#{scheme}#{credentials}@#{servers}" } let(:user) { 'tyler' } context 'username provided' do let(:credentials) { "#{user}:" } it 'returns the username' do expect(uri.credentials[:user]).to eq(user) end end context 'username and password provided' do let(:password) { 's3kr4t' } let(:credentials) { "#{user}:#{password}" } it 'returns the username' do expect(uri.credentials[:user]).to eq(user) end it 'returns the password' do expect(uri.credentials[:password]).to eq(password) end end end describe '#database' do let(:servers) { 'localhost' } let(:string) { "#{scheme}#{servers}/#{db}" } let(:db) { 'auth-db' } context 'database provided' do it 'returns the database name' do expect(uri.database).to eq(db) end end end describe '#uri_options' do let(:servers) { 'localhost' } 
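# Illustrative sketch, not part of the original spec: uri_options normalizes the
# query parameters below into driver option names, e.g.
#
#   Mongo::URI.new('mongodb://localhost/?w=majority').uri_options[:write]
#   # => Mongo::Options::Redacted.new(w: :majority)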
let(:string) { "#{scheme}#{servers}/?#{options}" } context 'when no options were provided' do let(:string) { "#{scheme}#{servers}" } it 'returns an empty hash' do expect(uri.uri_options).to be_empty end end context 'write concern options provided' do context 'numerical w value' do let(:options) { 'w=1' } let(:concern) { Mongo::Options::Redacted.new(:w => 1)} it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'w=majority' do let(:options) { 'w=majority' } let(:concern) { Mongo::Options::Redacted.new(:w => :majority) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'journal' do let(:options) { 'journal=true' } let(:concern) { Mongo::Options::Redacted.new(:j => true) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'fsync' do let(:options) { 'fsync=true' } let(:concern) { Mongo::Options::Redacted.new(:fsync => true) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'wtimeoutMS' do let(:timeout) { 1234 } let(:options) { "w=2&wtimeoutMS=#{timeout}" } let(:concern) { Mongo::Options::Redacted.new(:w => 2, :timeout => timeout) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end end context 'read preference option provided' do let(:options) { "readPreference=#{mode}" } context 'primary' do let(:mode) { 'primary' } let(:read) { Mongo::Options::Redacted.new(:mode => :primary) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'primaryPreferred' do let(:mode) { 'primaryPreferred' } let(:read) { Mongo::Options::Redacted.new(:mode => :primary_preferred) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'secondary' do let(:mode) { 'secondary' } let(:read) { Mongo::Options::Redacted.new(:mode => :secondary) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'secondaryPreferred' do let(:mode) { 'secondaryPreferred' } let(:read) { Mongo::Options::Redacted.new(:mode => :secondary_preferred) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'nearest' do let(:mode) { 'nearest' } let(:read) { Mongo::Options::Redacted.new(:mode => :nearest) } it 'sets the read preference' do 
expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end end context 'read preference tags provided' do context 'single read preference tag set' do let(:options) do 'readPreferenceTags=dc:ny,rack:1' end let(:read) do Mongo::Options::Redacted.new(:tag_sets => [{ 'dc' => 'ny', 'rack' => '1' }]) end it 'sets the read preference tag set' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'multiple read preference tag sets' do let(:options) do 'readPreferenceTags=dc:ny&readPreferenceTags=dc:bos' end let(:read) do Mongo::Options::Redacted.new(:tag_sets => [{ 'dc' => 'ny' }, { 'dc' => 'bos' }]) end it 'sets the read preference tag sets' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end end context 'read preference max staleness option provided' do let(:options) do 'readPreference=Secondary&maxStalenessSeconds=120' end let(:read) do Mongo::Options::Redacted.new(mode: :secondary, :max_staleness => 120) end it 'sets the read preference max staleness in seconds' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end context 'when the read preference and max staleness combination is invalid' do context 'when max staleness is combined with read preference mode primary' do let(:options) do 'readPreference=primary&maxStalenessSeconds=120' end it 'raises an exception when read preference is accessed on the client' do expect { Mongo::Client.new(string).server_selector }.to raise_exception(Mongo::Error::InvalidServerPreference) end end context 'when the max staleness value is too small' do let(:options) do 'readPreference=secondary&maxStalenessSeconds=89' end it 'does not raise an exception until the read preference is used' do expect(Mongo::Client.new(string).read_preference).to eq(BSON::Document.new(mode: :secondary, max_staleness: 89)) end end end end context 'replica set option provided' do let(:rs_name) { TEST_SET } let(:options) { "replicaSet=#{rs_name}" } it 'sets the replica set option' do expect(uri.uri_options[:replica_set]).to eq(rs_name) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:replica_set]).to eq(rs_name) end end context 'auth mechanism provided' do let(:options) { "authMechanism=#{mechanism}" } context 'plain' do let(:mechanism) { 'PLAIN' } let(:expected) { :plain } it 'sets the auth mechanism to :plain' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 'mongodb-cr' do let(:mechanism) { 'MONGODB-CR' } let(:expected) { :mongodb_cr } it 'sets the auth mechanism to :mongodb_cr' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 
'gssapi' do let(:mechanism) { 'GSSAPI' } let(:expected) { :gssapi } it 'sets the auth mechanism to :gssapi' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 'scram-sha-1' do let(:mechanism) { 'SCRAM-SHA-1' } let(:expected) { :scram } it 'sets the auth mechanism to :scram' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 'mongodb-x509' do let(:mechanism) { 'MONGODB-X509' } let(:expected) { :mongodb_x509 } it 'sets the auth mechanism to :mongodb_x509' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end context 'when a username is not provided' do it 'recognizes the mechanism with no username' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) expect(Mongo::Client.new(string.downcase).options[:user]).to be_nil end end end end context 'auth source provided' do let(:options) { "authSource=#{source}" } context 'regular db' do let(:source) { 'foo' } it 'sets the auth source to the database' do expect(uri.uri_options[:auth_source]).to eq(source) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_source]).to eq(source) end end context '$external' do let(:source) { '$external' } let(:expected) { :external } it 'sets the auth source to :external' do expect(uri.uri_options[:auth_source]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_source]).to eq(expected) end end end context 'auth mechanism properties provided' do context 'service_name' do let(:options) do "authMechanismProperties=SERVICE_NAME:#{service_name}" end let(:service_name) { 'foo' } let(:expected) { Mongo::Options::Redacted.new({ service_name: service_name }) } it 'sets the auth mechanism properties' do expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end context 'canonicalize_host_name' do let(:options) do "authMechanismProperties=CANONICALIZE_HOST_NAME:#{canonicalize_host_name}" end let(:canonicalize_host_name) { 'true' } let(:expected) { Mongo::Options::Redacted.new({ canonicalize_host_name: true }) } it 'sets the auth mechanism properties' do expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end context 'service_realm' do let(:options) do "authMechanismProperties=SERVICE_REALM:#{service_realm}" end let(:service_realm) { 'dumdum' } let(:expected) { Mongo::Options::Redacted.new({ service_realm: service_realm }) } it 'sets the auth mechanism properties' do 
expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end context 'multiple properties' do let(:options) do "authMechanismProperties=SERVICE_REALM:#{service_realm}," + "CANONICALIZE_HOST_NAME:#{canonicalize_host_name}," + "SERVICE_NAME:#{service_name}" end let(:service_name) { 'foo' } let(:canonicalize_host_name) { 'true' } let(:service_realm) { 'dumdum' } let(:expected) do Mongo::Options::Redacted.new({ service_name: service_name, canonicalize_host_name: true, service_realm: service_realm }) end it 'sets the auth mechanism properties' do expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end end context 'connectTimeoutMS' do let(:options) { "connectTimeoutMS=4567" } it 'sets the connect timeout' do expect(uri.uri_options[:connect_timeout]).to eq(4.567) end end context 'socketTimeoutMS' do let(:options) { "socketTimeoutMS=8910" } it 'sets the socket timeout' do expect(uri.uri_options[:socket_timeout]).to eq(8.910) end end context 'when providing serverSelectionTimeoutMS' do let(:options) { "serverSelectionTimeoutMS=3561" } it 'sets the server selection timeout' do expect(uri.uri_options[:server_selection_timeout]).to eq(3.561) end end context 'when providing localThresholdMS' do let(:options) { "localThresholdMS=3561" } it 'sets the local threshold' do expect(uri.uri_options[:local_threshold]).to eq(3.561) end end context 'when providing maxPoolSize' do let(:max_pool_size) { 10 } let(:options) { "maxPoolSize=#{max_pool_size}" } it 'sets the max pool size option' do expect(uri.uri_options[:max_pool_size]).to eq(max_pool_size) end end context 'when providing minPoolSize' do let(:min_pool_size) { 5 } let(:options) { "minPoolSize=#{min_pool_size}" } it 'sets the min pool size option' do expect(uri.uri_options[:min_pool_size]).to eq(min_pool_size) end end context 'when providing waitQueueTimeoutMS' do let(:wait_queue_timeout) { 500 } let(:options) { "waitQueueTimeoutMS=#{wait_queue_timeout}" } it 'sets the wait queue timeout option' do expect(uri.uri_options[:wait_queue_timeout]).to eq(0.5) end end context 'ssl' do let(:options) { "ssl=#{ssl}" } context 'true' do let(:ssl) { true } it 'sets the ssl option to true' do expect(uri.uri_options[:ssl]).to be true end end context 'false' do let(:ssl) { false } it 'sets the ssl option to false' do expect(uri.uri_options[:ssl]).to be false end end end context 'grouped and non-grouped options provided' do let(:options) { 'w=1&ssl=true' } it 'does not overshadow top level options' do expect(uri.uri_options).not_to be_empty end end context 'when an invalid option is provided' do let(:options) { 'invalidOption=10' } let(:uri_options) do uri.uri_options end it 'does not raise an exception' do expect(uri_options).to be_empty end context 'when an invalid option is combined with valid options' do let(:options) { 'invalidOption=10&waitQueueTimeoutMS=500&ssl=true' } it 'does not raise an exception' do expect(uri_options).not_to be_empty end it 'sets the valid options' do expect(uri_options[:wait_queue_timeout]).to eq(0.5) expect(uri_options[:ssl]).to be true end end end context 'when an app name option is provided' do let(:options) { "appname=reports" } it 'sets the app name on the client' do expect(Mongo::Client.new(string).options[:app_name]).to 
eq(:reports) end end context 'when a supported compressors option is provided' do let(:options) { "compressors=zlib" } it 'sets the compressors as an array on the client' do expect(Mongo::Client.new(string).options[:compressors]).to eq(['zlib']) end end context 'when a non-supported compressors option is provided' do let(:options) { "compressors=snoopy" } let(:client) do Mongo::Client.new(string) end it 'sets no compressors on the client and warns' do expect(Mongo::Logger.logger).to receive(:warn) expect(client.options[:compressors]).to be_nil end end context 'when a zlibCompressionLevel option is provided' do let(:options) { "zlibCompressionLevel=6" } it 'sets the zlib compression level on the client' do expect(Mongo::Client.new(string).options[:zlib_compression_level]).to eq(6) end end end end mongo-2.5.1/spec/mongo/operation/0000755000004100000410000000000013257253113016741 5ustar www-datawww-datamongo-2.5.1/spec/mongo/operation/commands/0000755000004100000410000000000013257253113020542 5ustar www-datawww-datamongo-2.5.1/spec/mongo/operation/commands/aggregate_spec.rb0000644000004100000410000000251413257253113024031 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Commands::Aggregate do let(:options) do {} end let(:selector) do { :aggregate => TEST_COLL, :pipeline => [], } end let(:spec) do { :selector => selector, :options => options, :db_name => TEST_DB } end let(:op) { described_class.new(spec) } describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to be(spec) end end end describe '#==' do context ' when two ops have different specs' do let(:other_selector) do { :aggregate => 'another_test_coll', :pipeline => [], } end let(:other_spec) do { :selector => other_selector, :options => options, :db_name => TEST_DB, } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end describe '#execute' do context 'when the aggregation fails' do let(:selector) do { :aggregate => TEST_COLL, :pipeline => [{ '$invalid' => 'operator' }], } end it 'raises an exception' do expect { op.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end end mongo-2.5.1/spec/mongo/operation/commands/map_reduce_spec.rb0000644000004100000410000000436413257253113024214 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Commands::MapReduce do let(:map) do %Q{ function() { emit(this.name, { population: this.population }); }} end let(:reduce) do %Q{ function(key, values) { var result = { population: 0 }; values.forEach(function(value) { result.population += value.population; }); return result; }} end let(:options) do {} end let(:selector) do { :mapreduce => TEST_COLL, :map => map, :reduce => reduce, :query => {}, :out => { inline: 1 } } end let(:spec) do { :selector => selector, :options => options, :db_name => TEST_DB } end let(:op) do described_class.new(spec) end describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to be(spec) end end end describe '#==' do context ' when two ops have different specs' do let(:other_selector) do { :mapreduce => 'other_test_coll', :map => '', :reduce => '', } end let(:other_spec) do { :selector => other_selector, :options => {}, :db_name => TEST_DB, } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end describe '#execute' do let(:documents) do [ { name: 'Berlin', population: 3000000 }, { name: 'London', population: 9000000 } ] end before do 
authorized_collection.insert_many(documents) end after do authorized_collection.delete_many end context 'when the map/reduce succeeds' do let(:response) do op.execute(authorized_primary) end it 'returns the response' do expect(response).to be_successful end end context 'when the map/reduce fails' do let(:selector) do { :mapreduce => TEST_COLL, :map => map, :reduce => reduce, :query => {} } end it 'raises an exception' do expect { op.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end end mongo-2.5.1/spec/mongo/operation/commands/command_spec.rb0000644000004100000410000000312513257253113023520 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Commands::Command do let(:selector) { { :ismaster => 1 } } let(:options) { { :limit => -1 } } let(:spec) do { :selector => selector, :options => options, :db_name => TEST_DB } end let(:op) { described_class.new(spec) } describe '#initialize' do it 'sets the spec' do expect(op.spec).to be(spec) end end describe '#==' do context 'when the ops have different specs' do let(:other_selector) { { :ping => 1 } } let(:other_spec) do { :selector => other_selector, :options => {}, :db_name => 'test', } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end describe '#execute' do context 'when the command succeeds' do let(:response) do op.execute(authorized_primary) end it 'returns the response' do expect(response).to be_successful end end context 'when the command fails' do let(:selector) do { notacommand: 1 } end it 'raises an exception' do expect { op.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when a document exceeds max bson size' do let(:selector) do { :ismaster => '1'*17000000 } end it 'raises an error' do expect { op.execute(authorized_primary) }.to raise_error(Mongo::Error::MaxBSONSize) end end end end mongo-2.5.1/spec/mongo/operation/commands/aggregate/0000755000004100000410000000000013257253113022470 5ustar www-datawww-datamongo-2.5.1/spec/mongo/operation/commands/aggregate/result_spec.rb0000644000004100000410000000352513257253113025352 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Commands::Aggregate::Result do let(:result) do described_class.new(reply) end let(:cursor_id) { 0 } let(:documents) { [] } let(:flags) { [] } let(:starting_from) { 0 } let(:reply) do Mongo::Protocol::Reply.new.tap do |reply| reply.instance_variable_set(:@flags, flags) reply.instance_variable_set(:@cursor_id, cursor_id) reply.instance_variable_set(:@starting_from, starting_from) reply.instance_variable_set(:@number_returned, documents.size) reply.instance_variable_set(:@documents, documents) end end let(:aggregate) do [ { '_id' => 'New York', 'totalpop' => 40270 }, { '_id' => 'Berlin', 'totalpop' => 103056 } ] end describe '#cursor_id' do context 'when the result is not using a cursor' do let(:documents) do [{ 'result' => aggregate, 'ok' => 1.0 }] end it 'returns zero' do expect(result.cursor_id).to eq(0) end end context 'when the result is using a cursor' do let(:documents) do [{ 'cursor' => { 'id' => 15, 'ns' => 'test', 'firstBatch' => aggregate }, 'ok' => 1.0 }] end it 'returns the cursor id' do expect(result.cursor_id).to eq(15) end end end describe '#documents' do context 'when the result is not using a cursor' do let(:documents) do [{ 'result' => aggregate, 'ok' => 1.0 }] end it 'returns the documents' do expect(result.documents).to eq(aggregate) end end context 'when the result is 
using a cursor' do let(:documents) do [{ 'cursor' => { 'id' => 15, 'ns' => 'test', 'firstBatch' => aggregate }, 'ok' => 1.0 }] end it 'returns the documents' do expect(result.documents).to eq(aggregate) end end end end mongo-2.5.1/spec/mongo/operation/commands/indexes_spec.rb0000644000004100000410000000112513257253113023537 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Commands::Indexes do describe '#execute' do let(:spec) do { name: 1 } end before do authorized_collection.indexes.create_one(spec, unique: true) end after do authorized_collection.indexes.drop_one('name_1') end let(:operation) do described_class.new(db_name: TEST_DB, coll_name: TEST_COLL) end let(:indexes) do operation.execute(authorized_primary) end it 'returns the indexes for the collection' do expect(indexes.documents.size).to eq(2) end end end mongo-2.5.1/spec/mongo/operation/commands/collections_info_spec.rb0000644000004100000410000000131313257253113025430 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Commands::CollectionsInfo do let(:spec) do { :db_name => TEST_DB } end let(:names) do [ 'berlin', 'london' ] end let(:op) do described_class.new(spec) end describe '#execute' do before do names.each do |name| authorized_client[name].insert_one(x: 1) end end after do names.each do |name| authorized_client[name].drop end end let(:info) do docs = op.execute(authorized_primary).documents docs.collect { |info| info['name'].sub("#{TEST_DB}.", '') } end it 'returns the list of collection info' do expect(info).to include(*names) end end end mongo-2.5.1/spec/mongo/operation/write/0000755000004100000410000000000013257253113020073 5ustar www-datawww-datamongo-2.5.1/spec/mongo/operation/write/update_spec.rb0000644000004100000410000001426013257253113022717 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Update do let(:document) do { :q => { :foo => 1 }, :u => { :$set => { :bar => 1 } }, :multi => true, :upsert => false } end let(:spec) do { :update => document, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(:w => 1), :ordered => true } end let(:update) do described_class.new(spec) end describe '#initialize' do context 'spec' do it 'sets the spec' do expect(update.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two ops have the same specs' do let(:other) { described_class.new(spec) } it 'returns true' do expect(update).to eq(other) end end context 'when two ops have different specs' do let(:other_doc) { {:q => { :foo => 1 }, :u => { :$set => { :bar => 1 } }, :multi => true, :upsert => true } } let(:other_spec) do { :update => other_doc, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(:w => 1), :ordered => true } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(update).not_to eq(other) end end end end describe '#execute' do before do authorized_collection.insert_many([ { name: 'test', field: 'test', other: 'test' }, { name: 'testing', field: 'test', other: 'test' } ]) end after do authorized_collection.delete_many end context 'when updating a single document' do let(:update) do described_class.new({ update: document, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(:w => 1) }) end context 'when the update succeeds' do let(:document) do { 'q' => { name: 'test' }, 'u' => { '$set' => { field: 'blah' }} } end let(:result) do update.execute(authorized_primary) end it 'updates the document' 
do expect(result.written_count).to eq(1) end it 'reports the modified count' do expect(result.modified_count).to eq(1) end it 'reports the matched count' do expect(result.matched_count).to eq(1) end it 'reports the upserted id as nil' do expect(result.upserted_id).to eq(nil) end end context 'when the update fails' do let(:document) do { 'q' => { name: 'test' }, 'u' => { '$st' => { field: 'blah' } } } end it 'raises an exception' do expect { update.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when updating multiple documents' do let(:update) do described_class.new({ update: document, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(:w => 1) }) end context 'when the updates succeed' do let(:document) do { 'q' => { field: 'test' }, 'u' => { '$set' => { other: 'blah' }}, 'multi' => true } end let(:result) do update.execute(authorized_primary) end it 'updates the documents' do expect(result.written_count).to eq(2) end it 'reports the modified count' do expect(result.modified_count).to eq(2) end it 'reports the matched count' do expect(result.matched_count).to eq(2) end it 'reports the upserted id as nil' do expect(result.upserted_id).to eq(nil) end end context 'when an update fails' do let(:document) do { 'q' => { name: 'test' }, 'u' => { '$st' => { field: 'blah' } }, 'multi' => true } end it 'raises an exception' do expect { update.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when a document exceeds max bson size' do let(:document) do { 'q' => { name: 't'*17000000}, 'u' => { '$set' => { field: 'blah' } } } end it 'raises an error' do expect { update.execute(authorized_primary) }.to raise_error(Mongo::Error::MaxBSONSize) end end context 'when upsert is true' do let(:document) do { 'q' => { field: 'non-existent' }, 'u' => { '$set' => { other: 'blah' }}, 'upsert' => true } end let(:result) do update.execute(authorized_primary) end it 'inserts the document' do expect(result.written_count).to eq(1) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to eq(0) end it 'returns the upserted id' do expect(result.upserted_id).to be_a(BSON::ObjectId) end end end context 'when write concern { w: 0 } is used', unless: op_msg_enabled? 
do let(:update) do described_class.new({ update: document, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(:w => 0) }) end let(:document) do { 'q' => { name: 'test' }, 'u' => { '$set' => { field: 'blah' }}, limit: 1 } end let(:result) do update.execute(authorized_primary) end before do expect(update).to receive(:execute_message).and_call_original end it 'uses op codes instead of write commands' do expect(result.written_count).to eq(0) end end end end mongo-2.5.1/spec/mongo/operation/write/command/0000755000004100000410000000000013257253113021511 5ustar www-datawww-datamongo-2.5.1/spec/mongo/operation/write/command/update_spec.rb0000644000004100000410000001450013257253113024332 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::Update do let(:updates) { [{:q => { :foo => 1 }, :u => { :$set => { :bar => 1 } }, :multi => true, :upsert => false }] } let(:write_concern) do Mongo::WriteConcern.get(WRITE_CONCERN) end let(:session) { nil } let(:spec) do { :updates => updates, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => write_concern, :ordered => true, :session => session } end let(:op) { described_class.new(spec) } describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two ops have the same specs' do let(:other) { described_class.new(spec) } it 'returns true' do expect(op).to eq(other) end end context 'when two ops have different specs' do let(:other_updates) { [{:q => { :bar => 1 }, :u => { :$set => { :bar => 2 } }, :multi => true, :upsert => false }] } let(:other_spec) do { :updates => other_updates, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(WRITE_CONCERN), :ordered => true } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end end describe 'write concern' do context 'when write concern is not specified' do let(:spec) do { :updates => updates, :db_name => TEST_DB, :coll_name => TEST_COLL, :ordered => true } end it 'does not include write concern in the selector' do expect(op.send(:selector)[:writeConcern]).to be_nil end end context 'when write concern is specified' do it 'includes write concern in the selector' do expect(op.send(:selector)[:writeConcern]).to eq(write_concern.options) end end end describe '#message' do context 'when the server supports OP_MSG', if: op_msg_enabled? do let(:global_args) do { update: TEST_COLL, ordered: true, writeConcern: write_concern.options, '$db' => TEST_DB, lsid: session.session_id } end let(:expected_payload_1) do { type: 1, payload: { identifier: 'updates', sequence: updates } } end let(:session) do authorized_client.start_session end context 'when the topology is replica set or sharded', if: (replica_set? || sharded?) && op_msg_enabled? do let(:expected_global_args) do global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end context 'when the topology is standalone', if: standalone? && op_msg_enabled? 
do let(:expected_global_args) do global_args end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end context 'when an implicit session is created and the topology is then updated and the server does not support sessions' do let(:expected_global_args) do global_args.delete(:lsid) global_args end before do session.instance_variable_set(:@options, { implicit: true }) allow(authorized_primary.features).to receive(:sessions_enabled?).and_return(false) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end end context 'when the write concern is 0' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end context 'when the topology is replica set or sharded', if: (replica_set? || sharded?) && op_msg_enabled? do let(:expected_global_args) do global_args.delete(:lsid) global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end context 'when the topology is standalone', if: standalone? && op_msg_enabled? do let(:expected_global_args) do global_args.delete(:lsid) global_args end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end end end context 'when the server does not support OP_MSG' do let(:expected_selector) do { :update => TEST_COLL, :updates => updates, :ordered => true, :writeConcern => write_concern.options } end it 'creates the correct Command message', unless: op_msg_enabled? 
do expect(Mongo::Protocol::Query).to receive(:new).with(TEST_DB, '$cmd', expected_selector, { limit: -1 }) op.send(:message, authorized_primary) end end end end mongo-2.5.1/spec/mongo/operation/write/command/remove_user_spec.rb0000644000004100000410000000165613257253113025413 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::RemoveUser do describe '#execute' do before do root_authorized_client.database.users.create( 'durran', password: 'password', roles: [ Mongo::Auth::Roles::READ_WRITE ] ) end let(:operation) do described_class.new(user_name: 'durran', db_name: TEST_DB) end context 'when user removal was successful' do let!(:response) do operation.execute(root_authorized_primary) end it 'removes the user from the database' do expect(response).to be_successful end end context 'when removal was not successful' do before do operation.execute(root_authorized_primary) end it 'raises an exception' do expect { operation.execute(root_authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end end mongo-2.5.1/spec/mongo/operation/write/command/drop_index_spec.rb0000644000004100000410000000202513257253113025202 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::DropIndex do describe '#execute' do context 'when the index exists' do let(:spec) do { another: -1 } end before do authorized_collection.indexes.create_one(spec, unique: true) end let(:operation) do described_class.new( db_name: TEST_DB, coll_name: TEST_COLL, index_name: 'another_-1' ) end let(:response) do operation.execute(authorized_primary) end it 'removes the index' do expect(response).to be_successful end end context 'when the index does not exist' do let(:operation) do described_class.new( db_name: TEST_DB, coll_name: TEST_COLL, index_name: 'another_blah' ) end it 'raises an exception' do expect { operation.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end end mongo-2.5.1/spec/mongo/operation/write/command/create_index_spec.rb0000644000004100000410000000254613257253113025511 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::CreateIndex do describe '#execute' do context 'when the index is created' do let(:spec) do { key: { random: 1 }, name: 'random_1', unique: true } end let(:operation) do described_class.new(indexes: [ spec ], db_name: TEST_DB, coll_name: TEST_COLL) end let(:response) do operation.execute(authorized_primary) end after do authorized_collection.indexes.drop_one('random_1') end it 'returns ok' do expect(response).to be_successful end end context 'when index creation fails' do let(:spec) do { key: { random: 1 }, name: 'random_1', unique: true } end let(:operation) do described_class.new(indexes: [ spec ], db_name: TEST_DB, coll_name: TEST_COLL) end let(:second_operation) do described_class.new(indexes: [ spec.merge(unique: false) ], db_name: TEST_DB, coll_name: TEST_COLL) end before do operation.execute(authorized_primary) end after do authorized_collection.indexes.drop_one('random_1') end it 'raises an exception' do expect { second_operation.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end end mongo-2.5.1/spec/mongo/operation/write/command/delete_spec.rb0000644000004100000410000001462713257253113024324 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::Delete do let(:write_concern) do Mongo::WriteConcern.get(WRITE_CONCERN) end let(:session) { nil } let(:deletes) { [{:q => { 
:foo => 1 }, :limit => 1}] } let(:spec) do { :deletes => deletes, :db_name => authorized_collection.database.name, :coll_name => authorized_collection.name, :write_concern => write_concern, :ordered => true, :session => session } end let(:op) { described_class.new(spec) } describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two ops have the same specs' do let(:other) { described_class.new(spec) } it 'returns true' do expect(op).to eq(other) end end context 'when two ops have different specs' do let(:other_deletes) { [{:q => { :bar => 1 }, :limit => 1}] } let(:other_spec) do { :deletes => other_deletes, :db_name => authorized_collection.database.name, :coll_name => authorized_collection.name, :write_concern => write_concern, :ordered => true } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end end describe 'write concern' do context 'when write concern is not specified' do let(:spec) do { :deletes => deletes, :db_name => authorized_collection.database.name, :coll_name => authorized_collection.name, :ordered => true } end it 'does not include write concern in the selector' do expect(op.send(:selector)[:writeConcern]).to be_nil end end context 'when write concern is specified' do it 'includes write concern in the selector' do expect(op.send(:selector)[:writeConcern]).to eq(write_concern.options) end end end describe '#message' do context 'when the server supports OP_MSG' do let(:global_args) do { delete: TEST_COLL, ordered: true, writeConcern: write_concern.options, '$db' => TEST_DB, lsid: session.session_id } end let(:expected_payload_1) do { type: 1, payload: { identifier: 'deletes', sequence: deletes } } end let(:session) do authorized_client.start_session end context 'when the topology is replica set or sharded', if: (replica_set? || sharded?) && op_msg_enabled? do let(:expected_global_args) do global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end context 'when the topology is standalone', if: standalone? && op_msg_enabled? do let(:expected_global_args) do global_args end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end context 'when an implicit session is created and the topology is then updated and the server does not support sessions' do let(:expected_global_args) do global_args.delete(:lsid) global_args end before do session.instance_variable_set(:@options, { implicit: true }) allow(authorized_primary.features).to receive(:sessions_enabled?).and_return(false) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end end context 'when the write concern is 0' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end context 'when the topology is replica set or sharded', if: (replica_set? || sharded?) && op_msg_enabled? 
do let(:expected_global_args) do global_args.delete(:lsid) global_args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end context 'when the topology is standalone', if: standalone? && op_msg_enabled? do let(:expected_global_args) do global_args.delete(:lsid) global_args end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end end end context 'when the server does not support OP_MSG' do let(:expected_selector) do { :delete => authorized_collection.name, :deletes => deletes, :writeConcern => write_concern.options, :ordered => true } end it 'creates the correct query wire protocol message', unless: op_msg_enabled? do expect(Mongo::Protocol::Query).to receive(:new).with(authorized_collection.database.name, '$cmd', expected_selector, { limit: -1 } ) op.send(:message, authorized_primary) end end end end mongo-2.5.1/spec/mongo/operation/write/command/update_user_spec.rb0000644000004100000410000000170513257253113025373 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::UpdateUser do describe '#execute' do let(:user) do Mongo::Auth::User.new( user: 'durran', password: 'password', roles: [ Mongo::Auth::Roles::READ_WRITE ] ) end let(:user_updated) do Mongo::Auth::User.new( user: 'durran', password: '123', roles: [ Mongo::Auth::Roles::READ ] ) end let(:operation) do described_class.new(user: user_updated, db_name: TEST_DB) end before do root_authorized_client.database.users.create(user) end after do root_authorized_client.database.users.remove('durran') end context 'when user update was successful' do let!(:response) do operation.execute(root_authorized_primary) end it 'updates the user in the database' do expect(response).to be_successful end end end end mongo-2.5.1/spec/mongo/operation/write/command/insert_spec.rb0000644000004100000410000001701613257253113024361 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::Insert do let(:documents) { [{ :_id => 1, :foo => 1 }] } let(:session) { nil } let(:spec) do { :documents => documents, :db_name => authorized_collection.database.name, :coll_name => authorized_collection.name, :write_concern => write_concern, :ordered => true, :session => session } end let(:write_concern) do Mongo::WriteConcern.get(WRITE_CONCERN) end let(:op) { described_class.new(spec) } describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two ops have the same specs' do let(:other) { described_class.new(spec) } it 'returns true' do expect(op).to eq(other) end end context 'when two ops have different specs' do let(:other_documents) { [{ :bar => 1 }] } let(:other_spec) do { :documents => other_documents, :db_name => authorized_collection.database.name, :insert => authorized_collection.name, :write_concern => write_concern.options, :ordered => true } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end end describe 'write concern' do context 'when write concern is not specified' do let(:spec) do { :documents => 
documents, :db_name => authorized_collection.database.name, :coll_name => authorized_collection.name, :ordered => true } end it 'does not include write concern in the selector' do expect(op.send(:selector)[:writeConcern]).to be_nil end end context 'when write concern is specified' do it 'includes write concern in the selector' do expect(op.send(:selector)[:writeConcern]).to eq(write_concern.options) end end end describe '#message' do context 'when the server supports OP_MSG', if: op_msg_enabled? do let(:documents) do [ { foo: 1}, { bar: 2 }] end let(:global_args) do { insert: TEST_COLL, ordered: true, writeConcern: write_concern.options, '$db' => TEST_DB, lsid: session.session_id } end let(:expected_payload_1) do { type: 1, payload: { identifier: 'documents', sequence: documents } } end let(:session) do authorized_client.start_session end context 'when the topology is replica set or sharded', if: (replica_set? || sharded?) && op_msg_enabled? do let(:expected_global_args) do global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], { validating_keys: true }, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end context 'when the topology is standalone', if: standalone? && op_msg_enabled? do let(:expected_global_args) do global_args end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], { validating_keys: true }, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end context 'when an implicit session is created and the topology is then updated and the server does not support sessions' do let(:expected_global_args) do global_args.delete(:lsid) global_args end before do session.instance_variable_set(:@options, { implicit: true }) allow(authorized_primary.features).to receive(:sessions_enabled?).and_return(false) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:none], { validating_keys: true }, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end end context 'when the write concern is 0' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end context 'when the topology is replica set or sharded', if: (replica_set? || sharded?) && op_msg_enabled? do let(:expected_global_args) do global_args.delete(:lsid) global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], { validating_keys: true }, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end context 'when the topology is standalone', if: standalone? && op_msg_enabled? 
do let(:expected_global_args) do global_args.delete(:lsid) global_args end it 'creates the correct OP_MSG message' do authorized_client.command(ping:1) expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], { validating_keys: true }, expected_global_args, expected_payload_1) op.send(:message, authorized_primary) end end end end context 'when the server does not support OP_MSG' do let(:expected_selector) do { :documents => documents, :insert => authorized_collection.name, :writeConcern => write_concern.options, :ordered => true } end it 'creates a query wire protocol message with correct specs', unless: op_msg_enabled? do expect(Mongo::Protocol::Query).to receive(:new).with(authorized_collection.database.name, '$cmd', expected_selector, { limit: -1, validating_keys: true }) op.send(:message, authorized_primary) end end end end mongo-2.5.1/spec/mongo/operation/write/create_user_spec.rb0000644000004100000410000000173013257253113023734 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Command::CreateUser do describe '#execute' do let(:user) do Mongo::Auth::User.new( user: 'durran', password: 'password', roles: [ Mongo::Auth::Roles::READ_WRITE ] ) end let(:operation) do described_class.new(user: user, db_name: TEST_DB) end after do root_authorized_client.database.users.remove('durran') end context 'when user creation was successful' do let!(:response) do operation.execute(root_authorized_primary) end it 'saves the user in the database' do expect(response).to be_successful end end context 'when creation was not successful' do it 'raises an exception' do expect { operation.execute(root_authorized_primary) operation.execute(root_authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end end mongo-2.5.1/spec/mongo/operation/write/bulk/0000755000004100000410000000000013257253113021030 5ustar www-datawww-datamongo-2.5.1/spec/mongo/operation/write/bulk/update_spec.rb0000644000004100000410000001260713257253113023657 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Bulk::Update do let(:documents) do [{ :q => { :foo => 1 }, :u => { :$set => { :bar => 1 } }, :multi => true, :upsert => false }] end let(:spec) do { updates: documents, db_name: authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern, ordered: true } end let(:write_concern) do Mongo::WriteConcern.get(WRITE_CONCERN) end let(:op) do described_class.new(spec) end describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two ops have the same specs' do let(:other) { described_class.new(spec) } it 'returns true' do expect(op).to eq(other) end end context 'when two ops have different specs' do let(:other_docs) do [ {:q => { :foo => 1 }, :u => { :$set => { :bar => 1 } }, :multi => true, :upsert => true } ] end let(:other_spec) do { updates: other_docs, db_name: authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern, ordered: true } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end end describe '#execute' do before do authorized_collection.insert_many([ { name: 'test', field: 'test', other: 'test' }, { name: 'testing', field: 'test', other: 'test' } ]) end after do authorized_collection.delete_many end context 'when updating a single document' do context 'when the update passes' do let(:documents) do 
[{ 'q' => { other: 'test' }, 'u' => { '$set' => { field: 'blah' }}, 'multi' => false }] end it 'updates the document' do op.execute(authorized_primary) expect(authorized_collection.find(field: 'blah').count).to eq(1) end end end context 'when updating multiple documents' do let(:update) do described_class.new({ updates: documents, db_name: authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern }) end context 'when the updates succeed' do let(:documents) do [{ 'q' => { other: 'test' }, 'u' => { '$set' => { field: 'blah' }}, 'multi' => true }] end it 'updates the documents' do op.execute(authorized_primary) expect(authorized_collection.find(field: 'blah').count).to eq(2) end end end context 'when the updates are ordered' do let(:documents) do [ { 'q' => { name: 'test' }, 'u' => { '$st' => { field: 'blah' }}, 'multi' => true}, { 'q' => { field: 'test' }, 'u' => { '$set' => { other: 'blah' }}, 'multi' => true } ] end let(:spec) do { updates: documents, db_name: authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern, ordered: true } end let(:failing_update) do described_class.new(spec) end context 'when the update fails' do context 'when write concern is acknowledged' do it 'aborts after first error' do failing_update.execute(authorized_primary) expect(authorized_collection.find(other: 'blah').count).to eq(0) end end context 'when write concern is unacknowledged' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end it 'aborts after first error' do failing_update.execute(authorized_primary) expect(authorized_collection.find(other: 'blah').count).to eq(0) end end end end context 'when the updates are unordered' do let(:documents) do [ { 'q' => { name: 'test' }, 'u' => { '$st' => { field: 'blah' }}, 'multi' => true}, { 'q' => { field: 'test' }, 'u' => { '$set' => { other: 'blah' }}, 'multi' => false } ] end let(:spec) do { updates: documents, db_name: authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern, ordered: false } end let(:failing_update) do described_class.new(spec) end context 'when the update fails' do context 'when write concern is acknowledged' do it 'does not abort after first error' do failing_update.execute(authorized_primary) expect(authorized_collection.find(other: 'blah').count).to eq(1) end end context 'when write concern is unacknowledged' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end it 'does not abort after first error' do failing_update.execute(authorized_primary) expect(authorized_collection.find(other: 'blah').count).to eq(1) end end end end end end mongo-2.5.1/spec/mongo/operation/write/bulk/delete_spec.rb0000644000004100000410000001237013257253113023634 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Bulk::Delete do let(:documents) do [ { 'q' => { foo: 1 }, 'limit' => 1 } ] end let(:spec) do { :deletes => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(WRITE_CONCERN), :ordered => true } end let(:op) { described_class.new(spec) } describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two ops have the same specs' do let(:other) { described_class.new(spec) } it 'returns true' do expect(op).to eq(other) end end context 'when two ops have different specs' do let(:other_docs) do [ { 'q' => { bar: 1 }, 'limit' => 1 } ] end 
let(:other_spec) do { :deletes => other_docs, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(WRITE_CONCERN), :ordered => true } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end end describe '#execute' do before do authorized_collection.insert_many([ { name: 'test', field: 'test' }, { name: 'testing', field: 'test' } ]) end after do authorized_collection.delete_many end context 'when deleting a single document' do let(:op) do described_class.new({ deletes: documents, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(w: 1) }) end context 'when the delete succeeds' do let(:documents) do [{ 'q' => { field: 'test' }, 'limit' => 1 }] end it 'deletes the document from the database' do op.execute(authorized_primary) expect(authorized_collection.find.count).to eq(1) end end end context 'when deleting multiple documents' do let(:op) do described_class.new({ deletes: documents, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(WRITE_CONCERN) }) end context 'when the deletes succeed' do let(:documents) do [{ 'q' => { field: 'test' }, 'limit' => 0 }] end it 'deletes the documents from the database' do op.execute(authorized_primary) expect(authorized_collection.find.count).to eq(0) end end end context 'when the deletes are ordered' do let(:documents) do [ { q: { '$set' => { a: 1 } }, limit: 0 }, { 'q' => { field: 'test' }, 'limit' => 1 } ] end let(:spec) do { :deletes => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(WRITE_CONCERN), :ordered => true } end let(:failing_delete) do described_class.new(spec) end context 'when the delete fails' do context 'when write concern is acknowledged' do let(:write_concern) do Mongo::WriteConcern.get(WRITE_CONCERN) end it 'aborts after first error' do failing_delete.execute(authorized_primary) expect(authorized_collection.find.count).to eq(2) end end context 'when write concern is unacknowledged' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end it 'aborts after first error' do failing_delete.execute(authorized_primary) expect(authorized_collection.find.count).to eq(2) end end end end context 'when the deletes are unordered' do let(:documents) do [ { q: { '$set' => { a: 1 } }, limit: 0 }, { 'q' => { field: 'test' }, 'limit' => 1 } ] end let(:spec) do { :deletes => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(WRITE_CONCERN), :ordered => false } end let(:failing_delete) do described_class.new(spec) end context 'when the delete fails' do context 'when write concern is acknowledged' do let(:write_concern) do Mongo::WriteConcern.get(w: 1) end it 'does not abort after first error' do failing_delete.execute(authorized_primary) expect(authorized_collection.find.count).to eq(1) end end context 'when write concern is unacknowledged' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end it 'does not abort after first error' do failing_delete.execute(authorized_primary) expect(authorized_collection.find.count).to eq(1) end end end end end end mongo-2.5.1/spec/mongo/operation/write/bulk/insert_spec.rb0000644000004100000410000001234113257253113023674 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Bulk::Insert do let(:documents) do [{ :name => 'test' }] end let(:write_concern) do Mongo::WriteConcern.get(WRITE_CONCERN) end let(:spec) do { documents: documents, db_name: 
authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern } end let(:op) do described_class.new(spec) end after do authorized_collection.delete_many end describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two inserts have the same specs' do let(:other) do described_class.new(spec) end it 'returns true' do expect(op).to eq(other) end end context 'when two inserts have different specs' do let(:other_docs) do [{ :bar => 1 }] end let(:other_spec) do { :documents => other_docs, :db_name => 'test', :coll_name => 'coll_name', :write_concern => { 'w' => 1 }, :ordered => true } end let(:other) do described_class.new(other_spec) end it 'returns false' do expect(op).not_to eq(other) end end end end describe 'document ids' do context 'when documents do not contain an id' do let(:documents) do [{ 'field' => 'test' }, { 'field' => 'test' }] end let(:inserted_ids) do op.execute(authorized_primary).inserted_ids end let(:collection_ids) do authorized_collection.find(field: 'test').collect { |d| d['_id'] } end it 'adds an id to the documents' do expect(inserted_ids).to eq(collection_ids) end end end describe '#execute' do before do authorized_collection.indexes.create_one({ name: 1 }, { unique: true }) end after do authorized_collection.delete_many authorized_collection.indexes.drop_one('name_1') end context 'when inserting a single document' do context 'when the insert succeeds' do let(:response) do op.execute(authorized_primary) end it 'inserts the documents into the database' do expect(response.written_count).to eq(1) end end end context 'when inserting multiple documents' do context 'when the insert succeeds' do let(:documents) do [{ name: 'test1' }, { name: 'test2' }] end let(:response) do op.execute(authorized_primary) end it 'inserts the documents into the database' do expect(response.written_count).to eq(2) end end end context 'when the inserts are ordered' do let(:documents) do [{ name: 'test' }, { name: 'test' }, { name: 'test1' }] end let(:spec) do { documents: documents, db_name: authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern, ordered: true } end let(:failing_insert) do described_class.new(spec) end context 'when write concern is acknowledged' do let(:write_concern) do Mongo::WriteConcern.get(w: 1) end context 'when the insert fails' do it 'aborts after first error' do failing_insert.execute(authorized_primary) expect(authorized_collection.find.count).to eq(1) end end end context 'when write concern is unacknowledged' do let(:write_concern) do Mongo::WriteConcern.get(w: 0) end context 'when the insert fails' do it 'aborts after first error' do failing_insert.execute(authorized_primary) expect(authorized_collection.find.count).to eq(1) end end end end context 'when the inserts are unordered' do let(:documents) do [{ name: 'test' }, { name: 'test' }, { name: 'test1' }] end let(:spec) do { documents: documents, db_name: authorized_collection.database.name, coll_name: authorized_collection.name, write_concern: write_concern, ordered: false } end let(:failing_insert) do described_class.new(spec) end context 'when write concern is acknowledged' do context 'when the insert fails' do it 'does not abort after first error' do failing_insert.execute(authorized_primary) expect(authorized_collection.find.count).to eq(2) end end end context 'when write concern is unacknowledged' do let(:write_concern) do 
Mongo::WriteConcern.get(w: 0) end context 'when the insert fails' do it 'does not abort after first error' do failing_insert.execute(authorized_primary) expect(authorized_collection.find.count).to eq(2) end end end end end end mongo-2.5.1/spec/mongo/operation/write/delete_spec.rb0000644000004100000410000001153713257253113022703 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Delete do let(:document) do { :q => { :foo => 1 }, :limit => 1 } end let(:spec) do { :delete => document, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(WRITE_CONCERN), :ordered => true } end let(:op) { described_class.new(spec) } describe '#initialize' do context 'spec' do it 'sets the spec' do expect(op.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two ops have the same specs' do let(:other) { described_class.new(spec) } it 'returns true' do expect(op).to eq(other) end end context 'when two ops have different specs' do let(:other_doc) { { :q => { :bar => 1 }, :limit => 1 } } let(:other_spec) do { :delete => other_doc, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(WRITE_CONCERN), :ordered => true } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end end describe '#execute' do before do authorized_collection.insert_many([ { name: 'test', field: 'test' }, { name: 'testing', field: 'test' } ]) end after do authorized_collection.delete_many end context 'when deleting a single document' do let(:delete) do described_class.new({ delete: document, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(WRITE_CONCERN) }) end context 'when the delete succeeds' do let(:document) do { 'q' => { field: 'test' }, 'limit' => 1 } end let(:result) do delete.execute(authorized_primary) end it 'deletes the document from the database' do expect(result.written_count).to eq(1) end it 'reports the correct deleted count' do expect(result.deleted_count).to eq(1) end end context 'when the delete fails' do let(:document) do { que: { field: 'test' } } end it 'raises an exception' do expect { delete.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when deleting multiple documents' do let(:delete) do described_class.new({ delete: document, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(WRITE_CONCERN) }) end context 'when the deletes succeed' do let(:document) do { 'q' => { field: 'test' }, 'limit' => 0 } end let(:result) do delete.execute(authorized_primary) end it 'deletes the documents from the database' do expect(result.written_count).to eq(2) end it 'reports the correct deleted count' do expect(result.deleted_count).to eq(2) end end context 'when a delete fails' do let(:document) do { q: { '$set' => { a: 1 } }, limit: 0 } end let(:result) do delete.execute(authorized_primary) end it 'does not delete any documents' do expect { op.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) expect(authorized_collection.find.count).to eq(2) end end context 'when a document exceeds max bson size' do let(:document) do { 'q' => { field: 't'*17000000 }, 'limit' => 0 } end it 'raises an error' do expect { op.execute(authorized_primary) }.to raise_error(Mongo::Error::MaxBSONSize) end end end context 'when write concern { w: 0 } is used', unless: op_msg_enabled? 
do let(:delete) do described_class.new({ delete: document, db_name: TEST_DB, coll_name: TEST_COLL, write_concern: Mongo::WriteConcern.get(:w => 0) }) end let(:document) do { 'q' => { field: 'test' }, 'limit' => 1 } end let(:result) do delete.execute(authorized_primary) end before do expect(delete).to receive(:execute_message).and_call_original end it 'uses op codes instead of write commands' do expect(result.written_count).to eq(0) end end end end mongo-2.5.1/spec/mongo/operation/write/insert_spec.rb0000644000004100000410000001462613257253113022747 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Write::Insert do let(:documents) do [{ '_id' => 1, 'name' => 'test' }] end let(:spec) do { :documents => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(:w => 1) } end after do authorized_collection.delete_many end let(:insert) do described_class.new(spec) end describe '#initialize' do context 'spec' do it 'sets the spec' do expect(insert.spec).to eq(spec) end end end describe '#==' do context 'spec' do context 'when two inserts have the same specs' do let(:other) do described_class.new(spec) end it 'returns true' do expect(insert).to eq(other) end end context 'when two inserts have different specs' do let(:other_docs) do [{ :bar => 1 }] end let(:other_spec) do { :documents => other_docs, :db_name => 'test', :coll_name => 'test_coll', :write_concern => { 'w' => 1 } } end let(:other) do described_class.new(other_spec) end it 'returns false' do expect(insert).not_to eq(other) end end end end describe 'document ids' do context 'when documents do not contain an id' do let(:documents) do [{ 'field' => 'test' }, { 'field' => 'test' }] end let(:inserted_ids) do insert.execute(authorized_primary).inserted_ids end let(:collection_ids) do authorized_collection.find(field: 'test').collect { |d| d['_id'] } end it 'adds an id to the documents' do expect(inserted_ids).to eq(collection_ids) end end end describe '#execute' do before do authorized_collection.indexes.create_one({ name: 1 }, { unique: true }) end after do authorized_collection.delete_many authorized_collection.indexes.drop_one('name_1') end context 'when inserting a single document' do context 'when the insert succeeds' do let!(:response) do insert.execute(authorized_primary) end it 'reports the correct written count' do expect(response.written_count).to eq(1) end it 'inserts the document into the collection' do expect(authorized_collection.find(_id: 1).to_a). to eq(documents) end end context 'when the insert fails' do let(:documents) do [{ name: 'test' }] end let(:spec) do { :documents => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(:w => 1) } end let(:failing_insert) do described_class.new(spec) end it 'raises an error' do expect { failing_insert.execute(authorized_primary) failing_insert.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when inserting multiple documents' do context 'when the insert succeeds' do let(:documents) do [{ '_id' => 1, 'name' => 'test1' }, { '_id' => 2, 'name' => 'test2' }] end let!(:response) do insert.execute(authorized_primary) end it 'reports the correct written count' do expect(response.written_count).to eq(2) end it 'inserts the documents into the collection' do expect(authorized_collection.find.to_a). 
to eq(documents) end end context 'when the insert fails on the last document' do let(:documents) do [{ name: 'test3' }, { name: 'test' }] end let(:spec) do { :documents => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(:w => 1) } end let(:failing_insert) do described_class.new(spec) end it 'raises an error' do expect { failing_insert.execute(authorized_primary) failing_insert.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the insert fails on the first document' do let(:documents) do [{ name: 'test' }, { name: 'test4' }] end let(:spec) do { :documents => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(:w => 1) } end let(:failing_insert) do described_class.new(spec) end it 'raises an error' do expect { failing_insert.execute(authorized_primary) failing_insert.execute(authorized_primary) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when a document exceeds max bson size' do let(:documents) do [{ :x => 'y'* 17000000 }] end it 'raises an error' do expect { insert.execute(authorized_primary) }.to raise_error(Mongo::Error::MaxBSONSize) end it 'does not insert the document' do expect { insert.execute(authorized_primary) }.to raise_error(Mongo::Error::MaxBSONSize) expect(authorized_collection.find.count).to eq(0) end end end context 'when write concern { w: 0 } is used', unless: op_msg_enabled? do let(:spec) do { :documents => documents, :db_name => TEST_DB, :coll_name => TEST_COLL, :write_concern => Mongo::WriteConcern.get(:w => 0) } end let(:documents) do [{ '_id' => 1 }] end let(:op) do described_class.new(spec) end before do expect(op).to receive(:execute_message).and_call_original end let(:response) do op.execute(authorized_primary) end it 'uses op codes instead of write commands' do expect(response.written_count).to eq(0) end end end end mongo-2.5.1/spec/mongo/operation/result_spec.rb0000644000004100000410000001432113257253113021617 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Result do let(:result) do described_class.new(reply) end let(:cursor_id) { 0 } let(:documents) { [] } let(:flags) { [] } let(:starting_from) { 0 } let(:reply) do Mongo::Protocol::Reply.new.tap do |reply| reply.instance_variable_set(:@flags, flags) reply.instance_variable_set(:@cursor_id, cursor_id) reply.instance_variable_set(:@starting_from, starting_from) reply.instance_variable_set(:@number_returned, documents.size) reply.instance_variable_set(:@documents, documents) end end describe '#acknowledged?' 
do context 'when the reply is for a read command' do let(:documents) do [{ 'ismaster' => true, 'ok' => 1.0 }] end it 'returns true' do expect(result).to be_acknowledged end end context 'when the reply is for a write command' do context 'when the command was acknowledged' do let(:documents) do [{ "ok" => 1, "n" => 2 }] end it 'returns true' do expect(result).to be_acknowledged end end context 'when the command was not acknowledged' do let(:reply) { nil } it 'returns false' do expect(result).to_not be_acknowledged end end end end describe '#cursor_id' do context 'when the reply exists' do let(:cursor_id) { 5 } it 'delegates to the reply' do expect(result.cursor_id).to eq(5) end end context 'when the reply does not exist' do let(:reply) { nil } it 'returns zero' do expect(result.cursor_id).to eq(0) end end end describe '#documents' do context 'when the result is for a command' do context 'when a reply is received' do let(:documents) do [{ "ok" => 1, "n" => 2 }] end it 'returns the documents' do expect(result.documents).to eq(documents) end end context 'when a reply is not received' do let(:reply) { nil } it 'returns an empty array' do expect(result.documents).to be_empty end end end end describe '#each' do let(:documents) do [{ "ok" => 1, "n" => 2 }] end context 'when a block is given' do it 'yields to each document' do result.each do |document| expect(document).to eq(documents.first) end end end context 'when no block is given' do it 'returns an enumerator' do expect(result.each).to be_a(Enumerator) end end end describe '#initialize' do it 'sets the replies' do expect(result.replies).to eq([ reply ]) end end describe '#returned_count' do context 'when the reply is for a read command' do let(:documents) do [{ 'ismaster' => true, 'ok' => 1.0 }] end it 'returns the number returned' do expect(result.returned_count).to eq(1) end end context 'when the reply is for a write command' do context 'when the write is acknowledged' do let(:documents) do [{ "ok" => 1, "n" => 2 }] end it 'returns the number returned' do expect(result.returned_count).to eq(1) end end context 'when the write is not acknowledged' do let(:reply) { nil } it 'returns zero' do expect(result.returned_count).to eq(0) end end end end describe '#successful?' 
do context 'when the reply is for a read command' do let(:documents) do [{ 'ismaster' => true, 'ok' => 1.0 }] end it 'returns true' do expect(result).to be_successful end end context 'when the reply is for a query' do context 'when the query has no errors' do let(:documents) do [{ 'field' => 'name' }] end it 'returns true' do expect(result).to be_successful end end context 'when the query has errors' do let(:documents) do [{ '$err' => 'not authorized for query on test.system.namespaces', 'code'=> 16550 }] end it 'returns false' do expect(result).to_not be_successful end end context 'when the query reply has the cursor_not_found flag set' do let(:flags) do [ :cursor_not_found ] end let(:documents) do [] end it 'returns false' do expect(result).to_not be_successful end end end context 'when the reply is for a write command' do context 'when the write is acknowledged' do context 'when ok is 1' do let(:documents) do [{ "ok" => 1, "n" => 2 }] end it 'returns true' do expect(result).to be_successful end end context 'when ok is not 1' do let(:documents) do [{ "ok" => 0, "n" => 0 }] end it 'returns false' do expect(result).to_not be_successful end end end context 'when the write is not acknowledged' do let(:reply) { nil } it 'returns true' do expect(result).to be_successful end end end end describe '#written_count' do context 'when the reply is for a read command' do let(:documents) do [{ 'ismaster' => true, 'ok' => 1.0 }] end it 'returns the number written' do expect(result.written_count).to eq(0) end end context 'when the reply is for a write command' do let(:documents) do [{ "ok" => 1, "n" => 2 }] end it 'returns the number written' do expect(result.written_count).to eq(2) end end end context 'when there is a top-level Result class defined' do before do class Result def get_result(address) Mongo::Client.new([address], TEST_OPTIONS).database.command(:ping => 1) end end end let(:result) do Result.new.get_result(default_address.to_s) end it 'uses the Result class of the operation' do expect(result).to be_a(Mongo::Operation::Result) end end end mongo-2.5.1/spec/mongo/operation/specifiable_spec.rb0000644000004100000410000000320613257253113022547 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Specifiable do let(:spec) do {} end let(:specifiable) do Class.new do include Mongo::Operation::Specifiable end.new(spec) end describe '#==' do context 'when the other object is a specifiable' do context 'when the specs are equal' do let(:other) do Class.new do include Mongo::Operation::Specifiable end.new(spec) end it 'returns true' do expect(specifiable).to eq(other) end end context 'when the specs are not equal' do let(:other) do Class.new do include Mongo::Operation::Specifiable end.new({ :db_name => 'test' }) end it 'returns false' do expect(specifiable).to_not eq(other) end end end context 'when the other object is not a specifiable' do it 'returns false' do expect(specifiable).to_not eq('test') end end end describe '#read' do context 'when read is specified' do let(:spec) do { read: { mode: :secondary} } end let(:server_selector) do Mongo::ServerSelector.get(spec[:read]) end it 'converts the read option to a ServerSelector' do expect(specifiable.read).to be_a(Mongo::ServerSelector::Secondary) end it 'uses the read option provided' do expect(specifiable.read).to eq(server_selector) end end context 'when read is not specified' do it 'returns nil' do expect(specifiable.read).to be_nil end end end end 
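# Editorial sketch (not part of the gem): the operation specs above all follow
# the same pattern -- build an operation from a plain spec hash, execute it
# against a server, and inspect the Result. A minimal illustration, assuming
# the spec helpers used above (TEST_DB, TEST_COLL, authorized_primary come
# from spec_helper):

require 'spec_helper'

insert = Mongo::Operation::Write::Insert.new(
  documents:     [{ '_id' => 1, 'name' => 'test' }],
  db_name:       TEST_DB,
  coll_name:     TEST_COLL,
  write_concern: Mongo::WriteConcern.get(w: 1)
)

result = insert.execute(authorized_primary) # returns a Mongo::Operation::Result
result.written_count                        # => 1
result.successful?                          # => true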
mongo-2.5.1/spec/mongo/operation/read_preference_spec.rb0000644000004100000410000001216213257253113023413 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::ReadPreference do let(:selector) do { name: 'test' } end let(:options) do {} end let(:cluster_double) do double('cluster') end let(:single?) do true end let(:mongos?) do false end let(:read_pref) do Mongo::ServerSelector.get end let(:operation) do Class.new do include Mongo::Operation::ReadPreference end.new.tap do |rp| allow(rp).to receive(:read).and_return(read_pref) allow(rp).to receive(:selector).and_return(selector) allow(rp).to receive(:options).and_return(options) end end let(:server) do double('server').tap do |c| allow(c).to receive(:cluster).and_return(cluster_double) allow(cluster_double).to receive(:single?).and_return(single?) allow(c).to receive(:mongos?).and_return(mongos?) end end shared_context 'a selector updater' do let(:read_pref) do Mongo::ServerSelector.get(:mode => mode) end let(:expected) do { :$query => selector, :$readPreference => read_pref.to_mongos } end it 'returns a special selector' do expect(operation.send(:update_selector_for_read_pref, operation.send(:selector), server)).to eq(expected) end context 'when the selector already has $query in it' do let(:selector) do { :$query => { :name => 'test' }, :$orderby => { :name => -1 } } end let(:expected) do selector.merge(:$readPreference => read_pref.to_mongos) end it 'returns an unaltered special selector' do expect(operation.send(:update_selector_for_read_pref, operation.send(:selector), server)).to eq(expected) end end end shared_context 'not a selector updater' do let(:read_pref) do Mongo::ServerSelector.get(:mode => mode) end it 'returns a selector' do expect(operation.send(:update_selector_for_read_pref, operation.send(:selector), server)).to eq(selector) end end context 'when the server is a mongos' do let(:mongos?) do true end context 'when the read preference mode is primary' do let(:mode) do :primary end it_behaves_like 'not a selector updater' end context 'when the read preference mode is primary_preferred' do let(:mode) do :primary_preferred end it_behaves_like 'a selector updater' end context 'when the read preference mode is secondary' do let(:mode) do :secondary end it_behaves_like 'a selector updater' end context 'when the read preference mode is secondary_preferred' do let(:mode) do :secondary_preferred end it_behaves_like 'not a selector updater' end context 'when the read preference mode is nearest' do let(:mode) do :nearest end it_behaves_like 'a selector updater' end end context 'when the server is not a mongos' do let(:mongos?) do false end let(:mode) do :secondary_preferred end it_behaves_like 'not a selector updater' end context 'when the topology is Single' do let(:single?) do true end context 'when the server is a mongos' do let(:mongos?) do true end let(:expected) do { } end it 'does not set the slave_ok flag' do expect(operation.send(:update_options_for_slave_ok, operation.send(:options), server)).to eq(expected) end end context 'when the server is not a mongos' do let(:mongos?) do false end let(:expected) do { :flags => [ :slave_ok ] } end it 'sets the slave_ok flag' do expect(operation.send(:update_options_for_slave_ok, operation.send(:options), server)).to eq(expected) end end end context 'when the topology is not Single' do let(:single?) 
do false end context 'when there is no read preference set' do let(:read_pref) do Mongo::ServerSelector.get end let(:expected) do { } end it 'does not set the slave_ok flag' do expect(operation.send(:update_options_for_slave_ok, operation.send(:options), server)).to eq(expected) end end context 'when there is a read preference' do context 'when the read preference requires the slave_ok flag' do let(:read_pref) do Mongo::ServerSelector.get(:mode => :secondary) end let(:expected) do { :flags => [ :slave_ok ] } end it 'sets the slave_ok flag' do expect(operation.send(:update_options_for_slave_ok, operation.send(:options), server)).to eq(expected) end end context 'when the read preference does not require the slave_ok flag' do let(:read_pref) do Mongo::ServerSelector.get(:mode => :primary) end let(:expected) do { } end it 'does not set the slave_ok flag' do expect(operation.send(:update_options_for_slave_ok, operation.send(:options), server)).to eq(expected) end end end end end mongo-2.5.1/spec/mongo/operation/kill_cursors_spec.rb0000644000004100000410000000155513257253113023021 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::KillCursors do let(:spec) do { coll_name: TEST_COLL, db_name: TEST_DB, :cursor_ids => [1,2] } end let(:op) { described_class.new(spec) } describe '#initialize' do it 'sets the spec' do expect(op.spec).to be(spec) end end describe '#==' do context ' when two ops have different specs' do let(:other_spec) do { :cursor_ids => [1, 2, 3] } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end describe '#message' do it 'creates a kill cursors wire protocol message with correct specs' do expect(Mongo::Protocol::KillCursors).to receive(:new).with(TEST_COLL, TEST_DB, spec[:cursor_ids]) op.send(:message, authorized_primary) end end end mongo-2.5.1/spec/mongo/operation/limited_spec.rb0000644000004100000410000000202513257253113021726 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Limited do describe '#options' do let(:limited) do Class.new do include Mongo::Operation::Specifiable include Mongo::Operation::Limited end.new({ :options => spec }) end context 'when no limit is provided' do let(:spec) do { :skip => 5 } end it 'returns a limit of -1' do expect(limited.options).to eq({ :skip => 5, :limit => -1 }) end end context 'when a limit is already provided' do context 'when the limit is -1' do let(:spec) do { :skip => 5, :limit => -1 } end it 'returns a limit of -1' do expect(limited.options).to eq({ :skip => 5, :limit => -1 }) end end context 'when the limit is not -1' do let(:spec) do { :skip => 5, :limit => 5 } end it 'returns a limit of -1' do expect(limited.options).to eq({ :skip => 5, :limit => -1 }) end end end end end mongo-2.5.1/spec/mongo/operation/read/0000755000004100000410000000000013257253113017654 5ustar www-datawww-datamongo-2.5.1/spec/mongo/operation/read/query_spec.rb0000644000004100000410000000465113257253113022366 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Read::Query, unless: op_msg_enabled? 
do let(:selector) { { foo: 1 } } let(:query_options) { {} } let(:spec) do { :selector => selector, :options => query_options, :db_name => authorized_collection.database.name, :coll_name => authorized_collection.name, :read => Mongo::ServerSelector.get } end let(:op) { described_class.new(spec) } describe '#initialize' do context 'query spec' do it 'sets the query spec' do expect(op.spec).to be(spec) end end end describe '#==' do context 'when two ops have different specs' do let(:other_spec) do { :selector => { :a => 1 }, :options => query_options, :db_name => authorized_collection.database.name, :coll_name => authorized_collection.name } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end describe '#message' do let(:query_options) do { :flags => [ :no_cursor_timeout ]} end let(:query) do described_class.new(spec) end let(:cluster_single) do double('cluster').tap do |c| allow(c).to receive(:single?).and_return(true) end end let(:message) do query.send(:message, authorized_primary) end it 'applies the correct flags' do expect(message.flags).to eq(query_options[:flags]) end context 'when the server is a secondary' do let(:secondary_server_single) do double('secondary_server').tap do |server| allow(server).to receive(:mongos?) { false } allow(server).to receive(:cluster) { cluster_single } allow(server).to receive(:features) { authorized_primary.features } end end let(:message) do query.send(:message, secondary_server_single) end it 'applies the correct flags' do expect(message.flags).to eq([ :no_cursor_timeout, :slave_ok ]) end end context "when the document contains an 'ok' field" do before do authorized_collection.insert_one(ok: false) end after do authorized_collection.delete_many end it 'does not raise an exception' do expect(op.execute(authorized_primary)).to be_a(Mongo::Operation::Read::Query::Result) end end end end mongo-2.5.1/spec/mongo/operation/read/get_more_spec.rb0000644000004100000410000000206113257253113023013 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Operation::Read::GetMore do let(:to_return) do 50 end let(:cursor_id) do 1 end let(:spec) do { :db_name => TEST_DB, :coll_name => TEST_COLL, :to_return => to_return, :cursor_id => cursor_id } end let(:op) { described_class.new(spec) } describe '#initialize' do it 'sets the spec' do expect(op.spec).to be(spec) end end describe '#==' do context ' when two ops have different specs' do let(:other_spec) do { :db_name => 'test_db', :coll_name => 'test_coll', :to_return => 50, :cursor_id => 2 } end let(:other) { described_class.new(other_spec) } it 'returns false' do expect(op).not_to eq(other) end end end describe '#message' do it 'creates a get more wire protocol message with correct specs' do expect(Mongo::Protocol::GetMore).to receive(:new).with(TEST_DB, TEST_COLL, to_return, cursor_id) op.send(:message, authorized_primary) end end end mongo-2.5.1/spec/mongo/index/0000755000004100000410000000000013257253113016050 5ustar www-datawww-datamongo-2.5.1/spec/mongo/index/view_spec.rb0000644000004100000410000005364213257253113020373 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Index::View do let(:view) do described_class.new(authorized_collection, options) end let(:options) do {} end describe '#drop_one' do let(:spec) do { another: -1 } end after do begin; view.drop_one('another_-1'); rescue; end end before do view.create_one(spec, unique: true) end context 'when provided a session' do let(:view_with_session) do 
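# The view is rebuilt here with an explicit session option so that the shared
# 'an operation using a session' / 'a failed operation using a session'
# examples below can exercise the session-handling behaviour.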
described_class.new(authorized_collection, session: session) end let(:client) do authorized_client end let(:operation) do view_with_session.drop_one('another_-1') end let(:failed_operation) do view_with_session.drop_one('_another_-1') end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when the index exists' do let(:result) do view.drop_one('another_-1') end it 'drops the index' do expect(result).to be_successful end end context 'when passing a * as the name' do it 'raises an exception' do expect { view.drop_one('*') }.to raise_error(Mongo::Error::MultiIndexDrop) end end context 'when the collection has a write concern' do let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view_with_write_concern) do described_class.new(collection) end let(:result) do view_with_write_concern.drop_one('another_-1') end context 'when the server accepts writeConcern for the dropIndexes operation', if: collation_enabled? do it 'applies the write concern' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not accept writeConcern for the dropIndexes operation', unless: collation_enabled? do it 'does not apply the write concern' do expect(result).to be_successful end end end context 'when there are multiple indexes with the same key pattern', if: collation_enabled? do before do view.create_one({ random: 1 }, unique: true) view.create_one({ random: 1 }, name: 'random_1_with_collation', unique: true, collation: { locale: 'en_US', strength: 2 }) end context 'when a name is supplied' do let!(:result) do view.drop_one('random_1_with_collation') end let(:index_names) do view.collect { |model| model['name'] } end it 'returns ok' do expect(result).to be_successful end it 'drops the correct index' do expect(index_names).not_to include('random_1_with_collation') expect(index_names).to include('random_1') end end end end describe '#drop_all' do let(:spec) do { another: -1 } end before do view.create_one(spec, unique: true) end context 'when indexes exists' do let(:result) do view.drop_all end it 'drops the index' do expect(result).to be_successful end context 'when provided a session' do let(:view_with_session) do described_class.new(authorized_collection, session: session) end let(:operation) do view_with_session.drop_all end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when the collection has a write concern' do let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view_with_write_concern) do described_class.new(collection) end let(:result) do view_with_write_concern.drop_all end after do view.drop_all end context 'when the server accepts writeConcern for the dropIndexes operation', if: collation_enabled? do it 'applies the write concern' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not accept writeConcern for the dropIndexes operation', unless: collation_enabled? 
do it 'does not apply the write concern' do expect(result).to be_successful end end end end end describe '#create_many' do context 'when the indexes are created' do context 'when passing multi-args' do context 'when the index creation is successful' do let!(:result) do view.create_many( { key: { random: 1 }, unique: true }, { key: { testing: -1 }, unique: true } ) end after do view.drop_one('random_1') view.drop_one('testing_-1') end it 'returns ok' do expect(result).to be_successful end context 'when provided a session' do let(:view_with_session) do described_class.new(authorized_collection, session: session) end let(:operation) do view_with_session.create_many( { key: { random: 1 }, unique: true }, { key: { testing: -1 }, unique: true } ) end let(:client) do authorized_client end let(:failed_operation) do view_with_session.create_many( { key: { random: 1 }, invalid: true } ) end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end end context 'when collation is specified', if: collation_enabled? do let(:result) do view.create_many( { key: { random: 1 }, unique: true, collation: { locale: 'en_US', strength: 2 } } ) end after do begin; view.drop_one('random_1'); rescue; end end let(:index_info) do view.get('random_1') end context 'when the server supports collations', if: collation_enabled? do it 'returns ok' do expect(result).to be_successful end it 'applies the collation to the new index' do result expect(index_info['collation']).not_to be_nil expect(index_info['collation']['locale']).to eq('en_US') expect(index_info['collation']['strength']).to eq(2) end end context 'when the server does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:result) do view.create_many( { key: { random: 1 }, unique: true, 'collation' => { locale: 'en_US', strength: 2 } } ) end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the collection has a write concern' do after do begin; view.drop_one('random_1'); rescue; end begin; view.drop_one('testing_-1'); rescue; end end let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view_with_write_concern) do described_class.new(collection) end let(:result) do view_with_write_concern.create_many( { key: { random: 1 }, unique: true }, { key: { testing: -1 }, unique: true } ) end context 'when the server accepts writeConcern for the createIndexes operation', if: collation_enabled? do it 'applies the write concern' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not accept writeConcern for the createIndexes operation', unless: collation_enabled? 
do it 'does not apply the write concern' do expect(result).to be_successful end end end end context 'when passing an array' do context 'when the index creation is successful' do let!(:result) do view.create_many([ { key: { random: 1 }, unique: true }, { key: { testing: -1 }, unique: true } ]) end after do view.drop_one('random_1') view.drop_one('testing_-1') end it 'returns ok' do expect(result).to be_successful end context 'when provided a session' do let(:view_with_session) do described_class.new(authorized_collection, session: session) end let(:operation) do view_with_session.create_many([ { key: { random: 1 }, unique: true }, { key: { testing: -1 }, unique: true } ]) end let(:failed_operation) do view_with_session.create_many([ { key: { random: 1 }, invalid: true }]) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end end context 'when collation is specified' do let(:result) do view.create_many([ { key: { random: 1 }, unique: true, collation: { locale: 'en_US', strength: 2 }}, ]) end let(:index_info) do view.get('random_1') end after do begin; view.drop_one('random_1'); rescue; end end context 'when the server supports collations', if: collation_enabled? do it 'returns ok' do expect(result).to be_successful end it 'applies the collation to the new index' do result expect(index_info['collation']).not_to be_nil expect(index_info['collation']['locale']).to eq('en_US') expect(index_info['collation']['strength']).to eq(2) end end context 'when the server does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:result) do view.create_many([ { key: { random: 1 }, unique: true, 'collation' => { locale: 'en_US', strength: 2 }}, ]) end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the collection has a write concern' do after do begin; view.drop_one('random_1'); rescue; end begin; view.drop_one('testing_-1'); rescue; end end let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view_with_write_concern) do described_class.new(collection) end let(:result) do view_with_write_concern.create_many([ { key: { random: 1 }, unique: true }, { key: { testing: -1 }, unique: true } ]) end context 'when the server accepts writeConcern for the createIndexes operation', if: collation_enabled? do it 'applies the write concern' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not accept writeConcern for the createIndexes operation', unless: collation_enabled? 
do it 'does not apply the write concern' do expect(result).to be_successful end end end end context 'when index creation fails' do let(:spec) do { name: 1 } end before do view.create_one(spec, unique: true) end after do view.drop_one('name_1') end it 'raises an exception' do expect { view.create_many([{ key: { name: 1 }, unique: false }]) }.to raise_error(Mongo::Error::OperationFailure) end end end end describe '#create_one' do context 'when the index is created' do let(:spec) do { random: 1 } end let(:result) do view.create_one(spec, unique: true) end it 'returns ok' do expect(result).to be_successful end context 'when provided a session' do let(:view_with_session) do described_class.new(authorized_collection, session: session) end let(:operation) do view_with_session.create_one(spec, unique: true) end let(:failed_operation) do view_with_session.create_one(spec, invalid: true) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when the collection has a write concern' do after do begin; view.drop_one('random_1'); rescue; end end let(:collection) do authorized_collection.with(write: INVALID_WRITE_CONCERN) end let(:view_with_write_concern) do described_class.new(collection) end let(:result) do view_with_write_concern.create_one(spec, unique: true) end context 'when the server accepts writeConcern for the createIndexes operation', if: collation_enabled? do it 'applies the write concern' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not accept writeConcern for the createIndexes operation', unless: collation_enabled? do it 'does not apply the write concern' do expect(result).to be_successful end end end context 'when the index is created on an subdocument field' do after do begin; view.drop_one('random_1'); rescue; end end let(:spec) do { 'sub_document.random' => 1 } end let(:result) do view.create_one(spec, unique: true) end after do begin; view.drop_one('sub_document.random_1'); rescue; end end it 'returns ok' do expect(result).to be_successful end end end context 'when index creation fails' do let(:spec) do { name: 1 } end before do view.create_one(spec, unique: true) end after do view.drop_one('name_1') end it 'raises an exception' do expect { view.create_one(spec, unique: false) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when providing an index name' do let(:spec) do { random: 1 } end let!(:result) do view.create_one(spec, unique: true, name: 'random_name') end after do view.drop_one('random_name') end it 'returns ok' do expect(result).to be_successful end it 'defines the index with the provided name' do expect(view.get('random_name')).to_not be_nil end end context 'when providing an invalid partial index filter', if: find_command_enabled? do it 'raises an exception' do expect { view.create_one({'x' => 1}, partial_filter_expression: 5) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when providing a valid partial index filter', if: find_command_enabled? 
do let(:expression) do {'a' => {'$lte' => 1.5}} end let!(:result) do view.create_one({'x' => 1}, partial_filter_expression: expression) end let(:indexes) do authorized_collection.indexes.get('x_1') end after do view.drop_one('x_1') end it 'returns ok' do expect(result).to be_successful end it 'creates an index' do expect(indexes).to_not be_nil end it 'passes partialFilterExpression correctly' do expect(indexes[:partialFilterExpression]).to eq(expression) end end end describe '#get' do let(:spec) do { random: 1 } end let!(:result) do view.create_one(spec, unique: true, name: 'random_name') end after do begin; view.drop_one('random_name'); rescue; end end context 'when providing a name' do let(:index) do view.get('random_name') end it 'returns the index' do expect(index['name']).to eq('random_name') end end context 'when providing a spec' do let(:index) do view.get(random: 1) end it 'returns the index' do expect(index['name']).to eq('random_name') end end context 'when provided a session' do let(:view_with_session) do described_class.new(authorized_collection, session: session) end let(:operation) do view_with_session.get(random: 1) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when the index does not exist' do it 'returns nil' do expect(view.get(other: 1)).to be_nil end end end describe '#each' do context 'when the collection exists' do let(:spec) do { name: 1 } end before do view.create_one(spec, unique: true) end after do view.drop_one('name_1') end let(:indexes) do view.each end it 'returns all the indexes for the database' do expect(indexes.to_a.count).to eq(2) end end context 'when the collection does not exist' do let(:nonexistant_collection) do authorized_client[:not_a_collection] end let(:nonexistant_view) do described_class.new(nonexistant_collection) end it 'raises a nonexistant collection error', if: list_command_enabled? do expect { nonexistant_view.each.to_a }.to raise_error(Mongo::Error::OperationFailure) end end end describe '#normalize_models' do context 'when providing options' do let(:options) do { :key => { :name => 1 }, :bucket_size => 5, :default_language => 'deutsch', :expire_after => 10, :language_override => 'language', :sphere_version => 1, :storage_engine => 'wiredtiger', :text_version => 2, :version => 1 } end let(:models) do view.send(:normalize_models, [ options ], authorized_primary) end let(:expected) do { :key => { :name => 1 }, :name => 'name_1', :bucketSize => 5, :default_language => 'deutsch', :expireAfterSeconds => 10, :language_override => 'language', :'2dsphereIndexVersion' => 1, :storageEngine => 'wiredtiger', :textIndexVersion => 2, :v => 1 } end it 'maps the ruby options to the server options' do expect(models).to eq([ expected ]) end context 'when using alternate names' do let(:extended_options) do options.merge!(expire_after_seconds: 5) end let(:extended_expected) do expected.tap { |exp| exp[:expireAfterSeconds] = 5 } end let(:models) do view.send(:normalize_models, [ extended_options ], authorized_primary) end it 'maps the ruby options to the server options' do expect(models).to eq([ extended_expected ]) end end context 'when the server supports collations', if: collation_enabled? 
do let(:extended_options) do options.merge(:collation => { locale: 'en_US' } ) end let(:models) do view.send(:normalize_models, [ extended_options ], authorized_primary) end let(:extended_expected) do expected.tap { |exp| exp[:collation] = { locale: 'en_US' } } end it 'maps the ruby options to the server options' do expect(models).to eq([ extended_expected ]) end end end end end mongo-2.5.1/spec/mongo/server/0000755000004100000410000000000013257253113016247 5ustar www-datawww-datamongo-2.5.1/spec/mongo/server/connection_spec.rb0000644000004100000410000004523613257253113021757 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::Connection do let(:address) do default_address end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:app_metadata) do Mongo::Cluster::AppMetadata.new(authorized_client.cluster) end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) allow(cl).to receive(:cluster_time).and_return(nil) allow(cl).to receive(:update_cluster_time) end end let(:topology) do double('topology') end let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:pool) do double('pool') end after do allow(cluster).to receive(:pool).with(server).and_return(pool) allow(pool).to receive(:disconnect!).and_return(true) server.disconnect! end describe '#connectable?' do context 'when the connection is not connectable' do let(:bad_address) do Mongo::Address.new('127.0.0.1:666') end let(:bad_server) do Mongo::Server.new(bad_address, cluster, monitoring, listeners, TEST_OPTIONS) end let(:connection) do described_class.new(bad_server) end it 'returns false' do expect(connection).to_not be_connectable end end end describe '#connect!' do context 'when no socket exists' do let(:connection) do described_class.new(server, server.options) end let!(:result) do connection.connect! end let(:socket) do connection.send(:socket) end it 'returns true' do expect(result).to be true end it 'creates a socket' do expect(socket).to_not be_nil end it 'connects the socket' do expect(socket).to be_alive end end context 'when a socket exists' do let(:connection) do described_class.new(server, server.options) end before do connection.connect! connection.connect! end let(:socket) do connection.send(:socket) end it 'keeps the socket alive' do expect(socket).to be_alive end end context 'when user credentials exist' do context 'when the user is not authorized' do let(:connection) do described_class.new( server, TEST_OPTIONS.merge( :user => 'notauser', :password => 'password', :database => TEST_DB, :heartbeat_frequency => 30) ) end let!(:error) do e = begin; connection.send(:ensure_connected); rescue => ex; ex; end end it 'raises an error' do expect(error).to be_a(Mongo::Auth::Unauthorized) end it 'disconnects the socket' do expect(connection.send(:socket)).to be(nil) end it 'marks the server as unknown' do expect(server).to be_unknown end end describe 'when the user is authorized' do let(:connection) do described_class.new( server, TEST_OPTIONS.merge( :user => TEST_USER.name, :password => TEST_USER.password, :database => TEST_DB ) ) end before do connection.connect! end it 'sets the connection as connected' do expect(connection).to be_connected end end end end describe '#disconnect!' 
do context 'when a socket is not connected' do let(:connection) do described_class.new(server, server.options) end it 'does not raise an error' do expect(connection.disconnect!).to be true end end context 'when a socket is connected' do let(:connection) do described_class.new(server, server.options) end before do connection.connect! connection.disconnect! end it 'disconnects the socket' do expect(connection.send(:socket)).to be_nil end end end describe '#dispatch' do let!(:connection) do described_class.new( server, TEST_OPTIONS.merge( :user => TEST_USER.name, :password => TEST_USER.password, :database => TEST_DB ) ) end let(:documents) do [{ 'name' => 'testing' }] end let(:insert) do Mongo::Protocol::Insert.new(TEST_DB, TEST_COLL, documents) end let(:query) do Mongo::Protocol::Query.new(TEST_DB, TEST_COLL, { 'name' => 'testing' }) end context 'when providing a single message' do let(:reply) do connection.dispatch([ insert, query ]) end after do authorized_collection.delete_many end it 'it dispatchs the message to the socket' do expect(reply.documents.first['name']).to eq('testing') end end context 'when providing multiple messages' do let(:selector) do { :getlasterror => 1 } end let(:command) do Mongo::Protocol::Query.new(TEST_DB, '$cmd', selector, :limit => -1) end let(:reply) do connection.dispatch([ insert, command ]) end after do authorized_collection.delete_many end it 'it dispatchs the message to the socket' do expect(reply.documents.first['ok']).to eq(1.0) end end context 'when the response_to does not match the request_id' do let(:documents) do [{ 'name' => 'bob' }, { 'name' => 'alice' }] end let(:insert) do Mongo::Protocol::Insert.new(TEST_DB, TEST_COLL, documents) end let(:query_bob) do Mongo::Protocol::Query.new(TEST_DB, TEST_COLL, { name: 'bob' }) end let(:query_alice) do Mongo::Protocol::Query.new(TEST_DB, TEST_COLL, { name: 'alice' }) end after do authorized_collection.delete_many end before do # Fake a query for which we did not read the response. See RUBY-1117 allow(query_bob).to receive(:replyable?) { false } connection.dispatch([ insert, query_bob ]) end it 'raises an UnexpectedResponse error' do expect { connection.dispatch([ query_alice ]) }.to raise_error(Mongo::Error::UnexpectedResponse, /Got response for request ID \d+ but expected response for request ID \d+/) end it 'does not affect subsequent requests' do expect { connection.dispatch([ query_alice ]) }.to raise_error(Mongo::Error::UnexpectedResponse) expect(connection.dispatch([ query_alice ]).documents.first['name']).to eq('alice') end end context 'when a request is interrupted (Thread.kill)' do let(:documents) do [{ 'name' => 'bob' }, { 'name' => 'alice' }] end let(:insert) do Mongo::Protocol::Insert.new(TEST_DB, TEST_COLL, documents) end let(:query_bob) do Mongo::Protocol::Query.new(TEST_DB, TEST_COLL, { name: 'bob' }) end let(:query_alice) do Mongo::Protocol::Query.new(TEST_DB, TEST_COLL, { name: 'alice' }) end before do connection.dispatch([ insert ]) end after do authorized_collection.delete_many end it 'closes the socket and does not use it for subsequent requests' do t = Thread.new { # Kill the thread just before the reply is read allow(Mongo::Protocol::Reply).to receive(:deserialize_header) { t.kill and t.stop? 
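# Stubbing Reply.deserialize_header to kill the requesting thread simulates an
# operation that is interrupted before its reply is read; the connection is
# expected to discard that socket rather than reuse it for later requests.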
} connection.dispatch([ query_bob ]) } t.join allow(Mongo::Protocol::Message).to receive(:deserialize_header).and_call_original expect(connection.dispatch([ query_alice ]).documents.first['name']).to eq('alice') end end context 'when the message exceeds the max size' do context 'when the message is an insert' do before do allow(connection).to receive(:max_message_size).and_return(200) end let(:documents) do [{ 'name' => 'testing' } ] * 10 end let(:reply) do connection.dispatch([ insert ]) end it 'checks the size against the max message size' do expect { reply }.to raise_exception(Mongo::Error::MaxMessageSize) end end context 'when the message is a command' do before do allow(connection).to receive(:max_bson_object_size).and_return(100) end let(:selector) do { :getlasterror => '1' } end let(:command) do Mongo::Protocol::Query.new(TEST_DB, '$cmd', selector, :limit => -1) end let(:reply) do connection.dispatch([ command ]) end it 'checks the size against the max bson size' do expect { reply }.to raise_exception(Mongo::Error::MaxBSONSize) end end end context 'when a network or socket error occurs' do let(:socket) do connection.connect! connection.instance_variable_get(:@socket) end before do expect(socket).to receive(:write).and_raise(Mongo::Error::SocketError) end it 'disconnects and raises the exception' do expect { connection.dispatch([ insert ]) }.to raise_error(Mongo::Error::SocketError) expect(connection).to_not be_connected end end context 'when a socket timeout is set' do let(:connection) do described_class.new(server, socket_timeout: 10) end it 'sets the timeout' do expect(connection.timeout).to eq(10) end let(:client) do authorized_client.with(socket_timeout: 1.5) end before do authorized_collection.insert_one(a: 1) end after do sleep(0.5) authorized_collection.delete_many client.close end it 'raises a timeout when it expires' do start = Time.now begin Timeout::timeout(1.5 + 2) do client[authorized_collection.name].find("$where" => "sleep(2000) || true").first end rescue => ex end_time = Time.now expect(ex).to be_a(Timeout::Error) expect(ex.message).to eq("Took more than 1.5 seconds to receive data.") end # Account for wait queue timeout (2s) and rescue expect(end_time - start).to be_within(2.5).of(1.5) end context 'when the socket_timeout is negative' do let(:connection) do described_class.new(server, server.options) end let(:messages) do [ insert ] end before do connection.send(:write, messages) connection.send(:socket).instance_variable_set(:@timeout, -(Time.now.to_i)) end let(:reply) do connection.send(:read, messages.last.request_id) end it 'raises a timeout error' do expect { reply }.to raise_exception(Timeout::Error) end end end context 'when the process is forked' do let(:insert) do Mongo::Protocol::Insert.new(TEST_DB, TEST_COLL, documents) end before do expect(Process).to receive(:pid).at_least(:once).and_return(1) end after do authorized_collection.delete_many end it 'disconnects the connection' do expect(connection).to receive(:disconnect!).and_call_original connection.dispatch([ insert ]) end it 'sets a new pid' do connection.dispatch([ insert ]) expect(connection.pid).to eq(1) end end end describe '#initialize' do context 'when host and port are provided' do let(:connection) do described_class.new(server, server.options) end it 'sets the address' do expect(connection.address).to eq(server.address) end it 'sets the socket to nil' do expect(connection.send(:socket)).to be_nil end it 'does not set the timeout to the default' do expect(connection.timeout).to be_nil end end 
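# Note: as the examples above and below show, Connection.new is lazy -- the
# address is taken from the server, the socket stays nil until #connect!, and
# no timeout is applied unless a socket_timeout option is given.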
context 'when timeout options are provided' do let(:connection) do described_class.new(server, socket_timeout: 10) end it 'sets the timeout' do expect(connection.timeout).to eq(10) end end context 'when ssl options are provided' do let(:ssl_options) do { :ssl => true, :ssl_key => 'file', :ssl_key_pass_phrase => 'iamaphrase' } end let(:connection) do described_class.new(server, ssl_options) end it 'sets the ssl options' do expect(connection.send(:ssl_options)).to eq(ssl_options) end end context 'when ssl is false' do context 'when ssl options are provided' do let(:ssl_options) do { :ssl => false, :ssl_key => 'file', :ssl_key_pass_phrase => 'iamaphrase' } end let(:connection) do described_class.new(server, ssl_options) end it 'does not set the ssl options' do expect(connection.send(:ssl_options)).to be_empty end end context 'when ssl options are not provided' do let(:ssl_options) do { :ssl => false } end let(:connection) do described_class.new(server, ssl_options) end it 'does not set the ssl options' do expect(connection.send(:ssl_options)).to be_empty end end end context 'when authentication options are provided' do let(:connection) do described_class.new( server, :user => TEST_USER.name, :password => TEST_USER.password, :database => TEST_DB, :auth_mech => :mongodb_cr ) end let(:user) do Mongo::Auth::User.new( database: TEST_DB, user: TEST_USER.name, password: TEST_USER.password ) end it 'sets the auth options' do expect(connection.options[:user]).to eq(user.name) end end end describe '#auth_mechanism' do let(:connection) do described_class.new(server, server.options) end let(:reply) do double('reply').tap do |r| allow(r).to receive(:documents).and_return([ ismaster ]) end end before do connection.connect! end context 'when the ismaster response indicates the auth mechanism is :scram' do let(:features) do Mongo::Server::Description::Features.new(0..3) end context 'when the server auth mechanism is scram', if: scram_sha_1_enabled? do it 'uses scram' do allow(Mongo::Server::Description::Features).to receive(:new).and_return(features) connection.send(:handshake!) expect(connection.send(:default_mechanism)).to eq(:scram) end end context 'when the server auth mechanism is the default (mongodb_cr)', unless: scram_sha_1_enabled? do it 'uses scram' do allow(Mongo::Server::Description::Features).to receive(:new).and_return(features) connection.send(:handshake!) expect(connection.send(:default_mechanism)).to eq(:scram) end end end context 'when the ismaster response indicates the auth mechanism is :mongodb_cr' do let(:features) do Mongo::Server::Description::Features.new(0..2) end context 'when the server auth mechanism is scram', if: scram_sha_1_enabled? do it 'uses scram' do allow(Mongo::Server::Description::Features).to receive(:new).and_return(features) connection.send(:handshake!) expect(connection.send(:default_mechanism)).to eq(:scram) end end context 'when the server auth mechanism is the default (mongodb_cr)', unless: scram_sha_1_enabled? do it 'uses mongodb_cr' do allow(Mongo::Server::Description::Features).to receive(:new).and_return(features) connection.send(:handshake!) 
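# With a stubbed feature range of 0..2 (a pre-SCRAM wire version) and a server
# that does not advertise SCRAM, the handshake should fall back to the legacy
# default mechanism: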
expect(connection.send(:default_mechanism)).to eq(:mongodb_cr) end end end end context 'when different timeout options are set' do let(:client) do authorized_client.with(options) end let(:server) do client.cluster.next_primary end let(:address) do server.address end let(:connection) do described_class.new(server, server.options) end after do client.close end context 'when a connect_timeout is in the options' do context 'when a socket_timeout is in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: 3, socket_timeout: 5) end before do connection.connect! end it 'uses the connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(3) end it 'uses the socket_timeout as the socket_timeout' do expect(connection.send(:socket).timeout).to eq(5) end end context 'when a socket_timeout is not in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: 3, socket_timeout: nil) end before do connection.connect! end it 'uses the connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(3) end it 'does not use a socket_timeout' do expect(connection.send(:socket).timeout).to be(nil) end end end context 'when a connect_timeout is not in the options' do context 'when a socket_timeout is in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: nil, socket_timeout: 5) end before do connection.connect! end it 'uses the default connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(10) end it 'uses the socket_timeout' do expect(connection.send(:socket).timeout).to eq(5) end end context 'when a socket_timeout is not in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: nil, socket_timeout: nil) end before do connection.connect! end it 'uses the default connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(10) end it 'does not use a socket_timeout' do expect(connection.send(:socket).timeout).to be(nil) end end end end end mongo-2.5.1/spec/mongo/server/connection_pool_spec.rb0000644000004100000410000002721513257253113023005 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::ConnectionPool do let(:options) do TEST_OPTIONS.merge(max_pool_size: 2) end let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:topology) do double('topology') end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) end end describe '#checkin' do let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end after do expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! 
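# Server#disconnect! looks the pool up through the cluster, so the cluster
# double has to be stubbed before tearing the server down.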
end context 'when a connection is checked out on the thread' do let!(:connection) do pool.checkout end before do pool.checkin(connection) end let(:queue) do pool.send(:queue).queue end it 'returns the connection to the queue' do expect(queue.size).to eq(1) end end end describe '#checkout' do let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end context 'when no connection is checked out on the same thread' do let!(:connection) do pool.checkout end it 'returns a new connection' do expect(connection.address).to eq(server.address) end end context 'when a connection is checked out on the same thread' do before do pool.checkout end it 'returns the threads connection' do expect(pool.checkout.address).to eq(server.address) end end context 'when a connection is checked out on a different thread' do let!(:connection) do Thread.new { pool.checkout }.join end it 'returns a new connection' do expect(pool.checkout.address).to eq(server.address) end it 'does not return the same connection instance' do expect(pool.checkout).to_not eql(connection) end end context 'when connections are checked out and checked back in' do it 'pulls the connection from the front of the queue' do first = pool.checkout second = pool.checkout pool.checkin(second) pool.checkin(first) expect(pool.checkout).to be(first) end end end describe '#disconnect!' do let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end it 'disconnects the queue' do expect(cluster).to receive(:pool).with(server).and_return(pool) expect(pool.send(:queue)).to receive(:disconnect!).once.and_call_original server.disconnect! end end describe '.get' do let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end after do expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! end it 'returns the pool for the server' do expect(pool).to_not be_nil end end describe '#inspect' do let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end after do expect(cluster).to receive(:pool).with(server).and_return(pool) server.disconnect! end it 'includes the object id' do expect(pool.inspect).to include(pool.object_id.to_s) end it 'includes the queue inspection' do expect(pool.inspect).to include(pool.__send__(:queue).inspect) end end describe '#with_connection' do let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end context 'when a connection cannot be checked out' do before do allow(pool).to receive(:checkout).and_return(nil) pool.with_connection { |c| c } end let(:queue) do pool.send(:queue).queue end it 'does not add the connection to the pool' do expect(queue.size).to eq(1) end end end context 'when the connection does not finish authenticating before the thread is killed' do let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end let(:options) do { user: ROOT_USER.name, password: ROOT_USER.password }.merge(TEST_OPTIONS).merge(max_pool_size: 1) end before do t = Thread.new { # Kill the thread when it's authenticating allow(Mongo::Auth).to receive(:get) { t.kill and t.stop? 
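# Killing the thread while Mongo::Auth.get is resolving the authenticator
# simulates a connection that never finishes authenticating; the expectation
# below is that its half-set-up socket is discarded.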
} pool.with_connection { |c| c.send(:ensure_connected) { |socket| socket } } } t.join end it 'disconnects the socket' do expect(pool.checkout.send(:socket)).to be_nil end end describe '#close_stale_sockets!' do let(:server) do Mongo::Server.new(address, authorized_client.cluster, monitoring, listeners, options) end let!(:pool) do described_class.get(server) end let(:queue) do pool.instance_variable_get(:@queue).queue end context 'when there is a max_idle_time specified' do let(:options) do TEST_OPTIONS.merge(max_pool_size: 2, max_idle_time: 0.5) end context 'when the connections have not been checked out' do before do queue.each do |conn| expect(conn).not_to receive(:disconnect!) end sleep(0.5) pool.close_stale_sockets! end it 'does not close any sockets' do expect(queue.none? { |c| c.connected? }).to be(true) end end context 'when the sockets have already been checked out and returned to the pool' do context 'when min size is 0' do let(:options) do TEST_OPTIONS.merge(max_pool_size: 2, min_pool_size: 0, max_idle_time: 0.5) end before do queue.each do |conn| expect(conn).to receive(:disconnect!).and_call_original end pool.checkin(pool.checkout) pool.checkin(pool.checkout) sleep(0.5) pool.close_stale_sockets! end it 'closes all stale sockets' do expect(queue.all? { |c| !c.connected? }).to be(true) end end context 'when min size is > 0' do context 'when more than the number of min_size are checked out' do let(:options) do TEST_OPTIONS.merge(max_pool_size: 5, min_pool_size: 3, max_idle_time: 0.5) end before do first = pool.checkout second = pool.checkout third = pool.checkout fourth = pool.checkout fifth = pool.checkout pool.checkin(fifth) expect(fifth).to receive(:disconnect!).and_call_original expect(fifth).not_to receive(:connect!) sleep(0.5) pool.close_stale_sockets! end it 'closes all stale sockets and does not connect new ones' do expect(queue.size).to be(1) expect(queue[0].connected?).to be(false) end end context 'when between 0 and min_size number of connections are checked out' do let(:options) do TEST_OPTIONS.merge(max_pool_size: 5, min_pool_size: 3, max_idle_time: 0.5) end before do first = pool.checkout second = pool.checkout third = pool.checkout fourth = pool.checkout fifth = pool.checkout pool.checkin(third) pool.checkin(fourth) pool.checkin(fifth) expect(third).to receive(:disconnect!).and_call_original expect(third).not_to receive(:connect!) expect(fourth).to receive(:disconnect!).and_call_original expect(fourth).not_to receive(:connect!) expect(fifth).to receive(:disconnect!).and_call_original expect(fifth).to receive(:connect!).and_call_original sleep(0.5) pool.close_stale_sockets! end it 'closes all stale sockets and does not connect new ones' do expect(queue.size).to be(3) expect(queue[0].connected?).to be(true) expect(queue[1].connected?).to be(false) expect(queue[2].connected?).to be(false) end end context 'when a stale connection is unsuccessfully reconnected' do let(:options) do TEST_OPTIONS.merge(max_pool_size: 5, min_pool_size: 3, max_idle_time: 0.5) end before do first = pool.checkout second = pool.checkout third = pool.checkout fourth = pool.checkout fifth = pool.checkout pool.checkin(third) pool.checkin(fourth) pool.checkin(fifth) expect(third).to receive(:disconnect!).and_call_original expect(third).not_to receive(:connect!) expect(fourth).to receive(:disconnect!).and_call_original expect(fourth).not_to receive(:connect!) 
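# As in the previous example, only the fifth connection is due to be
# reconnected to preserve min_pool_size; here that reconnect is forced to fail
# with a SocketError.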
expect(fifth).to receive(:disconnect!).and_call_original allow(fifth).to receive(:connect!).and_raise(Mongo::Error::SocketError) sleep(0.5) pool.close_stale_sockets! end it 'is kept in the pool' do expect(queue.size).to be(3) expect(queue[0].connected?).to be(false) expect(queue[1].connected?).to be(false) expect(queue[2].connected?).to be(false) end end context 'when exactly the min_size number of connections is checked out' do let(:options) do TEST_OPTIONS.merge(max_pool_size: 5, min_pool_size: 3, max_idle_time: 0.5) end before do first = pool.checkout second = pool.checkout third = pool.checkout fourth = pool.checkout fifth = pool.checkout pool.checkin(fourth) pool.checkin(fifth) expect(fourth).to receive(:disconnect!).and_call_original expect(fourth).not_to receive(:connect!) expect(fifth).to receive(:disconnect!).and_call_original expect(fifth).not_to receive(:connect!) sleep(0.5) pool.close_stale_sockets! end it 'closes all stale sockets and does not connect new ones' do expect(queue.size).to be(2) expect(queue[0].connected?).to be(false) expect(queue[1].connected?).to be(false) end end end end end context 'when there is no max_idle_time specified' do let(:connection) do conn = pool.checkout conn.connect! pool.checkin(conn) conn end before do expect(connection).not_to receive(:disconnect!) pool.close_stale_sockets! end it 'does not close any sockets' do expect(connection.connected?).to be(true) end end end end mongo-2.5.1/spec/mongo/server/monitor_spec.rb0000644000004100000410000001257613257253113021310 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::Monitor do let(:address) do default_address end let(:listeners) do Mongo::Event::Listeners.new end describe '#scan!' do context 'when calling multiple times in succession' do let(:monitor) do described_class.new(address, listeners, TEST_OPTIONS) end it 'throttles the scans to minimum 500ms' do start = Time.now monitor.scan! monitor.scan! expect(Time.now - start).to be >= 0.5 end end context 'when the ismaster fails the first time' do let(:monitor) do described_class.new(address, listeners, TEST_OPTIONS) end let(:socket) do monitor.connection.connect! monitor.connection.__send__(:socket) end before do expect(socket).to receive(:write).once.and_raise(Mongo::Error::SocketError) expect(socket).to receive(:write).and_call_original monitor.scan! end it 'retries the ismaster', if: standalone? do expect(monitor.description).to be_standalone end it 'retries the ismaster', if: replica_set? do expect(monitor.description).to be_primary end it 'retries the ismaster', if: sharded? do expect(monitor.description).to be_mongos end end context 'when the ismaster command succeeds' do let(:monitor) do described_class.new(address, listeners, TEST_OPTIONS) end before do monitor.scan! end it 'updates the server description', if: standalone? do expect(monitor.description).to be_standalone end it 'updates the server description', if: replica_set? do expect(monitor.description).to be_primary end it 'updates the server description', if: sharded? do expect(monitor.description).to be_mongos end end context 'when the ismaster command fails' do context 'when no server is running on the address' do let(:bad_address) do Mongo::Address.new('127.0.0.1:27050') end let(:monitor) do described_class.new(bad_address, listeners) end before do monitor.scan! 
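# Nothing is listening on 127.0.0.1:27050, so the scan should fail and leave
# the description unknown.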
end it 'keeps the server unknown' do expect(monitor.description).to be_unknown end end context 'when the socket gets an exception' do let(:bad_address) do default_address end let(:monitor) do described_class.new(bad_address, listeners) end let(:socket) do monitor.connection.connect! monitor.connection.__send__(:socket) end before do expect(socket).to receive(:write).twice.and_raise(Mongo::Error::SocketError) monitor.scan! end it 'keeps the server unknown' do expect(monitor.description).to be_unknown end it 'disconnects the connection' do expect(monitor.connection).to_not be_connected end end end end describe '#heartbeat_frequency' do context 'when an option is provided' do let(:monitor) do described_class.new(address, listeners, :heartbeat_frequency => 5) end it 'returns the option' do expect(monitor.heartbeat_frequency).to eq(5) end end context 'when no option is provided' do let(:monitor) do described_class.new(address, listeners) end it 'defaults to 10' do expect(monitor.heartbeat_frequency).to eq(10) end end end describe '#run!' do let(:monitor) do described_class.new(address, listeners, :heartbeat_frequency => 1) end before do monitor.run! sleep(1) end it 'refreshes the server on the provided interval' do expect(monitor.description).to_not be_nil end end describe '#restart!' do let(:monitor) do described_class.new(address, listeners, TEST_OPTIONS) end let!(:thread) do monitor.run! end context 'when the monitor is already running' do it 'does not create a new thread' do expect(monitor.restart!).to be(thread) end end context 'when the monitor is not already running' do before do monitor.stop! sleep(1) end it 'creates a new thread' do expect(monitor.restart!).not_to be(thread) end end end describe '#stop' do let(:monitor) do described_class.new(address, listeners, TEST_OPTIONS) end let!(:thread) do monitor.run! end before do expect(monitor.connection).to receive(:disconnect!).and_call_original monitor.stop! sleep(1) end it 'kills the monitor thread' do expect(thread.stop?).to be(true) end end describe '#connection' do context 'when there is a connect_timeout option set' do let(:connect_timeout) do 1 end let(:monitor) do described_class.new(address, listeners, TEST_OPTIONS.merge(connect_timeout: connect_timeout)) end it 'sets the value as the timeout on the connection' do expect(monitor.connection.timeout).to eq(connect_timeout) end it 'set the value as the timeout on the socket' do monitor.connection.connect! 
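# Illustrative sketch (not executed here; assumes a reachable test deployment): the connect_timeout option is applied to the monitor connection and reused as the timeout of its underlying socket.
#
#   monitor = Mongo::Server::Monitor.new(address, listeners, connect_timeout: 1)
#   monitor.connection.timeout               # => 1
#   monitor.connection.connect!
#   monitor.connection.send(:socket).timeout # => 1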
expect(monitor.connection.send(:socket).timeout).to eq(connect_timeout) end end end end mongo-2.5.1/spec/mongo/server/description_spec.rb0000644000004100000410000004656513257253113022151 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::Description do let(:replica) do { 'setName' => 'mongodb_set', 'ismaster' => true, 'secondary' => false, 'hosts' => [ '127.0.0.1:27018', '127.0.0.1:27019' ], 'arbiters' => [ '127.0.0.1:27120' ], 'primary' => authorized_primary.address.to_s, 'tags' => { 'rack' => 'a' }, 'me' => '127.0.0.1:27019', 'maxBsonObjectSize' => 16777216, 'maxMessageSizeBytes' => 48000000, 'maxWriteBatchSize' => 1000, 'maxWireVersion' => 2, 'minWireVersion' => 0, 'localTime' => Time.now, 'lastWrite' => { 'lastWriteDate' => Time.now }, 'logicalSessionTimeoutMinutes' => 7, 'operationTime' => 1, '$clusterTime' => 1, 'ok' => 1 } end let(:address) do Mongo::Address.new(authorized_primary.address.to_s) end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:topology) do double('topology') end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(topology) allow(cl).to receive(:app_metadata).and_return(app_metadata) end end describe '#arbiter?' do context 'when the server is an arbiter' do let(:description) do described_class.new(address, { 'arbiterOnly' => true, 'setName' => 'test' }) end it 'returns true' do expect(description).to be_arbiter end end context 'when the server is not an arbiter' do let(:description) do described_class.new(address, replica) end it 'returns false' do expect(description).to_not be_arbiter end end end describe '#arbiters' do context 'when the replica set has arbiters' do let(:description) do described_class.new(address, replica) end it 'returns the arbiters' do expect(description.arbiters).to eq([ '127.0.0.1:27120' ]) end end context 'when the replica set has no arbiters' do let(:description) do described_class.new(address, {}) end it 'returns an empty array' do expect(description.arbiters).to be_empty end end context 'when the addresses are not lowercase' do let(:config) do replica.merge( { 'arbiters' => [ 'SERVER:27017' ], } ) end let(:description) do described_class.new(address, config) end it 'normalizes the addresses to lowercase' do expect(description.arbiters).to eq(['server:27017']) end end end describe '#ghost?' do context 'when the server is a ghost' do let(:config) do { 'isreplicaset' => true } end let(:description) do described_class.new(address, config) end it 'returns true' do expect(description).to be_ghost end end context 'when the server is not a ghost' do let(:description) do described_class.new(address, replica) end it 'returns false' do expect(description).to_not be_ghost end end end describe '#hidden?' 
do context 'when the server is hidden' do let(:description) do described_class.new(address, { 'hidden' => true }) end it 'returns true' do expect(description).to be_hidden end end context 'when the server is not hidden' do let(:description) do described_class.new(address, replica) end it 'returns false' do expect(description).to_not be_hidden end end end describe '#hosts' do let(:description) do described_class.new(address, replica) end it 'returns all the hosts in the replica set' do expect(description.hosts).to eq([ '127.0.0.1:27018', '127.0.0.1:27019' ]) end context 'when the addresses are not lowercase' do let(:config) do replica.merge( { 'hosts' => [ 'SERVER:27017' ], } ) end let(:description) do described_class.new(address, config) end it 'normalizes the addresses to lowercase' do expect(description.hosts).to eq(['server:27017']) end end end describe '#max_bson_object_size' do let(:description) do described_class.new(address, replica) end it 'returns the value' do expect(description.max_bson_object_size).to eq(16777216) end end describe '#max_message_size' do let(:description) do described_class.new(address, replica) end it 'returns the value' do expect(description.max_message_size).to eq(48000000) end end describe '#max_write_batch_size' do let(:description) do described_class.new(address, replica) end it 'returns the value' do expect(description.max_write_batch_size).to eq(1000) end end describe '#max_wire_version' do context 'when the max wire version is provided' do let(:description) do described_class.new(address, replica) end it 'returns the value' do expect(description.max_wire_version).to eq(2) end end context 'when the max wire version is not provided' do let(:description) do described_class.new(address, {}) end it 'returns the default' do expect(description.max_wire_version).to eq(0) end end end describe '#min_wire_version' do context 'when the min wire version is provided' do let(:description) do described_class.new(address, replica) end it 'returns the value' do expect(description.min_wire_version).to eq(0) end end context 'when the min wire version is not provided' do let(:description) do described_class.new(address, {}) end it 'returns the default' do expect(description.min_wire_version).to eq(0) end end end describe '#tags' do context 'when the server has tags' do let(:description) do described_class.new(address, replica) end it 'returns the tags' do expect(description.tags).to eq(replica['tags']) end end context 'when the server does not have tags' do let(:config) do { 'ismaster' => true } end let(:description) do described_class.new(address, config) end it 'returns an empty hash' do expect(description.tags).to eq({}) end end end describe '#mongos?' do context 'when the server is a mongos' do let(:config) do { 'msg' => 'isdbgrid', 'ismaster' => true } end let(:description) do described_class.new(address, config) end it 'returns true' do expect(description).to be_mongos end end context 'when the server is not a mongos' do let(:description) do described_class.new(address, replica) end it 'returns false' do expect(description).to_not be_mongos end end end describe '#passive?' 
do context 'when the server is passive' do let(:description) do described_class.new(address, { 'passive' => true }) end it 'returns true' do expect(description).to be_passive end end context 'when the server is not passive' do let(:description) do described_class.new(address, replica) end it 'returns false' do expect(description).to_not be_passive end end end describe '#passives' do context 'when passive servers exist' do let(:description) do described_class.new(address, { 'passives' => [ '127.0.0.1:27025' ] }) end it 'returns a list of the passives' do expect(description.passives).to eq([ '127.0.0.1:27025' ]) end end context 'when no passive servers exist' do let(:description) do described_class.new(address, replica) end it 'returns an empty array' do expect(description.passives).to be_empty end end context 'when the addresses are not lowercase' do let(:config) do replica.merge( { 'passives' => [ 'SERVER:27017' ], } ) end let(:description) do described_class.new(address, config) end it 'normalizes the addresses to lowercase' do expect(description.passives).to eq(['server:27017']) end end end describe '#primary?' do context 'when the server is not a primary' do let(:description) do described_class.new(address, { 'ismaster' => false }) end it 'returns false' do expect(description).to_not be_primary end end context 'when the server is a primary' do let(:description) do described_class.new(address, replica) end it 'returns true' do expect(description).to be_primary end end end describe '#average_round_trip_time' do let(:description) do described_class.new(address, { 'secondary' => false }, 4.5) end it 'defaults to 0' do expect(described_class.new(address).average_round_trip_time).to eq(0) end it 'can be set via the constructor' do expect(description.average_round_trip_time).to eq(4.5) end end describe '#replica_set_name' do context 'when the server is in a replica set' do let(:description) do described_class.new(address, replica) end it 'returns the replica set name' do expect(description.replica_set_name).to eq('mongodb_set') end end context 'when the server is not in a replica set' do let(:description) do described_class.new(address, {}) end it 'returns nil' do expect(description.replica_set_name).to be_nil end end end describe '#secondary?' do context 'when the server is not a secondary' do let(:description) do described_class.new(address, { 'secondary' => false }) end it 'returns false' do expect(description).to_not be_secondary end end context 'when the server is a secondary' do let(:description) do described_class.new(address, { 'secondary' => true, 'setName' => 'test' }) end it 'returns true' do expect(description).to be_secondary end end end describe '#servers' do let(:config) do replica.merge({ 'passives' => [ '127.0.0.1:27025' ]}) end let(:description) do described_class.new(address, config) end it 'returns the hosts + arbiters + passives' do expect(description.servers).to eq( [ '127.0.0.1:27018', '127.0.0.1:27019', '127.0.0.1:27120', '127.0.0.1:27025' ] ) end end describe '#standalone?'
do context 'when the server is standalone' do let(:description) do described_class.new(address, { 'ismaster' => true, 'ok' => 1 }) end it 'returns true' do expect(description).to be_standalone end end context 'when the server is part of a replica set' do let(:description) do described_class.new(address, replica) end it 'returns false' do expect(description).to_not be_standalone end end end describe '#server_type' do context 'when the server is an arbiter' do let(:description) do described_class.new(address, { 'arbiterOnly' => true, 'setName' => 'test' }) end it 'returns :arbiter' do expect(description.server_type).to eq(:arbiter) end end context 'when the server is a ghost' do let(:description) do described_class.new(address, { 'isreplicaset' => true }) end it 'returns :ghost' do expect(description.server_type).to eq(:ghost) end end context 'when the server is a mongos' do let(:config) do { 'msg' => 'isdbgrid', 'ismaster' => true } end let(:description) do described_class.new(address, config) end it 'returns :sharded' do expect(description.server_type).to eq(:sharded) end end context 'when the server is a primary' do let(:description) do described_class.new(address, replica) end it 'returns :primary' do expect(description.server_type).to eq(:primary) end end context 'when the server is a secondary' do let(:description) do described_class.new(address, { 'secondary' => true, 'setName' => 'test' }) end it 'returns :secondary' do expect(description.server_type).to eq(:secondary) end end context 'when the server is standalone' do let(:description) do described_class.new(address, { 'ismaster' => true, 'ok' => 1 }) end it 'returns :standalone' do expect(description.server_type).to eq(:standalone) end end context 'when the description has no configuration' do let(:description) do described_class.new(address) end it 'returns :unknown' do expect(description.server_type).to eq(:unknown) end end end describe '#unknown?' do context 'when the description has no configuration' do let(:description) do described_class.new(address) end it 'returns true' do expect(description).to be_unknown end end context 'when the command was not ok' do let(:description) do described_class.new(address, { 'ok' => 0 }) end it 'returns true' do expect(description).to be_unknown end end context 'when the description has a configuration' do let(:config) do { 'hosts' => [ '127.0.0.1:27019', '127.0.0.1:27020' ], 'ok' => 1 } end let(:description) do described_class.new(address, config) end it 'returns false' do expect(description).to_not be_unknown end end end describe '#is_server?' do let(:listeners) do Mongo::Event::Listeners.new end let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners) end let(:description) do described_class.new(address, {}) end context 'when the server address matches the description address' do it 'returns true' do expect(description.is_server?(server)).to be(true) end end context 'when the server address does not match the description address' do let(:other_address) do Mongo::Address.new('127.0.0.1:27020') end let(:server) do Mongo::Server.new(other_address, cluster, monitoring, listeners) end it 'returns false' do expect(description.is_server?(server)).to be(false) end end end describe '#me_mismatch?' 
do let(:description) do described_class.new(address, config) end context 'when the server address matches the me field' do let(:config) do replica.merge('me' => address.to_s) end it 'returns false' do expect(description.me_mismatch?).to be(false) end end context 'when the server address does not match the me field' do let(:config) do replica.merge('me' => 'localhost:27020') end it 'returns true' do expect(description.me_mismatch?).to be(true) end end context 'when there is no me field' do let(:config) do replica.tap do |r| r.delete('me') end end it 'returns false' do expect(description.me_mismatch?).to be(false) end end end describe '#lists_server?' do let(:description) do described_class.new(address, replica) end let(:server_address) do Mongo::Address.new('127.0.0.1:27018') end let(:listeners) do Mongo::Event::Listeners.new end let(:server) do Mongo::Server.new(server_address, cluster, monitoring, listeners) end context 'when the server is included in the description hosts list' do it 'returns true' do expect(description.lists_server?(server)).to be(true) end end context 'when the server is not included in the description hosts list' do let(:server_address) do Mongo::Address.new('127.0.0.1:27017') end it 'returns false' do expect(description.lists_server?(server)).to be(false) end end end describe '#replica_set_member?' do context 'when the description is from a mongos' do let(:config) do { 'msg' => 'isdbgrid', 'ismaster' => true } end let(:description) do described_class.new(address, config) end it 'returns false' do expect(description.replica_set_member?).to be(false) end end context 'when the description is from a standalone' do let(:description) do described_class.new(address, { 'ismaster' => true, 'ok' => 1 }) end it 'returns false' do expect(description.replica_set_member?).to be(false) end end context 'when the description is from a replica set member' do let(:description) do described_class.new(address, replica) end it 'returns true' do expect(description.replica_set_member?).to be(true) end end end describe '#logical_session_timeout_minutes' do context 'when a logical session timeout value is in the config' do let(:description) do described_class.new(address, replica) end it 'returns the logical session timeout value' do expect(description.logical_session_timeout).to eq(7) end end context 'when a logical session timeout value is not in the config' do let(:description) do described_class.new(address, { 'ismaster' => true, 'ok' => 1 }) end it 'returns nil' do expect(description.logical_session_timeout).to be(nil) end end end describe '#==' do let(:description) do described_class.new(address, replica) end let(:other) do described_class.new(address, replica.merge( 'localTime' => 1, 'lastWrite' => { 'lastWriteDate' => 1 }, 'operationTime' => 2, '$clusterTime' => 2 )) end it 'excludes certain fields' do expect(description == other).to be(true) end context 'when the classes do not match' do let(:description) do described_class.new(address, replica) end it 'returns false' do expect(description == Array.new).to be(false) end end context 'when the configs match' do let(:description) do described_class.new(address, replica) end let(:other) do described_class.new(address, replica) end it 'returns true' do expect(description == other).to be(true) end end context 'when the configs do not match' do let(:description) do described_class.new(address, replica) end let(:other) do described_class.new(address, { 'ismaster' => true, 'ok' => 1 }) end it 'returns false' do expect(description == other).to 
be(false) end end end end mongo-2.5.1/spec/mongo/server/monitor/0000755000004100000410000000000013257253113017736 5ustar www-datawww-datamongo-2.5.1/spec/mongo/server/monitor/connection_spec.rb0000644000004100000410000000545213257253113023442 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::Monitor::Connection do let(:client) do authorized_client.with(options) end let(:address) do client.cluster.next_primary.address end let(:cluster) do double('cluster').tap do |cl| allow(cl).to receive(:topology).and_return(double('topology')) allow(cl).to receive(:app_metadata).and_return(Mongo::Cluster::AppMetadata.new(authorized_client.cluster)) end end let(:server) do Mongo::Server.new(address, cluster, Mongo::Monitoring.new(monitoring: false), Mongo::Event::Listeners.new, options) end let(:connection) do server.monitor.connection end after do client.close end context 'when a connect_timeout is in the options' do context 'when a socket_timeout is in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: 3, socket_timeout: 5) end before do connection.connect! end it 'uses the connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(3) end it 'uses the connect_timeout as the socket_timeout' do expect(connection.send(:socket).timeout).to eq(3) end end context 'when a socket_timeout is not in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: 3, socket_timeout: nil) end before do connection.connect! end it 'uses the connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(3) end it 'uses the connect_timeout as the socket_timeout' do expect(connection.send(:socket).timeout).to eq(3) end end end context 'when a connect_timeout is not in the options' do context 'when a socket_timeout is in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: nil, socket_timeout: 5) end before do connection.connect! end it 'uses the default connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(10) end it 'uses the connect_timeout as the socket_timeout' do expect(connection.send(:socket).timeout).to eq(10) end end context 'when a socket_timeout is not in the options' do let(:options) do TEST_OPTIONS.merge(connect_timeout: nil, socket_timeout: nil) end before do connection.connect! 
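# With neither connect_timeout nor socket_timeout supplied, the driver default of 10 seconds is expected for both the address connect timeout and the socket timeout, as asserted below.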
end it 'uses the default connect_timeout for the address' do expect(connection.address.send(:connect_timeout)).to eq(10) end it 'uses the connect_timeout as the socket_timeout' do expect(connection.send(:socket).timeout).to eq(10) end end end end mongo-2.5.1/spec/mongo/server/connection_pool/0000755000004100000410000000000013257253113021437 5ustar www-datawww-datamongo-2.5.1/spec/mongo/server/connection_pool/queue_spec.rb0000644000004100000410000001157413257253113024132 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::ConnectionPool::Queue do describe '#dequeue' do let(:connection) do double('connection') end let(:queue) do described_class.new(:max_pool_size => 1) { connection } end context 'when the queue is empty' do context 'when the max size is reached' do it 'raises a timeout error' do expect { queue.dequeue queue.dequeue }.to raise_error(Timeout::Error) end end context 'when the max size is not reached' do it 'creates a new connection' do expect(queue.dequeue).to eq(connection) end end end context 'when waiting for a connection to be enqueued' do before do allow(connection).to receive(:record_checkin!).and_return(connection) Thread.new do sleep(0.5) queue.enqueue(connection) end.join end it 'returns the enqueued connection' do expect(queue.dequeue).to eq(connection) end end end describe '#disconnect!' do let(:connection) do double('connection') end let(:queue) do described_class.new(:max_pool_size => 1) { connection } end it 'disconnects all connections in the queue' do expect(connection).to receive(:disconnect!) queue.disconnect! end end describe '#enqueue' do let(:connection) do double('connection').tap do |con| allow(con).to receive(:record_checkin!).and_return(con) end end let(:queue) do described_class.new { connection } end before do queue.enqueue(connection) end it 'adds the connection to the queue' do expect(queue.dequeue).to eq(connection) end end describe '#initialize' do context 'when a min size is provided' do let(:queue) do described_class.new(:min_pool_size => 2) { double('connection') } end it 'creates the queue with the minimum connections' do expect(queue.size).to eq(2) end it 'does not use the same objects in the queue' do expect(queue.dequeue).to_not equal(queue.dequeue) end end context 'when no min size is provided' do let(:queue) do described_class.new { double('connection') } end it 'creates the queue with the number of default connections' do expect(queue.size).to eq(1) end end end describe '#inspect' do let(:queue) do described_class.new(:min_pool_size => 2) { double('connection') } end it 'includes the object id' do expect(queue.inspect).to include(queue.object_id.to_s) end it 'includes the min size' do expect(queue.inspect).to include('min_size=2') end it 'includes the max size' do expect(queue.inspect).to include('max_size=5') end it 'includes the wait timeout' do expect(queue.inspect).to include('wait_timeout=1') end it 'includes the current size' do expect(queue.inspect).to include('current_size=2') end end describe '#max_size' do context 'when a max pool size option is provided' do let(:queue) do described_class.new(:max_pool_size => 3) { double('connection') } end it 'returns the max size' do expect(queue.max_size).to eq(3) end end context 'when no pool size option is provided' do let(:queue) do described_class.new { double('connection') } end it 'returns the default size' do expect(queue.max_size).to eq(5) end end end describe '#wait_timeout' do context 'when the wait timeout option is provided' do let(:queue) do
described_class.new(:wait_queue_timeout => 3) { double('connection') } end it 'returns the wait timeout' do expect(queue.wait_timeout).to eq(3) end end context 'when the wait timeout option is not provided' do let(:queue) do described_class.new { double('connection') } end it 'returns the default wait timeout' do expect(queue.wait_timeout).to eq(1) end end end describe '#close_stale_sockets!' do let(:queue) do described_class.new(max_pool_size: 2, max_idle_time: 0.5) do double('connection').tap do |con| expect(con).to receive(:disconnect!).and_return(true) allow(con).to receive(:record_checkin!) do allow(con).to receive(:last_checkin).and_return(Time.now) con end end end end let(:connection) do queue.dequeue end before do queue.enqueue(connection) expect(connection).to receive(:connect!).and_return(true) sleep(0.5) queue.close_stale_sockets! end it 'disconnects and reconnects up to min_size the expired connections' do expect(queue.size).to eq(1) end end end mongo-2.5.1/spec/mongo/server/description/0000755000004100000410000000000013257253113020572 5ustar www-datawww-datamongo-2.5.1/spec/mongo/server/description/features_spec.rb0000644000004100000410000001154213257253113023752 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::Description::Features do let(:features) do described_class.new(wire_versions, default_address) end describe '#initialize' do context 'when the server wire version range is the same' do let(:wire_versions) do 0..3 end it 'sets the server wire version range' do expect(features.server_wire_versions).to eq(0..3) end end context 'when the server wire version range min is higher' do let(:wire_versions) do described_class::DRIVER_WIRE_VERSIONS.max+1..described_class::DRIVER_WIRE_VERSIONS.max+2 end it 'raises an exception' do expect { features.check_driver_support! }.to raise_error(Mongo::Error::UnsupportedFeatures) end end context 'when the server wire version range max is higher' do let(:wire_versions) do 0..4 end it 'sets the server wire version range' do expect(features.server_wire_versions).to eq(0..4) end end context 'when the server wire version range is below the driver wire version range' do let(:wire_versions) do described_class::DRIVER_WIRE_VERSIONS.min-2..described_class::DRIVER_WIRE_VERSIONS.min-1 end it 'raises an exception' do expect { features.check_driver_support! }.to raise_error(Mongo::Error::UnsupportedFeatures) end end context 'when the server wire version range max is lower' do let(:wire_versions) do 0..2 end it 'sets the server wire version range' do expect(features.server_wire_versions).to eq(0..2) end end end describe '#collation_enabled?' do context 'when the wire range includes 5' do let(:wire_versions) do 0..5 end it 'returns true' do expect(features).to be_collation_enabled end end context 'when the wire range does not include 5' do let(:wire_versions) do 0..2 end it 'returns false' do expect(features).to_not be_collation_enabled end end end describe '#max_staleness_enabled?' do context 'when the wire range includes 5' do let(:wire_versions) do 0..5 end it 'returns true' do expect(features).to be_max_staleness_enabled end end context 'when the wire range does not include 5' do let(:wire_versions) do 0..2 end it 'returns false' do expect(features).to_not be_max_staleness_enabled end end end describe '#find_command_enabled?'
do context 'when the wire range includes 4' do let(:wire_versions) do 0..4 end it 'returns true' do expect(features).to be_find_command_enabled end end context 'when the wire range does not include 4' do let(:wire_versions) do 0..2 end it 'returns false' do expect(features).to_not be_find_command_enabled end end end describe '#list_collections_enabled?' do context 'when the wire range includes 3' do let(:wire_versions) do 0..3 end it 'returns true' do expect(features).to be_list_collections_enabled end end context 'when the wire range does not include 3' do let(:wire_versions) do 0..2 end it 'returns false' do expect(features).to_not be_list_collections_enabled end end end describe '#list_indexes_enabled?' do context 'when the wire range includes 3' do let(:wire_versions) do 0..3 end it 'returns true' do expect(features).to be_list_indexes_enabled end end context 'when the wire range does not include 3' do let(:wire_versions) do 0..2 end it 'returns false' do expect(features).to_not be_list_indexes_enabled end end end describe '#write_command_enabled?' do context 'when the wire range includes 2' do let(:wire_versions) do 0..3 end it 'returns true' do expect(features).to be_write_command_enabled end end context 'when the wire range does not include 2' do let(:wire_versions) do 0..1 end it 'returns false' do expect { features.check_driver_support! }.to raise_exception(Mongo::Error::UnsupportedFeatures) end end end describe '#scram_sha_1_enabled?' do context 'when the wire range includes 3' do let(:wire_versions) do 0..3 end it 'returns true' do expect(features).to be_scram_sha_1_enabled end end context 'when the wire range does not include 3' do let(:wire_versions) do 0..2 end it 'returns false' do expect(features).to_not be_scram_sha_1_enabled end end end end mongo-2.5.1/spec/mongo/server/description/inspector/0000755000004100000410000000000013257253113022600 5ustar www-datawww-datamongo-2.5.1/spec/mongo/server/description/inspector/primary_elected_spec.rb0000644000004100000410000000400113257253113027302 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::Description::Inspector::PrimaryElected do let(:listeners) do Mongo::Event::Listeners.new end let(:inspection) do described_class.new(listeners) end let(:address) do Mongo::Address.new('127.0.0.1:27017') end describe '#run' do let(:config) do { 'ismaster' => false, 'secondary' => true, 'hosts' => [ '127.0.0.1:27018', '127.0.0.1:27019' ], 'setName' => 'test' } end let(:description) do Mongo::Server::Description.new(address, config, listeners) end let(:updated) do Mongo::Server::Description.new(address, new_config, listeners) end let(:listener) do double('listener') end before do listeners.add_listener(Mongo::Event::PRIMARY_ELECTED, listener) end context 'when the server becomes primary' do let(:new_config) do { 'ismaster' => true, 'secondary' => false, 'hosts' => [ '127.0.0.1:27018', '127.0.0.1:27019' ], 'setName' => 'test' } end it 'fires a primary elected event' do expect(listener).to receive(:handle).with(updated) inspection.run(description, updated) end end context 'when the server stays the same' do let(:new_config) do { 'ismaster' => false, 'secondary' => true, 'hosts' => [ '127.0.0.1:27018', '127.0.0.1:27019' ], 'setName' => 'test' } end it 'fires no event' do expect(listener).to_not receive(:handle) inspection.run(description, updated) end end context 'when the server becomes mongos' do let(:new_config) do { 'ismaster' => true, 'secondary' => false, 'msg' => 'isdbgrid' } end it 'fires a primary elected event' do 
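# A transition to mongos is treated like a newly elected primary here: the inspector compares the old and new descriptions and notifies the PRIMARY_ELECTED listeners with the updated description.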
expect(listener).to receive(:handle).with(updated) inspection.run(description, updated) end end end end mongo-2.5.1/spec/mongo/server/description/inspector/description_changed_spec.rb0000644000004100000410000000326513257253113030141 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Server::Description::Inspector::DescriptionChanged do let(:listeners) do Mongo::Event::Listeners.new end let(:inspection) do described_class.new(listeners) end let(:address) do Mongo::Address.new('127.0.0.1:27017') end describe '.run' do let(:config) do { 'ismaster' => true, 'secondary' => false, 'hosts' => [ '127.0.0.1:27018', '127.0.0.1:27019' ], 'setName' => 'test' } end let(:description) do Mongo::Server::Description.new(address, config, listeners) end let(:updated) do Mongo::Server::Description.new(address, new_config, listeners) end let(:listener) do double('listener') end before do listeners.add_listener(Mongo::Event::DESCRIPTION_CHANGED, listener) end context 'when there is no change' do let(:new_config) do { 'ismaster' => true, 'secondary' => false, 'hosts' => [ '127.0.0.1:27018', '127.0.0.1:27019' ], 'setName' => 'test' } end it 'does not fire a description changed event' do expect(listener).to_not receive(:handle) inspection.run(description, updated) end end context 'when there is a change' do let(:new_config) do { 'ismaster' => true, 'secondary' => false, 'hosts' => [ '127.0.0.1:27018', '127.0.0.1:27020' ], 'setName' => 'test' } end it 'fires a description changed event' do expect(listener).to receive(:handle) inspection.run(description, updated) end end end end mongo-2.5.1/spec/mongo/cluster_spec.rb0000644000004100000410000004151613257253113017770 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cluster do let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:cluster) do described_class.new(ADDRESSES, monitoring, TEST_OPTIONS) end describe '#==' do context 'when the other is a cluster' do context 'when the addresses are the same' do context 'when the options are the same' do let(:other) do described_class.new(ADDRESSES, monitoring, TEST_OPTIONS) end it 'returns true' do expect(cluster).to eq(other) end end context 'when the options are not the same' do let(:other) do described_class.new([ '127.0.0.1:27017' ], monitoring, TEST_OPTIONS.merge(:replica_set => 'test')) end it 'returns false' do expect(cluster).to_not eq(other) end end end context 'when the addresses are not the same' do let(:other) do described_class.new([ '127.0.0.1:27018' ], monitoring, TEST_OPTIONS) end it 'returns false' do expect(cluster).to_not eq(other) end end end context 'when the other is not a cluster' do it 'returns false' do expect(cluster).to_not eq('test') end end end describe '#has_readable_server?' do let(:selector) do Mongo::ServerSelector.get(mode: :primary) end it 'delegates to the topology' do expect(cluster.has_readable_server?).to eq(cluster.topology.has_readable_server?(cluster)) end end describe '#has_writable_server?' 
do it 'delegates to the topology' do expect(cluster.has_writable_server?).to eq(cluster.topology.has_writable_server?(cluster)) end end describe '#inspect' do let(:preference) do Mongo::ServerSelector.get(ServerSelector::PRIMARY) end it 'displays the cluster seeds and topology' do expect(cluster.inspect).to include('topology') expect(cluster.inspect).to include('servers') end end describe '#replica_set_name' do let(:preference) do Mongo::ServerSelector.get(ServerSelector::PRIMARY) end context 'when the option is provided' do let(:cluster) do described_class.new( [ '127.0.0.1:27017' ], monitoring, TEST_OPTIONS.merge(:connect => :replica_set, :replica_set => 'testing') ) end it 'returns the name' do expect(cluster.replica_set_name).to eq('testing') end end context 'when the option is not provided' do let(:cluster) do described_class.new([ '127.0.0.1:27017' ], monitoring, TEST_OPTIONS.merge(connect: :direct).delete_if { |k| k == :replica_set }) end it 'returns nil' do expect(cluster.replica_set_name).to be_nil end end end describe '#scan!' do let(:preference) do Mongo::ServerSelector.get(ServerSelector::PRIMARY) end let(:known_servers) do cluster.instance_variable_get(:@servers) end before do expect(known_servers.first).to receive(:scan!).and_call_original end it 'returns true' do expect(cluster.scan!).to be true end end describe '#servers' do context 'when topology is single', if: single_seed? do context 'when the server is a mongos', if: single_mongos? do it 'returns the mongos' do expect(cluster.servers.size).to eq(1) end end context 'when the server is a replica set member', if: single_rs_member? do it 'returns the replica set member' do expect(cluster.servers.size).to eq(1) end end end context 'when the cluster has no servers' do let(:servers) do [nil] end before do cluster.instance_variable_set(:@servers, servers) cluster.instance_variable_set(:@topology, topology) end context 'when topology is Single' do let(:topology) do Mongo::Cluster::Topology::Single.new({}, monitoring) end it 'returns an empty array' do expect(cluster.servers).to eq([]) end end context 'when topology is ReplicaSet' do let(:topology) do Mongo::Cluster::Topology::ReplicaSet.new({}, monitoring) end it 'returns an empty array' do expect(cluster.servers).to eq([]) end end context 'when topology is Sharded' do let(:topology) do Mongo::Cluster::Topology::Sharded.new({}, monitoring) end it 'returns an empty array' do expect(cluster.servers).to eq([]) end end context 'when topology is Unknown' do let(:topology) do Mongo::Cluster::Topology::Unknown.new({}, monitoring) end it 'returns an empty array' do expect(cluster.servers).to eq([]) end end end end describe '#add' do context 'when topology is Single' do let(:topology) do Mongo::Cluster::Topology::Single.new({}) end before do cluster.add('a') end it 'does not add discovered servers to the cluster' do expect(cluster.servers[0].address.seed).to_not eq('a') end end end describe '#disconnect!' do let(:known_servers) do cluster.instance_variable_get(:@servers) end let(:periodic_executor) do cluster.instance_variable_get(:@periodic_executor) end before do known_servers.each do |server| expect(server).to receive(:disconnect!).and_call_original end expect(periodic_executor).to receive(:stop!).and_call_original end it 'disconnects each server and the cursor reaper and returns true' do expect(cluster.disconnect!).to be(true) end end describe '#reconnect!' 
do let(:periodic_executor) do cluster.instance_variable_get(:@periodic_executor) end before do cluster.servers.each do |server| expect(server).to receive(:reconnect!).and_call_original end expect(periodic_executor).to receive(:restart!).and_call_original end it 'reconnects each server and the cursor reaper and returns true' do expect(cluster.reconnect!).to be(true) end end describe '#remove' do let(:address_a) do Mongo::Address.new('127.0.0.1:27017') end let(:address_b) do Mongo::Address.new('127.0.0.1:27018') end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:server_a) do Mongo::Server.new(address_a, cluster, monitoring, Mongo::Event::Listeners.new) end let(:server_b) do Mongo::Server.new(address_b, cluster, monitoring, Mongo::Event::Listeners.new) end let(:servers) do [ server_a, server_b ] end let(:addresses) do [ address_a, address_b ] end before do cluster.instance_variable_set(:@servers, servers) cluster.instance_variable_set(:@addresses, addresses) cluster.remove('127.0.0.1:27017') end it 'removes the host from the list of servers' do expect(cluster.instance_variable_get(:@servers)).to eq([server_b]) end it 'removes the host from the list of addresses' do expect(cluster.instance_variable_get(:@addresses)).to eq([address_b]) end end describe '#add_hosts' do let(:servers) do [nil] end let(:hosts) do ["127.0.0.1:27018"] end let(:description) do Mongo::Server::Description.new(double('address'), { 'hosts' => hosts }) end before do cluster.instance_variable_set(:@servers, servers) cluster.instance_variable_set(:@topology, topology) end context 'when the topology allows servers to be added' do let(:topology) do double('topology').tap do |t| allow(t).to receive(:add_hosts?).and_return(true) end end it 'adds the servers' do expect(cluster).to receive(:add).once cluster.add_hosts(description) end end context 'when the topology does not allow servers to be added' do let(:topology) do double('topology').tap do |t| allow(t).to receive(:add_hosts?).and_return(false) end end it 'does not add the servers' do expect(cluster).not_to receive(:add) cluster.add_hosts(description) end end end describe '#remove_hosts' do let(:listeners) do Mongo::Event::Listeners.new end let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:server) do Mongo::Server.new(address, cluster, monitoring, listeners) end let(:servers) do [ server ] end let(:hosts) do ["127.0.0.1:27018"] end let(:description) do Mongo::Server::Description.new(double('address'), { 'hosts' => hosts }) end context 'when the topology allows servers to be removed' do context 'when the topology allows a specific server to be removed' do let(:topology) do double('topology').tap do |t| allow(t).to receive(:remove_hosts?).and_return(true) allow(t).to receive(:remove_server?).and_return(true) end end before do cluster.instance_variable_set(:@servers, servers) cluster.instance_variable_set(:@topology, topology) end it 'removes the servers' do expect(cluster).to receive(:remove).once cluster.remove_hosts(description) end end context 'when the topology does not allow a specific server to be removed' do let(:topology) do double('topology').tap do |t| allow(t).to receive(:remove_hosts?).and_return(true) allow(t).to receive(:remove_server?).and_return(false) end end before do cluster.instance_variable_set(:@servers, servers) cluster.instance_variable_set(:@topology, topology) end it 'removes the servers' do expect(cluster).not_to receive(:remove) 
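# The topology double answers remove_hosts? => true but remove_server? => false, so Cluster#remove is never invoked for this description.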
cluster.remove_hosts(description) end end end context 'when the topology does not allow servers to be removed' do let(:topology) do double('topology').tap do |t| allow(t).to receive(:remove_hosts?).and_return(false) end end before do cluster.instance_variable_set(:@servers, servers) cluster.instance_variable_set(:@topology, topology) end it 'does not remove the servers' do expect(cluster).not_to receive(:remove) cluster.remove_hosts(description) end end end describe '#next_primary' do let(:cluster) do authorized_client.cluster end let(:primary_candidates) do if cluster.single? cluster.servers elsif cluster.sharded? cluster.servers else cluster.servers.select { |s| s.primary? } end end it 'always returns the primary, mongos, or standalone' do expect(primary_candidates).to include(cluster.next_primary) end end describe '#app_metadata' do it 'returns an AppMetadata object' do expect(cluster.app_metadata).to be_a(Mongo::Cluster::AppMetadata) end context 'when the client has an app_name set' do let(:cluster) do authorized_client.with(app_name: 'reports').cluster end it 'constructs an AppMetadata object with the app_name' do expect(cluster.app_metadata.send(:full_client_document)[:application]).to eq('name' => 'reports') end end context 'when the client does not have an app_name set' do let(:cluster) do authorized_client.cluster end it 'constructs an AppMetadata object with no app_name' do expect(cluster.app_metadata.send(:full_client_document)[:application]).to be_nil end end end describe '#logical_session_timeout' do let(:listeners) do Mongo::Event::Listeners.new end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:server_one) do Mongo::Server.new(default_address, cluster, monitoring, listeners) end let(:server_two) do Mongo::Server.new(default_address, cluster, monitoring, listeners) end let(:servers) do [ server_one, server_two ] end before do allow(cluster).to receive(:servers).and_return(servers) end context 'when one server has a nil logical session timeout value' do before do allow(server_one).to receive(:logical_session_timeout).and_return(7) allow(server_two).to receive(:logical_session_timeout).and_return(nil) end it 'returns nil' do expect(cluster.logical_session_timeout).to be(nil) end end context 'when all servers have a logical session timeout value' do before do allow(server_one).to receive(:logical_session_timeout).and_return(7) allow(server_two).to receive(:logical_session_timeout).and_return(3) end it 'returns the minimum' do expect(cluster.logical_session_timeout).to be(3) end end context 'when no servers have a logical session timeout value' do before do allow(server_one).to receive(:logical_session_timeout).and_return(nil) allow(server_two).to receive(:logical_session_timeout).and_return(nil) end it 'returns nil' do expect(cluster.logical_session_timeout).to be(nil) end end end describe '#cluster_time' do let(:operation) do client.command(ping: 1) end let(:operation_with_session) do client.command({ ping: 1 }, session: session) end let(:second_operation) do client.command({ ping: 1 }, session: session) end it_behaves_like 'an operation updating cluster time' end describe '#update_cluster_time' do let(:cluster) do described_class.new(ADDRESSES, monitoring, TEST_OPTIONS.merge(heartbeat_frequency: 1000)) end let(:result) do double('result', cluster_time: cluster_time_doc) end context 'when the cluster_time variable is nil' do before do cluster.instance_variable_set(:@cluster_time, nil) cluster.update_cluster_time(result) end context 'when the cluster time 
received is nil' do let(:cluster_time_doc) do nil end it 'does not set the cluster_time variable' do expect(cluster.cluster_time).to be_nil end end context 'when the cluster time received is not nil' do let(:cluster_time_doc) do BSON::Document.new(Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(1, 1)) end it 'sets the cluster_time variable to the cluster time doc' do expect(cluster.cluster_time).to eq(cluster_time_doc) end end end context 'when the cluster_time variable has a value' do before do cluster.instance_variable_set(:@cluster_time, BSON::Document.new( Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(1, 1))) cluster.update_cluster_time(result) end context 'when the cluster time received is nil' do let(:cluster_time_doc) do nil end it 'does not update the cluster_time variable' do expect(cluster.cluster_time).to eq(BSON::Document.new( Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(1, 1))) end end context 'when the cluster time received is not nil' do context 'when the cluster time received is greater than the cluster_time variable' do let(:cluster_time_doc) do BSON::Document.new(Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(1, 2)) end it 'sets the cluster_time variable to the cluster time' do expect(cluster.cluster_time).to eq(cluster_time_doc) end end context 'when the cluster time received is less than the cluster_time variable' do let(:cluster_time_doc) do BSON::Document.new(Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(0, 1)) end it 'does not set the cluster_time variable to the cluster time' do expect(cluster.cluster_time).to eq(BSON::Document.new( Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(1, 1))) end end context 'when the cluster time received is equal to the cluster_time variable' do let(:cluster_time_doc) do BSON::Document.new(Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(1, 1)) end it 'does not change the cluster_time variable' do expect(cluster.cluster_time).to eq(BSON::Document.new( Mongo::Cluster::CLUSTER_TIME => BSON::Timestamp.new(1, 1))) end end end end end end mongo-2.5.1/spec/mongo/sdam_monitoring_spec.rb0000644000004100000410000000423713257253113021477 0ustar www-datawww-datarequire 'spec_helper' describe 'SDAM Monitoring' do include Mongo::SDAM SDAM_MONITORING_TESTS.each do |file| spec = Mongo::SDAM::Spec.new(file) context(spec.description) do before(:all) do @client = Mongo::Client.new([], heartbeat_frequency: 100, connect_timeout: 0.1) @subscriber = Mongo::SDAMMonitoring::TestSubscriber.new @client.subscribe(Mongo::Monitoring::SERVER_OPENING, @subscriber) @client.subscribe(Mongo::Monitoring::SERVER_CLOSED, @subscriber) @client.subscribe(Mongo::Monitoring::SERVER_DESCRIPTION_CHANGED, @subscriber) @client.subscribe(Mongo::Monitoring::TOPOLOGY_OPENING, @subscriber) @client.subscribe(Mongo::Monitoring::TOPOLOGY_CHANGED, @subscriber) @client.send(:create_from_uri, spec.uri_string) end after(:all) do @client.close end spec.phases.each_with_index do |phase, index| context("Phase: #{index + 1}") do before(:all) do phase.responses.each do |response| # For each response in the phase, we need to change that server's description. 
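# If the cluster does not already know this address, a Server is built by hand so that its monitor's inspector can be run against the canned ismaster reply from the spec file and the resulting description swapped into the monitor.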
server = find_server(@client, response.address) server ||= Mongo::Server.new( Mongo::Address.new(response.address), @client.cluster, @client.instance_variable_get(:@monitoring), @client.cluster.send(:event_listeners), @client.cluster.options ) monitor = server.instance_variable_get(:@monitor) description = monitor.inspector.run(server.description, response.ismaster, 0.5) monitor.instance_variable_set(:@description, description) end end phase.outcome.events.each do |expectation| it "expects a #{expectation.name} to be fired" do fired_event = @subscriber.first_event(expectation.name) expect(fired_event).not_to be_nil expect(fired_event).to match_sdam_monitoring_event(expectation) end end end end end end end mongo-2.5.1/spec/mongo/socket/0000755000004100000410000000000013257253113016231 5ustar www-datawww-datamongo-2.5.1/spec/mongo/socket/ssl_spec.rb0000644000004100000410000003160713257253113020400 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Socket::SSL, if: running_ssl? do let(:address) do default_address end let(:resolver) do address.instance_variable_get(:@resolver) end let(:socket_timeout) do 1 end let(:socket) do resolver.socket(socket_timeout, options) end let(:options) do SSL_OPTIONS end let (:key_string) do File.read(CLIENT_KEY_PEM) end let (:cert_string) do File.read(CLIENT_CERT_PEM) end let (:ca_cert_string) do File.read(CA_PEM) end let(:key_encrypted_string) do File.read(CLIENT_KEY_ENCRYPTED_PEM) end let(:cert_object) do OpenSSL::X509::Certificate.new(cert_string) end let(:key_object) do OpenSSL::PKey.read(key_string) end describe '#connect!' do context 'when a certificate is provided' do context 'when connecting the tcp socket is successful' do before do socket.connect! end it 'connects to the server' do expect(socket).to be_alive end end context 'when connecting the tcp socket raises an exception' do before do tcp_socket = socket.instance_variable_get(:@tcp_socket) allow(tcp_socket).to receive(:connect).and_raise(Mongo::Error::SocketTimeoutError) end let!(:result) do begin socket.connect! rescue => e e end end it 'raises an exception' do expect(result).to be_a(Mongo::Error::SocketTimeoutError) end end end context 'when a certificate and key are provided as strings' do let(:options) do { :ssl => true, :ssl_cert_string => cert_string, :ssl_key_string => key_string, :ssl_verify => false } end before do socket.connect! end it 'connects to the server' do expect(socket).to be_alive end end context 'when certificate and an encrypted key are provided as strings', if: testing_ssl_locally? do let(:options) do { :ssl => true, :ssl_cert_string => cert_string, :ssl_key_string => key_encrypted_string, :ssl_key_pass_phrase => CLIENT_KEY_PASSPHRASE, :ssl_verify => false } end before do socket.connect! end it 'connects to the server' do expect(socket).to be_alive end end context 'when a certificate and key are provided as objects' do let(:options) do { :ssl => true, :ssl_cert_object => cert_object, :ssl_key_object => key_object, :ssl_verify => false } end before do socket.connect! end it 'connects to the server' do expect(socket).to be_alive end end context 'when the certificate is specified using both a file and a PEM-encoded string' do let(:options) do super().merge( :ssl_cert_string => 'This is a random string, not a PEM-encoded certificate' ) end before do socket.connect! 
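# The certificate file referenced by the base SSL options is expected to take precedence over the bogus :ssl_cert_string merged in above, which is why the connection is still expected to succeed.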
end # since the lower priority option is clearly invalid we verify priority by checking that it connects it 'discards the value of :ssl_cert_string' do expect(socket).to be_alive end end context 'when the certificate is specified using both a file and an object' do let(:options) do super().merge( :ssl_cert_object => 'This is a string, not a certificate' ) end before do socket.connect! end # since the lower priority option is clearly invalid we verify priority by checking that it connects it 'discards the value of :ssl_cert_object' do expect(socket).to be_alive end end context 'when the certificate is specified using both a PEM-encoded string and an object' do let(:options) do { :ssl => true, :ssl_cert_string => cert_string, :ssl_cert_object => 'This is a string, not a Certificate', :ssl_key => CLIENT_KEY_PEM, :ssl_verify => false } end before do socket.connect! end # since the lower priority option is clearly invalid we verify priority by checking that it connects it 'discards the value of :ssl_cert_object' do expect(socket).to be_alive end end context 'when the key is specified using both a file and a PEM-encoded string' do let(:options) do super().merge( :ssl_key_string => 'This is a normal string, not a PEM-encoded key' ) end before do socket.connect! end # since the lower priority option is clearly invalid we verify priority by checking that it connects it 'discards the value of :ssl_key_string' do expect(socket).to be_alive end end context 'when the key is specified using both a file and an object' do let(:options) do super().merge( :ssl_cert_object => 'This is a string, not a key' ) end before do socket.connect! end # since the lower priority option is clearly invalid we verify priority by checking that it connects it 'discards the value of :ssl_key_object' do expect(socket).to be_alive end end context 'when the key is specified using both a PEM-encoded string and an object' do let(:options) do { :ssl => true, :ssl_cert => CLIENT_CERT_PEM, :ssl_key_string => key_string, :ssl_key_object => 'This is a string, not a PKey', :ssl_verify => false } end before do socket.connect! end # since the lower priority option is clearly invalid we verify priority by checking that it connects it 'discards the value of :ssl_key_object' do expect(socket).to be_alive end end context 'when a certificate is passed, but it is not of the right type' do let(:options) do cert = "This is a string, not a X509 Certificate" { :ssl => true, :ssl_cert_object => cert, :ssl_key => CLIENT_KEY_PEM, :ssl_verify => false } end it 'raises a TypeError' do expect{ socket.connect! }.to raise_exception(TypeError) end end context 'when ruby version is < 2.4.1', if: (RUBY_VERSION < '2.4.1' && running_ssl?) do context 'when a key is passed, but it is not of the right type' do let(:options) do key = "This is a string not a key" { :ssl => true, :ssl_key_object => key, :ssl_cert => CLIENT_CERT_PEM, :ssl_verify => false } end it 'raises a TypeError' do expect{ socket.connect! }.to raise_exception(TypeError) end end end # Note that as of MRI 2.4, Creating a socket with the wrong key type raises # a NoMethodError because #private? is attempted to be called on the key. context 'when ruby version is >= 2.4.1', if: (RUBY_VERSION >= '2.4.1' && running_ssl?) do context 'when a key is passed, but it is not of the right type' do let(:options) do key = "This is a string not a key" { :ssl => true, :ssl_key_object => key, :ssl_cert => CLIENT_CERT_PEM, :ssl_verify => false } end it 'raises a NoMethodError' do expect{ socket.connect! 
}.to raise_exception(NoMethodError) end end end context 'when a bad certificate is provided' do let(:options) do super().merge( :ssl_key => COMMAND_MONITORING_TESTS.first ) end it 'raises an exception' do expect { socket.connect! }.to raise_exception end end context 'when a CA certificate is provided', if: testing_ssl_locally? do context 'as a path to a file' do let(:options) do super().merge( :ssl_ca_cert => CA_PEM, :ssl_verify => true ) end before do socket.connect! end it 'connects to the server' do expect(socket).to be_alive end end context 'as a string containing the PEM-encoded certificate' do let (:options) do super().merge( :ssl_ca_cert_string => ca_cert_string, :ssl_verify => true ) end before do socket.connect! end it 'connects to the server' do expect(socket).to be_alive end end context 'as an array of Certificate objects' do let (:options) do cert = [OpenSSL::X509::Certificate.new(ca_cert_string)] super().merge( :ssl_ca_cert_object => cert, :ssl_verify => true ) end before do socket.connect! end it 'connects to the server' do expect(socket).to be_alive end end context 'both as a file and a PEM-encoded parameter' do let(:options) do super().merge( :ssl_ca_cert => CA_PEM, :ssl_ca_cert_string => 'This is a string, not a certificate', :ssl_verify => true ) end before do socket.connect! end # since the lower priority option is clearly invalid we verify priority by checking that it connects it 'discards the value of :ssl_ca_cert_string' do expect(socket).to be_alive end end context 'both as a file and as object parameter' do let(:options) do super().merge( :ssl_ca_cert => CA_PEM, :ssl_ca_cert_object => 'This is a string, not an array of certificates', :ssl_verify => true ) end before do socket.connect! end it 'discards the value of :ssl_ca_cert_object' do expect(socket).to be_alive end end context 'both as a PEM-encoded string and as object parameter' do let(:options) do cert = File.read(CA_PEM) super().merge( :ssl_ca_cert_string => cert, :ssl_ca_cert_object => 'This is a string, not an array of certificates', :ssl_verify => true ) end before do socket.connect! end it 'discards the value of :ssl_ca_cert_object' do expect(socket).to be_alive end end end context 'when a CA certificate is not provided', if: testing_ssl_locally? do let(:options) do super().merge( :ssl_verify => true ) end before do ENV['SSL_CERT_FILE']= CA_PEM socket.connect! end it 'uses the default cert store' do expect(socket).to be_alive end end context 'when ssl_verify is not specified', if: testing_ssl_locally? do let(:options) do super().merge( :ssl_ca_cert => CA_PEM ).tap { |options| options.delete(:ssl_verify) } end before do socket.connect! end it 'verifies the server certificate' do expect(socket).to be_alive end end context 'when ssl_verify is true', if: testing_ssl_locally? do let(:options) do super().merge( :ssl_ca_cert => CA_PEM, :ssl_verify => true ) end before do socket.connect! end it 'verifies the server certificate' do expect(socket).to be_alive end end context 'when ssl_verify is false' do let(:options) do super().merge( :ssl_ca_cert => 'invalid', :ssl_verify => false ) end before do socket.connect! 
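# With :ssl_verify => false the server certificate chain is not validated at all, so even the bogus :ssl_ca_cert value above should not prevent the handshake, as asserted below.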
end it 'does not verify the server certificate' do expect(socket).to be_alive end end end describe '#readbyte' do before do allow_message_expectations_on_nil allow(socket.socket).to receive(:read) do |length| socket_content[0, length] end end context 'with the socket providing "abc"' do let(:socket_content) { "abc" } it 'should return 97 (the byte for "a")' do expect(socket.readbyte).to eq(97) end end context 'with the socket providing "\x00" (NULL_BYTE)' do let(:socket_content) { "\x00" } it 'should return 0' do expect(socket.readbyte).to eq(0) end end context 'with the socket providing no data' do let(:socket_content) { "" } it 'should raise EOFError' do expect { socket.readbyte } .to raise_error(Mongo::Error::SocketError).with_message("EOFError") end end end end mongo-2.5.1/spec/mongo/socket/unix_spec.rb0000644000004100000410000000143113257253113020552 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Socket::Unix do let(:socket) do described_class.new("/tmp/mongodb-27017.sock", 5) end describe '#connect!' do before do socket.connect! end after do socket.close end it 'connects to the server' do expect(socket).to be_alive end end describe '#alive?' do context 'when the socket is connected' do before do socket.connect! end after do socket.close end it 'returns true' do expect(socket).to be_alive end end context 'when the socket is not connected' do before do socket.close end it 'raises error' do expect { socket.alive? }.to raise_error(IOError) end end end end mongo-2.5.1/spec/mongo/shell_examples_spec.rb0000644000004100000410000007367213257253113021324 0ustar www-datawww-datarequire 'spec_helper' describe 'shell examples in Ruby' do let(:client) do authorized_client end before do client[:inventory].drop end after do client[:inventory].drop end context 'insert examples' do before do # Start Example 1 client[:inventory].insert_one({ item: 'canvas', qty: 100, tags: [ 'cotton' ], size: { h: 28, w: 35.5, uom: 'cm' } }) # End Example 1 end context 'example 2' do let(:example) do # Start Example 2 client[:inventory].find(item: 'canvas') # End Example 2 end it 'matches the expected output' do expect(example.count).to eq(1) end end context 'example 3' do let(:example) do # Start Example 3 client[:inventory].insert_many([{ item: 'journal', qty: 25, tags: ['blank', 'red'], size: { h: 14, w: 21, uom: 'cm' } }, { item: 'mat', qty: 85, tags: ['gray'], size: { h: 27.9, w: 35.5, uom: 'cm' } }, { item: 'mousepad', qty: 25, tags: ['gel', 'blue'], size: { h: 19, w: 22.85, uom: 'cm' } } ]) # End Example 3 end it 'matches the expected output' do expect(example.inserted_count).to eq(3) end end end context 'query top-level' do before do # Start Example 6 client[:inventory].insert_many([{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }, { item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }, { item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }, { item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }, { item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' } ]) # End Example 6 end context 'example 7' do let(:example) do # Start Example 7 client[:inventory].find({}) # End Example 7 end it 'matches the expected output' do expect(example.to_a.size).to eq(5) end end context 'example 8' do let(:example) do # Start Example 8 client[:inventory].find # End Example 8 end it 'matches the expected output' do expect(example.to_a.size).to eq(5) end end context 'example 9' do let(:example) do # 
Start Example 9 client[:inventory].find(status: 'D') # End Example 9 end it 'matches the expected output' do expect(example.to_a.size).to eq(2) end end context 'example 10' do let(:example) do # Start Example 10 client[:inventory].find(status: { '$in' => [ 'A', 'D' ]}) # End Example 10 end it 'matches the expected output' do expect(example.to_a.size).to eq(5) end end context 'example 11' do let(:example) do # Start Example 11 client[:inventory].find(status: 'A', qty: { '$lt' => 30 }) # End Example 11 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end context 'example 12' do let(:example) do # Start Example 12 client[:inventory].find('$or' => [{ status: 'A' }, { qty: { '$lt' => 30 } } ]) # End Example 12 end it 'matches the expected output' do expect(example.to_a.size).to eq(3) end end context 'example 13' do let(:example) do # Start Example 13 client[:inventory].find(status: 'A', '$or' => [{ qty: { '$lt' => 30 } }, { item: { '$regex' => BSON::Regexp::Raw.new('^p') } } ]) # End Example 13 end it 'matches the expected output' do expect(example.to_a.size).to eq(2) end end end context 'query embedded documents' do before do # Start Example 14 client[:inventory].insert_many([ { item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }, { item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }, { item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }, { item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }, { item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' } ]) # End Example 14 end context 'example 15' do let(:example) do # Start Example 15 client[:inventory].find(size: { h: 14, w: 21, uom: 'cm' }) # End Example 15 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end context 'example 16' do let(:example) do # Start Example 16 client[:inventory].find(size: { h: 21, w: 14, uom: 'cm' }) # End Example 16 end it 'matches the expected output' do expect(example.to_a.size).to eq(0) end end context 'example 17' do let(:example) do # Start Example 17 client[:inventory].find('size.uom' => 'in') # End Example 17 end it 'matches the expected output' do expect(example.to_a.size).to eq(2) end end context 'example 18' do let(:example) do # Start Example 18 client[:inventory].find('size.h' => { '$lt' => 15 }) # End Example 18 end it 'matches the expected output' do expect(example.to_a.size).to eq(4) end end context 'example 19' do let(:example) do # Start Example 19 client[:inventory].find('size.h' => { '$lt' => 15 }, 'size.uom' => 'in', 'status' => 'D') # End Example 19 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end end context 'query arrays' do before do # Start Example 20 client[:inventory].insert_many([{ item: 'journal', qty: 25, tags: ['blank', 'red'], dim_cm: [ 14, 21 ] }, { item: 'notebook', qty: 50, tags: ['red', 'blank'], dim_cm: [ 14, 21 ] }, { item: 'paper', qty: 100, tags: ['red', 'blank', 'plain'], dim_cm: [ 14, 21 ] }, { item: 'planner', qty: 75, tags: ['blank', 'red'], dim_cm: [ 22.85, 30 ] }, { item: 'postcard', qty: 45, tags: ['blue'], dim_cm: [ 10, 15.25 ] } ]) # End Example 20 end context 'example 21' do let(:example) do # Start Example 21 client[:inventory].find(tags: ['red', 'blank']) # End Example 21 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end context 'example 22' do let(:example) do # Start Example 22 client[:inventory].find(tags: { '$all' => 
['red', 'blank'] }) # End Example 22 end it 'matches the expected output' do expect(example.to_a.size).to eq(4) end end context 'example 23' do let(:example) do # Start Example 23 client[:inventory].find(tags: 'red') # End Example 23 end it 'matches the expected output' do expect(example.count).to eq(4) end end context 'example 24' do let(:example) do # Start Example 24 client[:inventory].find(dim_cm: { '$gt' => 25 }) # End Example 24 end it 'matches the expected output' do expect(example.count).to eq(1) end end context 'example 25' do let(:example) do # Start Example 25 client[:inventory].find(dim_cm: { '$gt' => 15, '$lt' => 20 }) # End Example 25 end it 'matches the expected output' do expect(example.count).to eq(4) end end context 'example 26' do let(:example) do # Start Example 26 client[:inventory].find(dim_cm: { '$elemMatch' => { '$gt' => 22, '$lt' => 30 } }) # End Example 26 end it 'matches the expected output' do expect(example.count).to eq(1) end end context 'example 27' do let(:example) do # Start Example 27 client[:inventory].find('dim_cm.1' => { '$gt' => 25 }) # End Example 27 end it 'matches the expected output' do expect(example.count).to eq(1) end end context 'example 28' do let(:example) do # Start Example 28 client[:inventory].find(tags: { '$size' => 3 }) # End Example 28 end it 'matches the expected output' do expect(example.count).to eq(1) end end end context 'query array of embedded documents' do before do # Start Example 29 client[:inventory].insert_many([{ item: 'journal', instock: [ { warehouse: 'A', qty: 5 }, { warehouse: 'C', qty: 15 }] }, { item: 'notebook', instock: [ { warehouse: 'C', qty: 5 }] }, { item: 'paper', instock: [ { warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 15 }] }, { item: 'planner', instock: [ { warehouse: 'A', qty: 40 }, { warehouse: 'B', qty: 5 }] }, { item: 'postcard', instock: [ { warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 }] } ]) # End Example 29 end context 'example 30' do let(:example) do # Start Example 30 client[:inventory].find(instock: { warehouse: 'A', qty: 5 }) # End Example 30 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end context 'example 31' do let(:example) do # Start Example 31 client[:inventory].find(instock: { qty: 5, warehouse: 'A' } ) # End Example 31 end it 'matches the expected output' do expect(example.to_a.size).to eq(0) end end context 'example 32' do let(:example) do # Start Example 32 client[:inventory].find('instock.0.qty' => { '$lte' => 20 }) # End Example 32 end it 'matches the expected output' do expect(example.to_a.size).to eq(3) end end context 'example 33' do let(:example) do # Start Example 33 client[:inventory].find('instock.qty' => { '$lte' => 20 }) # End Example 33 end it 'matches the expected output' do expect(example.to_a.size).to eq(5) end end context 'example 34' do let(:example) do # Start Example 34 client[:inventory].find(instock: { '$elemMatch' => { qty: 5, warehouse: 'A' } }) # End Example 34 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end context 'example 35' do let(:example) do # Start Example 35 client[:inventory].find(instock: { '$elemMatch' => { qty: { '$gt' => 10, '$lte' => 20 } } }) # End Example 35 end it 'matches the expected output' do expect(example.to_a.size).to eq(3) end end context 'example 36' do let(:example) do # Start Example 36 client[:inventory].find('instock.qty' => { '$gt' => 10, '$lte' => 20 }) # End Example 36 end it 'matches the expected output' do expect(example.to_a.size).to eq(4) end 
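# Editorial note (not part of the upstream shell examples): Example 35 uses
# $elemMatch, so a single instock element must satisfy both bounds, while the
# dotted form in Example 36 above lets each condition be met by a different
# instock element -- which is why it matches 4 documents rather than 3.
#
#   client[:inventory].find(instock: { '$elemMatch' => { qty: { '$gt' => 10, '$lte' => 20 } } }) # => 3 docs
#   client[:inventory].find('instock.qty' => { '$gt' => 10, '$lte' => 20 })                      # => 4 docs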
end context 'example 37' do let(:example) do # Start Example 37 client[:inventory].find('instock.qty' => 5, 'instock.warehouse' => 'A') # End Example 37 end it 'matches the expected output' do expect(example.to_a.size).to eq(2) end end end context 'query null' do before do # Start Example 38 client[:inventory].insert_many([{ _id: 1, item: nil }, { _id: 2 }]) # End Example 38 end context 'example 39' do let(:example) do # Start Example 39 client[:inventory].find(item: nil) # End Example 39 end it 'matches the expected output' do expect(example.to_a.size).to eq(2) end end context 'example 40' do let(:example) do # Start Example 40 client[:inventory].find(item: { '$type' => 10 }) # End Example 40 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end context 'example 41' do let(:example) do # Start Example 41 client[:inventory].find(item: { '$exists' => false }) # End Example 41 end it 'matches the expected output' do expect(example.to_a.size).to eq(1) end end end context 'projection' do before do # Start Example 42 client[:inventory].insert_many([{ item: 'journal', status: 'A', size: { h: 14, w: 21, uom: 'cm' }, instock: [ { warehouse: 'A', qty: 5 }] }, { item: 'notebook', status: 'A', size: { h: 8.5, w: 11, uom: 'in' }, instock: [ { warehouse: 'C', qty: 5 }] }, { item: 'paper', status: 'D', size: { h: 8.5, w: 11, uom: 'in' }, instock: [ { warehouse: 'A', qty: 60 }] }, { item: 'planner', status: 'D', size: { h: 22.85, w: 30, uom: 'cm' }, instock: [ { warehouse: 'A', qty: 40 }] }, { item: 'postcard', status: 'A', size: { h: 10, w: 15.25, uom: 'cm' }, instock: [ { warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 }] }]) # End Example 42 end context 'example 43' do let(:example) do # Start Example 43 client[:inventory].find(status: 'A') # End Example 43 end it 'matches the expected output' do expect(example.to_a.size).to eq(3) end end context 'example 44' do let!(:example) do # Start Example 44 client[:inventory].find({ status: 'A' }, projection: { item: 1, status: 1 }) # End Example 44 end it 'matches the expected output' do expect(example.to_a[1]['_id']).not_to be_nil expect(example.to_a[1]['item']).not_to be_nil expect(example.to_a[1]['status']).not_to be_nil expect(example.to_a[1]['size']).to be_nil expect(example.to_a[1]['instock']).to be_nil end end context 'example 45' do let!(:example) do # Start Example 45 client[:inventory].find({ status: 'A' }, projection: { item: 1, status: 1, _id: 0 }) # End Example 45 end it 'matches the expected output' do expect(example.to_a[1]['_id']).to be_nil expect(example.to_a[1]['item']).not_to be_nil expect(example.to_a[1]['status']).not_to be_nil expect(example.to_a[1]['size']).to be_nil expect(example.to_a[1]['instock']).to be_nil end end context 'example 46' do let!(:example) do # Start Example 46 client[:inventory].find({ status: 'A' }, projection: { status: 0, instock: 0 }) # End Example 46 end it 'matches the expected output' do expect(example.to_a[1]['_id']).not_to be_nil expect(example.to_a[1]['item']).not_to be_nil expect(example.to_a[1]['status']).to be_nil expect(example.to_a[1]['size']).not_to be_nil expect(example.to_a[1]['instock']).to be_nil end end context 'example 47' do let!(:example) do # Start Example 47 client[:inventory].find({ status: 'A' }, projection: { 'item' => 1, 'status' => 1, 'size.uom' => 1 }) # End Example 47 end it 'matches the expected output' do expect(example.to_a[1]['_id']).not_to be_nil expect(example.to_a[1]['item']).not_to be_nil expect(example.to_a[1]['status']).not_to be_nil 
expect(example.to_a[1]['size']).not_to be_nil expect(example.to_a[1]['instock']).to be_nil expect(example.to_a[1]['size']).not_to be_nil expect(example.to_a[1]['size']['uom']).not_to be_nil expect(example.to_a[1]['size']['h']).to be_nil expect(example.to_a[1]['size']['w']).to be_nil end end context 'example 48' do let!(:example) do # Start Example 48 client[:inventory].find({ status: 'A' }, projection: { 'size.uom' => 0 }) # End Example 48 end it 'matches the expected output' do expect(example.to_a[1]['_id']).not_to be_nil expect(example.to_a[1]['item']).not_to be_nil expect(example.to_a[1]['status']).not_to be_nil expect(example.to_a[1]['size']).not_to be_nil expect(example.to_a[1]['instock']).not_to be_nil expect(example.to_a[1]['size']).not_to be_nil expect(example.to_a[1]['size']['uom']).to be_nil expect(example.to_a[1]['size']['h']).not_to be_nil expect(example.to_a[1]['size']['w']).not_to be_nil end end context 'example 49' do let!(:example) do # Start Example 49 client[:inventory].find({ status: 'A' }, projection: {'item' => 1, 'status' => 1, 'instock.qty' => 1 }) # End Example 49 end let(:instock_list) do example.to_a[1]['instock'] end it 'matches the expected output' do expect(example.to_a[1]['_id']).not_to be_nil expect(example.to_a[1]['item']).not_to be_nil expect(example.to_a[1]['status']).not_to be_nil expect(example.to_a[1]['size']).to be_nil expect(example.to_a[1]['instock']).not_to be_nil expect(instock_list.collect { |doc| doc['warehouse'] }.compact).to be_empty expect(instock_list.collect { |doc| doc['qty'] }).to eq([5]) end end context 'example 50' do let!(:example) do # Start Example 50 client[:inventory].find({ status: 'A' }, projection: {'item' => 1, 'status' => 1, 'instock' => { '$slice' => -1 } }) # End Example 50 end let(:instock_list) do example.to_a[1]['instock'] end it 'matches the expected output' do expect(example.to_a[1]['_id']).not_to be_nil expect(example.to_a[1]['item']).not_to be_nil expect(example.to_a[1]['status']).not_to be_nil expect(example.to_a[1]['size']).to be_nil expect(example.to_a[1]['instock']).not_to be_nil expect(instock_list.size).to eq(1) end end end context 'update' do before do # Start Example 51 client[:inventory].insert_many([ { item: 'canvas', qty: 100, size: { h: 28, w: 35.5, uom: 'cm' }, status: 'A' }, { item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }, { item: 'mat', qty: 85, size: { h: 27.9, w: 35.5, uom: 'cm' }, status: 'A' }, { item: 'mousepad', qty: 25, size: { h: 19, w: 22.85, uom: 'cm' }, status: 'P' }, { item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'P' }, { item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }, { item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }, { item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }, { item: 'sketchbook', qty: 80, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }, { item: 'sketch pad', qty: 95, size: { h: 22.85, w: 30.5, uom: 'cm' }, status: 'A' } ]) # End Example 51 end context 'example 52' do let!(:example) do # Start Example 52 client[:inventory].update_one({ item: 'paper'}, { '$set' => { 'size.uom' => 'cm', 'status' => 'P' }, '$currentDate' => { 'lastModified' => true } }) # End Example 52 end it 'matches the expected output' do expect(client[:inventory].find(item: 'paper').all? { |doc| doc['size']['uom'] == 'cm'}).to be(true) expect(client[:inventory].find(item: 'paper').all? 
{ |doc| doc['status'] == 'P'}).to be(true) expect(client[:inventory].find(item: 'paper').all? { |doc| doc['lastModified'] }).to be(true) end end context 'example 53' do let!(:example) do # Start Example 53 client[:inventory].update_many({ qty: { '$lt' => 50 } }, { '$set' => { 'size.uom' => 'in', 'status' => 'P' }, '$currentDate' => { 'lastModified' => true } }) # End Example 53 end let(:from_db) do client[:inventory].find(qty: { '$lt' => 50 }) end it 'matches the expected output' do expect(from_db.all? { |doc| doc['size']['uom'] == 'in'}).to be(true) expect(from_db.all? { |doc| doc['status'] == 'P'}).to be(true) expect(from_db.all? { |doc| doc['lastModified'] }).to be(true) end end context 'example 54' do let!(:example) do # Start Example 54 client[:inventory].replace_one({ item: 'paper' }, { item: 'paper', instock: [ { warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 40 } ] }) # End Example 54 end let(:from_db) do client[:inventory].find({ item: 'paper' }, projection: { _id: 0 }) end it 'matches the expected output' do expect(from_db.first.keys.size).to eq(2) expect(from_db.first.key?('item')).to be(true) expect(from_db.first.key?('instock')).to be(true) expect(from_db.first['instock'].size).to eq(2) end end end context 'delete' do before do # Start Example 55 client[:inventory].insert_many([ { item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }, { item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'P' }, { item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }, { item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }, { item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }, ]) # End Example 55 end context 'example 56' do let(:example) do # Start Example 56 client[:inventory].delete_many({}) # End Example 56 end it 'matches the expected output' do expect(example.deleted_count).to eq(5) expect(client[:inventory].find.to_a.size).to eq(0) end end context 'example 57' do let(:example) do # Start Example 57 client[:inventory].delete_many(status: 'A') # End Example 57 end it 'matches the expected output' do expect(example.deleted_count).to eq(2) expect(client[:inventory].find.to_a.size).to eq(3) end end context 'example 58' do let(:example) do # Start Example 58 client[:inventory].delete_one(status: 'D') # End Example 58 end it 'matches the expected output' do expect(example.deleted_count).to eq(1) expect(client[:inventory].find.to_a.size).to eq(4) end end end end mongo-2.5.1/spec/mongo/max_staleness_spec.rb0000644000004100000410000001110613257253113021145 0ustar www-datawww-datarequire 'spec_helper' describe 'Max Staleness Spec' do include Mongo::ServerSelection::Read MAX_STALENESS_TESTS.each do |file| spec = Mongo::ServerSelection::Read::Spec.new(file) context(spec.description) do let(:topology) do spec.type.new({}, monitoring, []) end let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:listeners) do Mongo::Event::Listeners.new end let(:options) do if spec.heartbeat_frequency TEST_OPTIONS.merge(heartbeat_frequency: spec.heartbeat_frequency) else TEST_OPTIONS.dup.tap do |opts| opts.delete(:heartbeat_frequency) end end.merge!(server_selection_timeout: 0.2, connect_timeout: 0.1) end let(:cluster) do double('cluster').tap do |c| allow(c).to receive(:topology).and_return(topology) allow(c).to receive(:single?).and_return(topology.single?) allow(c).to receive(:sharded?).and_return(topology.sharded?) allow(c).to receive(:replica_set?).and_return(topology.replica_set?) 
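# Editorial note (not in the original spec): the cluster is a double rather
# than a real Mongo::Cluster -- only the topology predicates, options, scan!
# and app_metadata that server selection consults are stubbed, so the max
# staleness calculations can run without any live servers.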
allow(c).to receive(:unknown?).and_return(topology.unknown?) allow(c).to receive(:options).and_return(options) allow(c).to receive(:scan!).and_return(true) allow(c).to receive(:app_metadata).and_return(app_metadata) end end let(:candidate_servers) do spec.candidate_servers.collect do |server| features = double('features').tap do |feat| allow(feat).to receive(:max_staleness_enabled?).and_return(server['maxWireVersion'] && server['maxWireVersion'] >= 5) allow(feat).to receive(:check_driver_support!).and_return(true) end address = Mongo::Address.new(server['address']) Mongo::Server.new(address, cluster, monitoring, listeners, options).tap do |s| allow(s).to receive(:average_round_trip_time).and_return(server['avg_rtt_ms'] / 1000.0) if server['avg_rtt_ms'] allow(s).to receive(:tags).and_return(server['tags']) allow(s).to receive(:secondary?).and_return(server['type'] == 'RSSecondary') allow(s).to receive(:primary?).and_return(server['type'] == 'RSPrimary') allow(s).to receive(:connectable?).and_return(true) allow(s).to receive(:last_write_date).and_return(server['lastWrite']['lastWriteDate']['$numberLong'].to_i) if server['lastWrite'] allow(s).to receive(:last_scan).and_return(server['lastUpdateTime']) allow(s).to receive(:features).and_return(features) end end end let(:in_latency_window) do spec.in_latency_window.collect do |server| Mongo::Server.new(Mongo::Address.new(server['address']), cluster, monitoring, listeners, options) end end let(:server_selector_definition) do { mode: spec.read_preference['mode'] }.tap do |definition| definition[:tag_sets] = spec.read_preference['tag_sets'] definition[:max_staleness] = spec.max_staleness if spec.max_staleness end end let(:server_selector) do Mongo::ServerSelector.get(server_selector_definition) end before do allow(cluster).to receive(:servers).and_return(candidate_servers) end context 'when the max staleness is invalid' do it 'Raises an InvalidServerPreference exception', if: spec.invalid_max_staleness? do expect do server_selector.select_server(cluster) end.to raise_exception(Mongo::Error::InvalidServerPreference) end end context 'when the max staleness is valid' do context 'when there are available servers' do it 'Finds all suitable servers in the latency window', if: (spec.replica_set? && !spec.invalid_max_staleness? && spec.server_available?) do expect(server_selector.send(:select, cluster.servers)).to match_array(in_latency_window) end it 'Finds the most suitable server in the latency window', if: (!spec.invalid_max_staleness? && spec.server_available?) do expect(in_latency_window).to include(server_selector.select_server(cluster)) end end context 'when there are no available servers', if: (!spec.invalid_max_staleness? && !spec.server_available?) do it 'Raises a NoServerAvailable Exception' do expect do server_selector.select_server(cluster) end.to raise_exception(Mongo::Error::NoServerAvailable) end end end end end end mongo-2.5.1/spec/mongo/gridfs_spec.rb0000644000004100000410000000251513257253113017561 0ustar www-datawww-datarequire 'spec_helper' describe 'GridFS' do include Mongo::GridFS GRIDFS_TESTS.each do |file| spec = Mongo::GridFS::Spec.new(file) context(spec.description) do spec.tests.each do |test| context(test.description) do after do fs.files_collection.delete_many fs.chunks_collection.delete_many test.expected_files_collection.delete_many test.expected_chunks_collection.delete_many end let!(:result) do test.run(fs) end let(:fs) do authorized_collection.database.fs end it "raises the correct error", if: test.error? 
do expect(result).to match_error(test.expected_error) end it 'completes successfully', unless: test.error? do expect(result).to completes_successfully(test) end it 'has the correct documents in the files collection', if: test.assert_data? do expect(fs.files_collection).to match_files_collection(test.expected_files_collection) end it 'has the correct documents in the chunks collection', if: test.assert_data? do expect(fs.chunks_collection).to match_chunks_collection(test.expected_chunks_collection) end end end end end end mongo-2.5.1/spec/mongo/address_spec.rb0000644000004100000410000001431013257253113017724 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Address do describe '#==' do context 'when the other host and port are the same' do let(:address) do described_class.new('127.0.0.1:27017') end let(:other) do described_class.new('127.0.0.1:27017') end it 'returns true' do expect(address).to eq(other) end end context 'when the other port is different' do let(:address) do described_class.new('127.0.0.1:27017') end let(:other) do described_class.new('127.0.0.1:27018') end it 'returns false' do expect(address).to_not eq(other) end end context 'when the other host is different' do let(:address) do described_class.new('127.0.0.1:27017') end let(:other) do described_class.new('127.0.0.2:27017') end it 'returns false' do expect(address).to_not eq(other) end end context 'when the other object is not an address' do let(:address) do described_class.new('127.0.0.1:27017') end it 'returns false' do expect(address).to_not eq('test') end end context 'when the addresses are identical unix sockets' do let(:address) do described_class.new('/path/to/socket.sock') end let(:other) do described_class.new('/path/to/socket.sock') end it 'returns true' do expect(address).to eq(other) end end end describe '#hash' do let(:address) do described_class.new('127.0.0.1:27017') end it 'hashes on the host and port' do expect(address.hash).to eq([ '127.0.0.1', 27017 ].hash) end end describe '#initialize' do context 'when providing an ipv4 host' do context 'when a port is provided' do let(:address) do described_class.new('127.0.0.1:27017') end it 'sets the port' do expect(address.port).to eq(27017) end it 'sets the host' do expect(address.host).to eq('127.0.0.1') end end context 'when no port is provided' do let(:address) do described_class.new('127.0.0.1') end it 'sets the port to 27017' do expect(address.port).to eq(27017) end it 'sets the host' do expect(address.host).to eq('127.0.0.1') end end end context 'when providing an ipv6 host' do context 'when a port is provided' do let(:address) do described_class.new('[::1]:27017') end it 'sets the port' do expect(address.port).to eq(27017) end it 'sets the host' do expect(address.host).to eq('::1') end end context 'when no port is provided' do let(:address) do described_class.new('[::1]') end it 'sets the port to 27017' do expect(address.port).to eq(27017) end it 'sets the host' do expect(address.host).to eq('::1') end end end context 'when providing a DNS entry' do context 'when a port is provided' do let(:address) do described_class.new('localhost:27017') end it 'sets the port' do expect(address.port).to eq(27017) end it 'sets the host' do expect(address.host).to eq('localhost') end end context 'when a port is not provided' do let(:address) do described_class.new('localhost') end it 'sets the port to 27017' do expect(address.port).to eq(27017) end it 'sets the host' do expect(address.host).to eq('localhost') end end end context 'when providing a socket path' do 
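# Editorial note (not in the original spec): a Unix domain socket path is used
# verbatim as the host -- see spec/mongo/socket/unix_spec.rb earlier in this
# archive, which opens "/tmp/mongodb-27017.sock" -- so no port is parsed out
# of the string, as the examples below verify.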
let(:address) do described_class.new('/path/to/socket.sock') end it 'sets the port to nil' do expect(address.port).to be_nil end it 'sets the host' do expect(address.host).to eq('/path/to/socket.sock') end end end describe "#socket" do let(:address) do default_address end let(:host) do address.host end let(:addr_info) do family = (host == 'localhost') ? ::Socket::AF_INET : ::Socket::AF_UNSPEC ::Socket.getaddrinfo(host, nil, family, ::Socket::SOCK_STREAM) end let(:socket_address_or_host) do (host == 'localhost') ? addr_info.first[3] : host end context 'when providing a DNS entry that resolves to both IPv6 and IPv4' do before do address.instance_variable_set(:@resolver, nil) allow(::Socket).to receive(:getaddrinfo).and_return( [ ["AF_INET6", 0, '::1', '::1', ::Socket::AF_INET6, 1, 6], ["AF_INET", 0, socket_address_or_host, socket_address_or_host, ::Socket::AF_INET, 1, 6]] ) end it "attempts to use IPv6 and fallbacks to IPv4" do expect(address.socket(0.0)).not_to be_nil end end context 'when creating a socket using the resolver' do before do address.instance_variable_set(:@resolver, nil) address.send(:initialize_resolver!, (SSL ? SSL_OPTIONS : {})) end it 'uses the host, not the IP address' do expect(address.socket(0.0).host).to eq(socket_address_or_host) end let(:socket) do if running_ssl? address.socket(0.0, SSL_OPTIONS).instance_variable_get(:@tcp_socket) else address.socket(0.0).instance_variable_get(:@socket) end end if Socket.const_defined?(:TCP_KEEPINTVL) it 'sets the socket TCP_KEEPINTVL option' do expect(socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_KEEPINTVL).int).to be <= 10 end end if Socket.const_defined?(:TCP_KEEPCNT) it 'sets the socket TCP_KEEPCNT option' do expect(socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_KEEPCNT).int).to be <= 9 end end if Socket.const_defined?(:TCP_KEEPIDLE) it 'sets the socket TCP_KEEPIDLE option' do expect(socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_KEEPIDLE).int).to be <= 300 end end end end end mongo-2.5.1/spec/mongo/sdam_spec.rb0000644000004100000410000000737313257253113017236 0ustar www-datawww-datarequire 'spec_helper' describe 'Server Discovery and Monitoring' do include Mongo::SDAM SERVER_DISCOVERY_TESTS.each do |file| spec = Mongo::SDAM::Spec.new(file) context(spec.description) do before(:all) do @client = Mongo::Client.new([]) @client.send(:create_from_uri, spec.uri_string) client_options = @client.instance_variable_get(:@options) @client.instance_variable_set(:@options, client_options.merge(heartbeat_frequency: 100, connect_timeout: 0.1)) @client.cluster.instance_variable_set(:@options, client_options.merge(heartbeat_frequency: 100, connect_timeout: 0.1)) @client.cluster.instance_variable_get(:@servers).each { |s| s.disconnect!; s.unknown!; } end after(:all) do @client.close end spec.phases.each_with_index do |phase, index| context("Phase: #{index + 1}") do before(:all) do phase.responses.each do |response| server = find_server(@client, response.address) unless server server = Mongo::Server.new( Mongo::Address.new(response.address), @client.cluster, @client.instance_variable_get(:@monitoring), @client.cluster.send(:event_listeners), @client.cluster.options ) server.disconnect! server.unknown! end monitor = server.instance_variable_get(:@monitor) description = monitor.inspector.run(server.description, response.ismaster, 0.5) monitor.instance_variable_set(:@description, description) end end if phase.outcome.compatible? let(:cluster_addresses) do @client.cluster.instance_variable_get(:@servers). 
collect(&:address).collect(&:to_s).uniq.sort end let(:phase_addresses) do phase.outcome.servers.keys.sort end it "sets the cluster topology to #{phase.outcome.topology_type}" do expect(@client.cluster).to be_topology(phase.outcome.topology_type) end it "sets the cluster replica set name to #{phase.outcome.set_name.inspect}" do expect(@client.cluster.replica_set_name).to eq(phase.outcome.set_name) end it "sets the cluster logical session timeout minutes to #{phase.outcome.logical_session_timeout.inspect}" do expect(@client.cluster.logical_session_timeout).to eq(phase.outcome.logical_session_timeout) end it "has the expected servers in the cluster" do expect(cluster_addresses).to eq(phase_addresses) end phase.outcome.servers.each do |uri, server| it "sets #{uri} to #{server['type']}" do expect(find_server(@client, uri)).to be_server_type(server['type']) end it "sets #{uri} replica set name to #{server['setName'].inspect}" do expect(find_server(@client, uri).replica_set_name).to eq(server['setName']) end end else before do @client.cluster.servers.each do |server| allow(server).to receive(:connectable?).and_return(true) end end it 'raises an UnsupportedFeatures error' do expect { Mongo::ServerSelector.get(mode: :primary).select_server(@client.cluster) Mongo::ServerSelector.get(mode: :secondary).select_server(@client.cluster) }.to raise_exception(Mongo::Error::UnsupportedFeatures) end end end end end end end mongo-2.5.1/spec/mongo/grid/0000755000004100000410000000000013257253113015666 5ustar www-datawww-datamongo-2.5.1/spec/mongo/grid/stream/0000755000004100000410000000000013257253113017161 5ustar www-datawww-datamongo-2.5.1/spec/mongo/grid/stream/read_spec.rb0000644000004100000410000001253713257253113021443 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Grid::FSBucket::Stream::Read do let(:fs_options) do { } end let(:fs) do authorized_client.database.fs(fs_options) end let(:options) do { file_id: file_id } end let(:filename) do 'specs.rb' end let!(:file_id) do fs.upload_from_stream(filename, File.open(__FILE__)) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:stream) do described_class.new(fs, options) end describe '#initialize' do it 'sets the file id' do expect(stream.file_id).to eq(file_id) end it 'sets the fs object' do expect(stream.fs).to eq(fs) end context 'when there is a read preference set on the FSBucket' do let(:fs_options) do { read: { mode: :secondary } } end it 'uses the read preference of the fs as a default' do expect(stream.read_preference).to eq(fs.read_preference) end end it 'opens a stream' do expect(stream.close).to eq(file_id) end context 'when provided options' do context 'when provided read preference' do let(:options) do { file_id: file_id, read: { mode: :primary_preferred } } end it 'sets the read preference' do expect(stream.read_preference).to eq(options[:read]) end it 'sets the read preference on the view' do expect(stream.send(:view).read).to eq(BSON::Document.new(options[:read])) end end context 'when provided a file_id' do it 'sets the file id' do expect(stream.file_id).to eq(options[:file_id]) end end end end describe '#each' do let(:filename) do 'specs.rb' end let!(:file_id) do fs.upload_from_stream(filename, File.open(__FILE__)) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:fs_options) do { chunk_size: 5 } end it 'iterates over all the chunks of the file' do stream.each do |chunk| expect(chunk).not_to be(nil) end end context 'when the stream is closed' do before do 
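# Editorial note (not in the original spec): the stream is closed up front so
# that the iteration attempted below has to fail with
# Mongo::Error::ClosedStream rather than returning chunks.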
stream.close end it 'does not allow further iteration' do expect { stream.to_a }.to raise_error(Mongo::Error::ClosedStream) end end context 'when a chunk is found out of order' do before do view = stream.fs.chunks_collection.find({ :files_id => file_id }, options).sort(:n => -1) stream.instance_variable_set(:@view, view) expect(stream).to receive(:close) end it 'raises an exception' do expect { stream.to_a }.to raise_error(Mongo::Error::MissingFileChunk) end it 'closes the query' do begin stream.to_a rescue Mongo::Error::MissingFileChunk end end end context 'when a chunk does not have the expected length' do before do stream.send(:file_info) stream.instance_variable_get(:@file_info).document['chunkSize'] = 4 expect(stream).to receive(:close) end it 'raises an exception' do expect { stream.to_a }.to raise_error(Mongo::Error::UnexpectedChunkLength) end it 'closes the query' do begin stream.to_a rescue Mongo::Error::UnexpectedChunkLength end end end context 'when there is no files document found' do before do fs.files_collection.delete_many end it 'raises an Exception' do expect{ stream.to_a }.to raise_exception(Mongo::Error::FileNotFound) end end end describe '#read' do let(:filename) do 'specs.rb' end let(:file) do File.open(__FILE__) end let(:file_id) do fs.upload_from_stream(filename, file) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end it 'returns a string of all data' do expect(stream.read.size).to eq(file.size) end end describe '#file_info' do it 'returns a files information document' do expect(stream.file_info).to be_a(Mongo::Grid::File::Info) end end describe '#close' do let(:view) do stream.instance_variable_get(:@view) end before do stream.to_a end it 'returns the file id' do expect(stream.close).to eq(file_id) end context 'when the stream is closed' do before do stream.to_a expect(view).to receive(:close_query).and_call_original end it 'calls close_query on the view' do expect(stream.close).to be_a(BSON::ObjectId) end end context 'when the stream is already closed' do before do stream.close end it 'raises an exception' do expect { stream.close }.to raise_error(Mongo::Error::ClosedStream) end end end describe '#closed?' do context 'when the stream is closed' do before do stream.close end it 'returns true' do expect(stream.closed?).to be(true) end end context 'when the stream is still open' do it 'returns false' do expect(stream.closed?).to be(false) end end end end mongo-2.5.1/spec/mongo/grid/stream/write_spec.rb0000644000004100000410000002671113257253113021661 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Grid::FSBucket::Stream::Write do let(:file) do File.open(__FILE__) end let(:file2) do File.open(__FILE__) end let(:fs_options) do { } end let(:fs) do authorized_client.database.fs(fs_options) end let(:filename) do 'specs.rb' end let(:extra_options) do { } end let(:options) do { filename: filename }.merge(extra_options) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:stream) do described_class.new(fs, options) end describe '#initialize' do it 'sets the file id' do expect(stream.file_id).to be_a(BSON::ObjectId) end it 'sets the fs object' do expect(stream.fs).to eq(fs) end it 'opens a stream' do expect(stream.close).to be_a(BSON::ObjectId) end context 'when the fs has a write concern', if: standalone? 
do let(:fs_options) do { write: INVALID_WRITE_CONCERN } end it 'uses the write concern of the fs as a default' do expect{ stream.close }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the fs does not have a write concern' do let(:fs) do authorized_client.with(write: nil).database.fs end it 'uses the write concern default at the operation level' do expect(stream.write(file).closed?).to eq(false) end end context 'when provided options' do context 'when provided a write option' do let(:extra_options) do { write: INVALID_WRITE_CONCERN } end let(:expected) do Mongo::WriteConcern.get(options[:write]).options end it 'sets the write concern' do expect(stream.write_concern.options).to eq(expected) end context 'when chunks are inserted' do it 'uses that write concern' do expect(stream.send(:chunks_collection).write_concern.options[:w]).to eq(expected[:w]) end end context 'when a files document is inserted' do it 'uses that write concern' do expect(stream.send(:files_collection).write_concern.options[:w]).to eq(expected[:w]) end end end context 'when provided a metadata document' do let(:options) do { metadata: { 'some_field' => 'test-file' } } end it 'sets the metadata document' do expect(stream.send(:file_info).metadata).to eq(options[:metadata]) end end context 'when provided a chunk size option' do let(:options) do { chunk_size: 50 } end it 'sets the chunk size' do expect(stream.send(:file_info).chunk_size).to eq(options[:chunk_size]) end context 'when chunk size is also set on the FSBucket object' do let(:fs_options) do { chunk_size: 100 } end it 'uses the write stream options' do expect(stream.send(:file_info).chunk_size).to eq(options[:chunk_size]) end end end context 'when provided a content type option' do let(:options) do { content_type: 'text/plain' } end it 'sets the content type' do expect(stream.send(:file_info).content_type).to eq(options[:content_type]) end end context 'when provided an aliases option' do let(:options) do { aliases: [ 'testing-file' ] } end it 'sets the aliases' do expect(stream.send(:file_info).document[:aliases]).to eq(options[:aliases]) end end context 'when provided a file_id option' do let(:options) do { file_id: 'Custom ID' } end it 'assigns the stream the file id' do expect(stream.file_id).to eq(options[:file_id]) end end end end describe '#write' do after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:file_from_db) do fs.find_one(filename: filename) end context 'when the stream is written to' do before do stream.write(file) end it 'does not close the stream' do expect(stream).not_to receive(:close) end end context 'when indexes need to be ensured' do context 'when the files collection is empty' do before do fs.files_collection.delete_many fs.chunks_collection.delete_many expect(fs.files_collection).to receive(:indexes).and_call_original expect(fs.chunks_collection).to receive(:indexes).and_call_original stream.write(file) end let(:chunks_index) do fs.database[fs.chunks_collection.name].indexes.get(:files_id => 1, :n => 1) end let(:files_index) do fs.database[fs.files_collection.name].indexes.get(:filename => 1, :uploadDate => 1) end it 'creates an index on the files collection' do expect(files_index[:name]).to eq('filename_1_uploadDate_1') end it 'creates an index on the chunks collection' do expect(chunks_index[:name]).to eq('files_id_1_n_1') end context 'when write is called more than once' do before do expect(fs).not_to receive(:ensure_indexes!) 
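# Editorial note (not in the original spec): ensure_indexes! must not run a
# second time -- the first write already created the filename_1_uploadDate_1
# and files_id_1_n_1 indexes checked above, so the follow-up write below is
# expected to skip index creation.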
end it 'only creates the indexes the first time' do stream.write(file2) end end end context 'when the files collection is not empty' do before do fs.files_collection.insert_one(a: 1) expect(fs.files_collection).not_to receive(:indexes) expect(fs.chunks_collection).not_to receive(:indexes) stream.write(file) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:files_index) do fs.database[fs.files_collection.name].indexes.get(:filename => 1, :uploadDate => 1) end it 'assumes indexes already exist' do expect(files_index[:name]).to eq('filename_1_uploadDate_1') end end context 'when the index creation encounters an error' do before do fs.chunks_collection.drop fs.chunks_collection.indexes.create_one(Mongo::Grid::FSBucket::CHUNKS_INDEX, :unique => false) expect(fs.chunks_collection).to receive(:indexes).and_call_original expect(fs.files_collection).not_to receive(:indexes) end after do fs.database[fs.chunks_collection.name].indexes.drop_one('files_id_1_n_1') end it 'raises the error to the user' do expect { stream.write(file) }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when provided an io stream' do context 'when no file id is specified' do before do stream.write(file) stream.close end it 'writes the contents of the stream' do expect(file_from_db.data.size).to eq(file.size) end it 'updates the length written' do expect(stream.send(:file_info).document['length']).to eq(file.size) end it 'updates the position (n)' do expect(stream.instance_variable_get(:@n)).to eq(1) end end context 'when a custom file id is provided' do let(:extra_options) do { file_id: 'Custom ID' } end let!(:id) do stream.write(file) stream.close end it 'writes the contents of the stream' do expect(file_from_db.data.size).to eq(file.size) end it 'updates the length written' do expect(stream.send(:file_info).document['length']).to eq(file.size) end it 'updates the position (n)' do expect(stream.instance_variable_get(:@n)).to eq(1) end it 'uses the custom file id' do expect(id).to eq(options[:file_id]) end end context 'when the user file contains no data' do before do stream.write(file) stream.close end let(:file) do StringIO.new('') end let(:files_coll_doc) do stream.fs.files_collection.find(filename: filename).to_a.first end let(:chunks_documents) do stream.fs.chunks_collection.find(files_id: stream.file_id).to_a end it 'creates a files document' do expect(files_coll_doc).not_to be(nil) end it 'sets length to 0 in the files document' do expect(files_coll_doc['length']).to eq(0) end it 'does not insert any chunks' do expect(file_from_db.data.size).to eq(file.size) end end end context 'when the stream is written to multiple times' do before do stream.write(file) stream.write(file2) stream.close end it 'writes the contents of the stream' do expect(file_from_db.data.size).to eq(file.size * 2) end it 'updates the length written' do expect(stream.send(:file_info).document['length']).to eq(file.size * 2) end it 'updates the position (n)' do expect(stream.instance_variable_get(:@n)).to eq(2) end end context 'when the stream is closed' do before do stream.close end it 'does not allow further writes' do expect { stream.write(file) }.to raise_error(Mongo::Error::ClosedStream) end end end describe '#close' do let(:file_content) do File.open(__FILE__).read end context 'when close is called on the stream' do before do stream.write(file) end let(:file_id) do stream.file_id end it 'returns the file id' do expect(stream.close).to eq(file_id) end end context 'when the stream is closed' 
do before do stream.write(file) stream.close end let(:md5) do Digest::MD5.new.update(file_content).hexdigest end let(:files_coll_doc) do stream.fs.files_collection.find(filename: filename).to_a.first end it 'inserts a file documents in the files collection' do expect(files_coll_doc['_id']).to eq(stream.file_id) end it 'updates the length in the files collection file document' do expect(stream.send(:file_info).document[:length]).to eq(file.size) end it 'updates the md5 in the files collection file document' do expect(stream.send(:file_info).document[:md5]).to eq(md5) end end context 'when the stream is already closed' do before do stream.close end it 'raises an exception' do expect { stream.close }.to raise_error(Mongo::Error::ClosedStream) end end end describe '#closed?' do context 'when the stream is closed' do before do stream.close end it 'returns true' do expect(stream.closed?).to be(true) end end context 'when the stream is still open' do it 'returns false' do expect(stream.closed?).to be(false) end end end end mongo-2.5.1/spec/mongo/grid/file/0000755000004100000410000000000013257253113016605 5ustar www-datawww-datamongo-2.5.1/spec/mongo/grid/file/info_spec.rb0000644000004100000410000000444613257253113021107 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Grid::File::Info do describe '#==' do let(:upload_date) do Time.now.utc end let(:info) do described_class.new(:filename => 'test.txt', :length => 7, :uploadDate => upload_date) end context 'when the other is not a file info object' do it 'returns false' do expect(info).to_not eq('test') end end context 'when the other object is file info object' do context 'when the documents are equal' do it 'returns true' do expect(info).to eq(info) end end context 'when the documents are not equal' do let(:other) do described_class.new(:filename => 'testing.txt') end it 'returns false' do expect(info).to_not eq(other) end end end end describe '#initialize' do context 'when provided only a filename and length' do let(:info) do described_class.new(:filename => 'test.txt', :length => 7) end it 'sets the default id' do expect(info.id).to be_a(BSON::ObjectId) end it 'sets the upload date' do expect(info.upload_date).to be_a(Time) end it 'sets the chunk size' do expect(info.chunk_size).to eq(Mongo::Grid::File::Chunk::DEFAULT_SIZE) end it 'sets the content type' do expect(info.content_type).to eq(Mongo::Grid::File::Info::DEFAULT_CONTENT_TYPE) end end end describe '#inspect' do let(:info) do described_class.new(:filename => 'test.txt', :length => 7) end it 'includes the chunk size' do expect(info.inspect).to include(info.chunk_size.to_s) end it 'includes the filename' do expect(info.inspect).to include(info.filename) end it 'includes the md5' do expect(info.inspect).to include(info.md5.to_s) end it 'includes the id' do expect(info.inspect).to include(info.id.to_s) end end context 'when there are extra options' do let(:info) do described_class.new(:filename => 'test.txt', :extra_field => 'extra') end it 'does not include them in the document written to the database' do expect(info.document['extra_field']).to be_nil expect(info.document[:extra_field]).to be_nil end end end mongo-2.5.1/spec/mongo/grid/file/chunk_spec.rb0000644000004100000410000001053713257253113021262 0ustar www-datawww-datarequire 'spec_helper' require 'stringio' describe Mongo::Grid::File::Chunk do let(:data) do BSON::Binary.new('testing') end let(:file_id) do BSON::ObjectId.new end let(:file_info) do Mongo::Grid::File::Info.new(:files_id => file_id) end describe '#==' do 
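# Editorial note (a sketch inferred from the #document examples later in this
# file, not an authoritative schema): a chunk is persisted roughly as
#
#   { :_id => BSON::ObjectId('...'), :files_id => file_id, :n => 0,
#     :data => BSON::Binary.new('testing') }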
let(:chunk) do described_class.new(:data => data, :files_id => file_id, :n => 5) end context 'when the other is not a chunk' do it 'returns false' do expect(chunk).to_not eq('test') end end context 'when the other object is a chunk' do context 'when the documents are equal' do it 'returns true' do expect(chunk).to eq(chunk) end end context 'when the documents are not equal' do let(:other) do described_class.new(:data => data, :files_id => file_id, :n => 6) end it 'returns false' do expect(chunk).to_not eq(other) end end end end describe '.assemble' do let(:data_size) do Mongo::Grid::File::Chunk::DEFAULT_SIZE * 3 end let(:raw_data) do 'testing' end let(:data) do BSON::Binary.new(raw_data) end let(:assembled) do described_class.assemble(chunks) end before do (1..data_size).each{ |i| raw_data << '1' } end let(:chunks) do described_class.split(raw_data, file_info) end it 'returns the chunks assembled into the raw data' do expect(assembled).to eq(raw_data) end end describe '#document' do let(:chunk) do described_class.new(:data => data, :files_id => file_id, :n => 5) end let(:document) do chunk.document end it 'sets the data' do expect(document[:data]).to eq(data) end it 'sets the files_id' do expect(document[:files_id]).to eq(file_id) end it 'sets the position' do expect(document[:n]).to eq(5) end it 'sets an object id' do expect(document[:_id]).to be_a(BSON::ObjectId) end context 'when asking for the document multiple times' do it 'returns the same document' do expect(document[:_id]).to eq(chunk.document[:_id]) end end end describe '#initialize' do let(:chunk) do described_class.new(:data => data, :files_id => file_id, :n => 5) end it 'sets the document' do expect(chunk.data).to eq(data) end it 'sets a default id' do expect(chunk.id).to be_a(BSON::ObjectId) end end describe '#to_bson' do let(:chunk) do described_class.new(:data => data, :files_id => file_id, :n => 5) end let(:document) do chunk.document end it 'returns the document as bson' do expect(chunk.to_bson.to_s).to eq(document.to_bson.to_s) end end describe '.split' do context 'when the data is smaller than the default size' do let(:raw_data) do 'testing' end let(:data) do BSON::Binary.new(raw_data) end let(:chunks) do described_class.split(raw_data, file_info) end let(:chunk) do chunks.first end it 'returns a single chunk' do expect(chunks.size).to eq(1) end it 'sets the correct chunk position' do expect(chunk.n).to eq(0) end it 'sets the correct chunk data' do expect(chunk.data).to eq(data) end end context 'when the data is larger that the default size' do let(:data_size) do Mongo::Grid::File::Chunk::DEFAULT_SIZE * 3 end let(:raw_data) do 'testing' end let(:data) do BSON::Binary.new(raw_data) end let(:assembled) do full_data = '' chunks.each do |chunk| full_data << chunk.data.data end full_data end before do (1..data_size).each{ |i| raw_data << '1' } end let(:chunks) do described_class.split(raw_data, file_info) end it 'returns the correct number of chunks' do expect(chunks.size).to eq(4) end it 'sets the correct chunk positions' do expect(chunks[0].n).to eq(0) expect(chunks[1].n).to eq(1) expect(chunks[2].n).to eq(2) expect(chunks[3].n).to eq(3) end it 'does to miss any bytes' do expect(assembled).to eq(raw_data) end end end end mongo-2.5.1/spec/mongo/grid/fs_bucket_spec.rb0000644000004100000410000007142013257253113021176 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Grid::FSBucket do let(:fs) do described_class.new(client.database, options) end let(:client) do authorized_client end let(:options) do { } end 
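# Usage sketch (assembled only from APIs exercised in this spec, not quoted
# from the driver documentation):
#
#   fs = client.database.fs
#   id = fs.upload_from_stream('specs.rb', File.open(__FILE__))
#   fs.download_to_stream(id, io = StringIO.new)
#   fs.delete(id)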
let(:filename) do 'specs.rb' end let(:file) do File.open(__FILE__) end describe '#initialize' do it 'sets the files collection' do expect(fs.files_collection.name).to eq('fs.files') end it 'sets the chunks collection' do expect(fs.chunks_collection.name).to eq('fs.chunks') end context 'when options are provided' do let(:fs) do described_class.new(authorized_client.database, options) end context 'when a write concern is set' do context 'when the option :write is provided' do let(:options) do { write: { w: 2 } } end it 'sets the write concern' do expect(fs.send(:write_concern).options).to eq(Mongo::WriteConcern.get(w: 2).options) end end end context 'when a read preference is set' do let(:options) do { read: { mode: :secondary } } end it 'sets the read preference' do expect(fs.send(:read_preference)).to eq(options[:read]) end end context 'when a read preference is not set' do let(:database) do authorized_client.with(read: { mode: :secondary }).database end let(:fs) do described_class.new(database, options) end it 'uses the read preference of the database' do expect(fs.read_preference).to be(database.read_preference) end end context 'when a write stream is opened' do let(:stream) do fs.open_upload_stream('test.txt') end let(:fs) do described_class.new(authorized_client.database, options) end context 'when a write option is specified' do let(:options) do { write: { w: 2 } } end it 'passes the write concern to the write stream' do expect(stream.write_concern.options).to eq(Mongo::WriteConcern.get(options[:write]).options) end end end end end describe '#find' do let(:fs) do described_class.new(authorized_client.database) end context 'when there is no selector provided' do let(:files) do [ Mongo::Grid::File.new('hello world!', :filename => 'test.txt'), Mongo::Grid::File.new('goodbye world!', :filename => 'test1.txt') ] end before do files.each do |file| fs.insert_one(file) end end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end it 'returns a collection view' do expect(fs.find).to be_a(Mongo::Collection::View) end it 'iterates over the documents in the result' do fs.find.each do |document| expect(document).to_not be_nil end end end context 'when provided a filter' do let(:view) do fs.find(filename: 'test.txt') end it 'returns a collection view for the filter' do expect(view.filter).to eq('filename' => 'test.txt') end end context 'when options are provided' do let(:view) do fs.find({filename: 'test.txt'}, options) end context 'when provided batch_size' do let(:options) do { batch_size: 5 } end it 'sets the batch_size on the view' do expect(view.batch_size).to eq(options[:batch_size]) end end context 'when provided limit' do let(:options) do { limit: 5 } end it 'sets the limit on the view' do expect(view.limit).to eq(options[:limit]) end end context 'when provided no_cursor_timeout' do let(:options) do { no_cursor_timeout: true } end it 'sets the no_cursor_timeout on the view' do expect(view.options[:no_cursor_timeout]).to eq(options[:no_cursor_timeout]) end end context 'when provided skip' do let(:options) do { skip: 5 } end it 'sets the skip on the view' do expect(view.skip).to eq(options[:skip]) end end context 'when provided sort' do let(:options) do { sort: { 'x' => Mongo::Index::ASCENDING } } end it 'sets the sort on the view' do expect(view.sort).to eq(options[:sort]) end end end end describe '#find_one' do let(:fs) do described_class.new(authorized_client.database) end let(:file) do Mongo::Grid::File.new('hello world!', :filename => 'test.txt') end before do 
fs.insert_one(file) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:from_db) do fs.find_one(:filename => 'test.txt') end let(:from_db_upload_date) do from_db.info.upload_date.strftime("%Y-%m-%d %H:%M:%S") end let(:file_info_upload_date) do file.info.upload_date.strftime("%Y-%m-%d %H:%M:%S") end it 'returns the assembled file from the db' do expect(from_db.filename).to eq(file.info.filename) end it 'maps the file info correctly' do expect(from_db.info.length).to eq(file.info.length) expect(from_db_upload_date).to eq(file_info_upload_date) end end describe '#insert_one' do let(:fs) do described_class.new(authorized_client.database) end let(:file) do Mongo::Grid::File.new('Hello!', :filename => 'test.txt') end context 'when inserting the file once' do let!(:result) do fs.insert_one(file) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:from_db) do fs.find_one(:filename => 'test.txt') end it 'inserts the file into the database' do expect(from_db.filename).to eq(file.info.filename) end it 'includes the chunks and data with the file' do expect(from_db.data).to eq('Hello!') end it 'returns the file id' do expect(result).to eq(file.id) end end context 'when the files collection is empty' do before do fs.files_collection.delete_many fs.chunks_collection.delete_many expect(fs.files_collection).to receive(:indexes).and_call_original expect(fs.chunks_collection).to receive(:indexes).and_call_original fs.insert_one(file) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:chunks_index) do fs.database[fs.chunks_collection.name].indexes.get(:files_id => 1, :n => 1) end let(:files_index) do fs.database[fs.files_collection.name].indexes.get(:filename => 1, :uploadDate => 1) end it 'creates an index on the files collection' do expect(files_index[:name]).to eq('filename_1_uploadDate_1') end it 'creates an index on the chunks collection' do expect(chunks_index[:name]).to eq('files_id_1_n_1') end context 'when a write operation is called more than once' do before do expect(fs).not_to receive(:ensure_indexes!) 
end let(:file2) do Mongo::Grid::File.new('Goodbye!', :filename => 'test2.txt') end it 'only creates the indexes the first time' do expect(fs.insert_one(file2)).to be_a(BSON::ObjectId) end end end context 'when the index creation encounters an error' do before do fs.chunks_collection.drop fs.chunks_collection.indexes.create_one(Mongo::Grid::FSBucket::CHUNKS_INDEX, :unique => false) expect(fs.chunks_collection).to receive(:indexes).and_call_original expect(fs.files_collection).not_to receive(:indexes) end after do fs.database[fs.chunks_collection.name].indexes.drop_one('files_id_1_n_1') end it 'raises the error to the user' do expect { fs.insert_one(file) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the files collection is not empty' do before do fs.files_collection.insert_one(a: 1) expect(fs.files_collection).not_to receive(:indexes) expect(fs.chunks_collection).not_to receive(:indexes) fs.insert_one(file) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:files_index) do fs.database[fs.files_collection.name].indexes.get(:filename => 1, :uploadDate => 1) end it 'assumes indexes already exist' do expect(files_index[:name]).to eq('filename_1_uploadDate_1') end end context 'when inserting the file more than once' do after do fs.files_collection.delete_many fs.chunks_collection.delete_many end it 'raises an error' do expect { fs.insert_one(file) fs.insert_one(file) }.to raise_error(Mongo::Error::BulkWriteError) end end context 'when the file exceeds the max bson size' do let(:fs) do described_class.new(authorized_client.database) end let(:file) do str = 'y' * 16777216 Mongo::Grid::File.new(str, :filename => 'large-file.txt') end before do fs.insert_one(file) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end it 'successfully inserts the file' do expect( fs.find_one(:filename => 'large-file.txt').chunks ).to eq(file.chunks) end end end describe '#delete_one' do let(:file) do Mongo::Grid::File.new('Hello!', :filename => 'test.txt') end before do fs.insert_one(file) fs.delete_one(file) end let(:from_db) do fs.find_one(:filename => 'test.txt') end it 'removes the file from the db' do expect(from_db).to be_nil end end describe '#delete' do let(:file_id) do fs.upload_from_stream(filename, file) end before do fs.delete(file_id) end let(:from_db) do fs.find_one(:filename => filename) end it 'removes the file from the db' do expect(from_db).to be_nil end context 'when a custom file id is used' do let(:custom_file_id) do fs.upload_from_stream(filename, file, file_id: 'Custom ID') end before do fs.delete(custom_file_id) end let(:from_db) do fs.find_one(:filename => filename) end it 'removes the file from the db' do expect(from_db).to be_nil end end end context 'when a read stream is opened' do let(:fs) do described_class.new(authorized_client.database, options) end let(:io) do StringIO.new end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end describe '#open_download_stream' do let!(:file_id) do fs.open_upload_stream(filename) do |stream| stream.write(file) end.file_id end context 'when a block is provided' do let!(:stream) do fs.open_download_stream(file_id) do |stream| io.write(stream.read) end end it 'returns a Stream::Read object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Read) end it 'closes the stream after the block completes' do expect(stream.closed?).to be(true) end it 'yields the stream to the block' do expect(io.size).to eq(file.size) end end context 'when a 
block is not provided' do let!(:stream) do fs.open_download_stream(file_id) end it 'returns a Stream::Read object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Read) end it 'does not close the stream' do expect(stream.closed?).to be(false) end it 'does not yield the stream to the block' do expect(io.size).to eq(0) end end context 'when a custom file id is provided' do let(:file) do File.open(__FILE__) end let!(:file_id) do fs.open_upload_stream(filename, file_id: 'Custom ID') do |stream| stream.write(file) end.file_id end context 'when a block is provided' do let!(:stream) do fs.open_download_stream(file_id) do |stream| io.write(stream.read) end end it 'yields the stream to the block' do expect(io.size).to eq(file.size) end end context 'when a block is not provided' do let!(:stream) do fs.open_download_stream(file_id) end it 'returns a Stream::Read object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Read) end it 'does not close the stream' do expect(stream.closed?).to be(false) end it 'does not yield the stream to the block' do expect(io.size).to eq(0) end end end end describe '#download_to_stream' do context 'sessions' do let(:options) do { session: session } end let(:file_id) do fs.open_upload_stream(filename) do |stream| stream.write(file) end.file_id end let(:operation) do fs.download_to_stream(file_id, io) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when the file is found' do let!(:file_id) do fs.open_upload_stream(filename) do |stream| stream.write(file) end.file_id end before do fs.download_to_stream(file_id, io) end it 'writes to the provided stream' do expect(io.size).to eq(file.size) end it 'does not close the stream' do expect(io.closed?).to be(false) end context 'when the file has length 0' do let(:file) do StringIO.new('') end let(:from_db) do fs.open_upload_stream(filename) { |s| s.write(file) } fs.find_one(:filename => filename) end it 'can read the file back' do expect(from_db.data.size).to eq(file.size) end end end context 'when there is no files collection document found' do it 'raises an exception' do expect{ fs.download_to_stream(BSON::ObjectId.new, io) }.to raise_exception(Mongo::Error::FileNotFound) end end context 'when a file has an id that is not an ObjectId' do before do fs.insert_one(file) fs.download_to_stream(file_id, io) end let(:file_id) do 'non-object-id' end let(:file) do Mongo::Grid::File.new(File.open(__FILE__).read, :filename => filename, :_id => file_id) end it 'reads the file successfully' do expect(io.size).to eq(file.data.size) end end end context 'when a read preference is specified' do let(:fs) do described_class.new(authorized_client.database, options) end let(:options) do { read: { mode: :secondary } } end let(:stream) do fs.open_download_stream(BSON::ObjectId) end it 'sets the read preference on the Stream::Read object' do expect(stream.read_preference).to eq(options[:read]) end end describe '#download_to_stream_by_name' do let(:files) do [ StringIO.new('hello 1'), StringIO.new('hello 2'), StringIO.new('hello 3'), StringIO.new('hello 4') ] end context ' when using a session' do let(:options) do { session: session } end let(:operation) do fs.download_to_stream_by_name('test.txt', io) end let(:client) do authorized_client end before do files.each do |file| authorized_client.database.fs.upload_from_stream('test.txt', file) end end let(:io) do StringIO.new end it_behaves_like 'an operation using a session' end context 'when not using a session' do before do files.each 
do |file| fs.upload_from_stream('test.txt', file) end end let(:io) do StringIO.new end context 'when revision is not specified' do let!(:result) do fs.download_to_stream_by_name('test.txt', io) end it 'returns the most recent version' do expect(io.string).to eq('hello 4') end end context 'when revision is 0' do let!(:result) do fs.download_to_stream_by_name('test.txt', io, revision: 0) end it 'returns the original stored file' do expect(io.string).to eq('hello 1') end end context 'when revision is negative' do let!(:result) do fs.download_to_stream_by_name('test.txt', io, revision: -2) end it 'returns that number of versions from the most recent' do expect(io.string).to eq('hello 3') end end context 'when revision is positive' do let!(:result) do fs.download_to_stream_by_name('test.txt', io, revision: 1) end it 'returns the version at that revision number' do expect(io.string).to eq('hello 2') end end context 'when the file revision is not found' do it 'raises an InvalidFileRevision error' do expect { fs.download_to_stream_by_name('test.txt', io, revision: 100) }.to raise_exception(Mongo::Error::InvalidFileRevision) end end context 'when the file is not found' do it 'raises a FileNotFound error' do expect { fs.download_to_stream_by_name('non-existent.txt', io) }.to raise_exception(Mongo::Error::FileNotFound) end end end end describe '#open_download_stream_by_name' do let(:files) do [ StringIO.new('hello 1'), StringIO.new('hello 2'), StringIO.new('hello 3'), StringIO.new('hello 4') ] end let(:io) do StringIO.new end context 'when using a session' do let(:options) do { session: session } end let(:operation) do fs.download_to_stream_by_name('test.txt', io) end let(:client) do authorized_client end before do files.each do |file| authorized_client.database.fs.upload_from_stream('test.txt', file) end end let(:io) do StringIO.new end it_behaves_like 'an operation using a session' end context 'when not using a session' do before do files.each do |file| fs.upload_from_stream('test.txt', file) end end context 'when a block is provided' do let(:stream) do fs.open_download_stream_by_name('test.txt') do |stream| io.write(stream.read) end end it 'returns a Stream::Read object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Read) end it 'closes the stream after the block completes' do expect(stream.closed?).to be(true) end it 'yields the stream to the block' do stream expect(io.size).to eq(files[0].size) end context 'when revision is not specified' do let!(:result) do fs.open_download_stream_by_name('test.txt') do |stream| io.write(stream.read) end end it 'returns the most recent version' do expect(io.string).to eq('hello 4') end end context 'when revision is 0' do let!(:result) do fs.open_download_stream_by_name('test.txt', revision: 0) do |stream| io.write(stream.read) end end it 'returns the original stored file' do expect(io.string).to eq('hello 1') end end context 'when revision is negative' do let!(:result) do fs.open_download_stream_by_name('test.txt', revision: -2) do |stream| io.write(stream.read) end end it 'returns that number of versions from the most recent' do expect(io.string).to eq('hello 3') end end context 'when revision is positive' do let!(:result) do fs.open_download_stream_by_name('test.txt', revision: 1) do |stream| io.write(stream.read) end end it 'returns the version at that revision number' do expect(io.string).to eq('hello 2') end end context 'when the file revision is not found' do it 'raises an InvalidFileRevision error' do expect { fs.open_download_stream_by_name('test.txt', revision: 100) }.to 
raise_exception(Mongo::Error::InvalidFileRevision) end end context 'when the file is not found' do it 'raises a FileNotFound error' do expect { fs.open_download_stream_by_name('non-existent.txt') }.to raise_exception(Mongo::Error::FileNotFound) end end end context 'when a block is not provided' do let!(:stream) do fs.open_download_stream_by_name('test.txt') end it 'returns a Stream::Read object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Read) end it 'does not close the stream' do expect(stream.closed?).to be(false) end it 'does not yield the stream to the block' do expect(io.size).to eq(0) end end end end end context 'when a write stream is opened' do let(:stream) do fs.open_upload_stream(filename) end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end describe '#open_upload_stream' do context 'when a block is not provided' do it 'returns a Stream::Write object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Write) end it 'creates an ObjectId for the file' do expect(stream.file_id).to be_a(BSON::ObjectId) end context 'when a custom file ID is provided' do let(:stream) do fs.open_upload_stream(filename, file_id: 'Custom ID') end it 'returns a Stream::Write object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Write) end it 'uses the custom file ID' do expect(stream.file_id).to eq('Custom ID') end end end context 'when a block is provided' do context 'when a session is not used' do let!(:stream) do fs.open_upload_stream(filename) do |stream| stream.write(file) end end let(:result) do fs.find_one(filename: filename) end it 'returns the stream' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Write) end it 'creates an ObjectId for the file' do expect(stream.file_id).to be_a(BSON::ObjectId) end it 'yields the stream to the block' do expect(result.data.size).to eq(file.size) end it 'closes the stream when the block completes' do expect(stream.closed?).to be(true) end end end end describe '#upload_from_stream' do let!(:result) do fs.upload_from_stream(filename, file) end let(:file_from_db) do fs.find_one(:filename => filename) end it 'uploads the file from the provided stream' do expect(file_from_db.data.length).to eq(file.size) end it 'does not close the stream' do expect(file.closed?).to be(false) end it 'returns the id of the file' do expect(result).to be_a(BSON::ObjectId) end context 'when the io stream raises an error' do let(:stream) do fs.open_upload_stream(filename) end before do allow(fs).to receive(:open_upload_stream).and_yield(stream) end context 'when stream#abort does not raise an OperationFailure' do before do expect(stream).to receive(:abort).and_call_original file.close end it 'raises the original IOError' do expect { fs.upload_from_stream(filename, file) }.to raise_exception(IOError) end end context 'when stream#abort raises an OperationFailure' do before do allow(stream).to receive(:abort).and_raise(Mongo::Error::OperationFailure) file.close end it 'raises the original IOError' do expect { fs.upload_from_stream(filename, file) }.to raise_exception(IOError) end end end end context 'when options are provided when opening the write stream' do let(:stream) do fs.open_upload_stream(filename, stream_options) end context 'when a custom file id is provided' do let(:stream_options) do { file_id: 'Custom ID' } end it 'sets the file id on the stream' do expect(stream.file_id).to eq('Custom ID') end end context 'when a write option is specified' do let(:stream_options) do { write: { w: 2 } } end it 'sets the write concern 
on the write stream' do expect(stream.write_concern.options).to eq(Mongo::WriteConcern.get(stream_options[:write]).options) end end context 'when there is a chunk size set on the FSBucket' do let(:stream_options) do { } end let(:options) do { chunk_size: 100 } end it 'sets the chunk size as the default on the write stream' do expect(stream.options[:chunk_size]).to eq(options[:chunk_size]) end end context 'when a chunk size option is specified' do let(:stream_options) do { chunk_size: 50 } end it 'sets the chunk size on the write stream' do expect(stream.options[:chunk_size]).to eq(stream_options[:chunk_size]) end context 'when there is a chunk size set on the FSBucket' do let(:options) do { chunk_size: 100 } end let(:fs) do described_class.new(authorized_client.database, options) end it 'uses the chunk size set on the write stream' do expect(stream.options[:chunk_size]).to eq(stream_options[:chunk_size]) end end end context 'when a file metadata option is specified' do let(:stream_options) do { metadata: { some_field: 1 } } end it 'sets the file metadata option on the write stream' do expect(stream.options[:metadata]).to eq(stream_options[:metadata]) end end context 'when a content type option is specified' do let(:stream_options) do { content_type: 'text/plain' } end it 'sets the content type on the write stream' do expect(stream.options[:content_type]).to eq(stream_options[:content_type]) end end context 'when a aliases option is specified' do let(:stream_options) do { aliases: [ 'another-name.txt' ] } end it 'sets the alias option on the write stream' do expect(stream.options[:aliases]).to eq(stream_options[:aliases]) end end end end end mongo-2.5.1/spec/mongo/grid/file_spec.rb0000644000004100000410000000751513257253113020154 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Grid::File do describe '#==' do let(:file) do described_class.new('test', :filename => 'test.txt') end context 'when the object is not a file' do it 'returns false' do expect(file).to_not eq('testing') end end context 'when the object is a file' do context 'when the objects are equal' do it 'returns true' do expect(file).to eq(file) end end context 'when the objects are not equal' do let(:other) do described_class.new('tester', :filename => 'test.txt') end it 'returns false' do expect(file).to_not eq(other) end end end end describe '#initialize' do let(:data_size) do Mongo::Grid::File::Chunk::DEFAULT_SIZE * 3 end let(:data) do 'testing' end before do (1..data_size).each{ |i| data << '1' } end context 'when provided data and file information' do let(:file) do described_class.new(data, :filename => 'test.txt') end it 'creates the chunks' do expect(file.chunks.size).to eq(4) end it 'returns data' do expect(file.data).to eq(data) end end context 'when data is a ruby file' do let(:ruby_file) do File.open(__FILE__) end let(:data) do ruby_file.read end let(:file) do described_class.new(data, :filename => File.basename(ruby_file.path)) end it 'creates the chunks' do expect(file.chunks.size).to eq(4) end it 'returns data' do expect(file.data).to eq(data) end end context 'when data is an IO object' do let(:io) do StringIO.new('testing') end let(:file) do described_class.new(io, filename: "test.txt") end it 'creates the chunks' do expect(file.chunks).not_to be_empty end it 'returns data' do expect(file.data).to eq 'testing' end end context 'when using idiomatic ruby field names' do let(:time) do Time.now.utc end let(:file) do described_class.new( data, :filename => 'test.txt', :chunk_size => 100, :upload_date => 
time, :content_type => 'text/plain' ) end it 'normalizes the chunk size name' do expect(file.chunk_size).to eq(100) end it 'normalizes the upload date name' do expect(file.upload_date).to eq(time) end it 'normalizes the content type name' do expect(file.content_type).to eq('text/plain') end end context 'when provided chunks and file information' do let(:file_id) do BSON::ObjectId.new end let(:info) do BSON::Document.new( :_id => file_id, :uploadDate => Time.now.utc, :filename => 'test.txt', :chunkSize => Mongo::Grid::File::Chunk::DEFAULT_SIZE, :length => data.length, :contentType => Mongo::Grid::File::Info::DEFAULT_CONTENT_TYPE ) end let(:chunks) do Mongo::Grid::File::Chunk.split( data, Mongo::Grid::File::Info.new(info) ).map{ |chunk| chunk.document } end let(:file) do described_class.new(chunks, info) end it 'sets the chunks' do expect(file.chunks.size).to eq(4) end it 'assembles to data' do expect(file.data).to eq(data) end it 'sets the file information' do expect(file.info.id).to eq(info[:_id]) end end end describe '#inspect' do let(:file) do described_class.new('Hi', :filename => 'test.txt') end it 'includes the filename' do expect(file.inspect).to include('test.txt') end end end mongo-2.5.1/spec/mongo/grid/stream_spec.rb0000644000004100000410000000173413257253113020525 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Grid::FSBucket::Stream do let(:fs) do authorized_client.database.fs end describe '.get' do let(:stream) do described_class.get(fs, mode) end context 'when mode is read' do let(:mode) do Mongo::Grid::FSBucket::Stream::READ_MODE end it 'returns a Stream::Read object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Read) end end context 'when mode is write' do let(:mode) do Mongo::Grid::FSBucket::Stream::WRITE_MODE end it 'returns a Stream::Write object' do expect(stream).to be_a(Mongo::Grid::FSBucket::Stream::Write) end context 'when options are provided' do let(:stream) do described_class.get(fs, mode, chunk_size: 100) end it 'sets the options on the stream object' do expect(stream.options[:chunk_size]).to eq(100) end end end end endmongo-2.5.1/spec/mongo/bulk_write_spec.rb0000644000004100000410000017272413257253113020464 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::BulkWrite do before do authorized_collection.delete_many end after do authorized_collection.delete_many collection_with_validator.drop end let(:collection_with_validator) do begin; authorized_client[:validating].drop; rescue; end authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end end let(:collection_invalid_write_concern) do authorized_collection.client.with(write: INVALID_WRITE_CONCERN)[authorized_collection.name] end let(:collation) do { locale: 'en_US', strength: 2 } end let(:array_filters) do [{ 'i.y' => 3}] end let(:collection) do authorized_collection end let(:client) do authorized_client end describe '#execute' do shared_examples_for 'an executable bulk write' do context 'when providing a bad operation' do let(:requests) do [{ not_an_operation: { _id: 0 }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_error(Mongo::Error::InvalidBulkOperationType) end end context 'when the operations do not need to be split' do context 'when a write error occurs' do let(:requests) do [ { insert_one: { _id: 0 }}, { insert_one: { _id: 1 }}, { insert_one: { _id: 0 }}, { insert_one: { _id: 1 }} ] end let(:error) do begin bulk_write.execute rescue => e e end end it 'raises an exception' do expect { 
bulk_write.execute }.to raise_error(Mongo::Error::BulkWriteError) end it 'sets the document index on the error' do expect(error.result[Mongo::Error::WRITE_ERRORS].first['index']).to eq(2) end context 'when a session is provided' do let(:extra_options) do { session: session } end let(:client) do authorized_client end let(:failed_operation) do bulk_write.execute end it_behaves_like 'a failed operation using a session' end end context 'when provided a single insert one' do let(:requests) do [{ insert_one: { _id: 0 }}] end let(:result) do bulk_write.execute end it 'inserts the document' do expect(result.inserted_count).to eq(1) expect(authorized_collection.find(_id: 0).count).to eq(1) end it 'only inserts that document' do result expect(authorized_collection.find.first['_id']).to eq(0) end context 'when a session is provided' do let(:operation) do result end let(:extra_options) do { session: session } end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:extra_options) do {session: session} end let(:client) do collection_invalid_write_concern.client end let(:failed_operation) do bulk_write_invalid_write_concern.execute end it_behaves_like 'a failed operation using a session' end end end context 'when provided multiple insert ones' do let(:requests) do [ { insert_one: { _id: 0 }}, { insert_one: { _id: 1 }}, { insert_one: { _id: 2 }} ] end let(:result) do bulk_write.execute end it 'inserts the documents' do expect(result.inserted_count).to eq(3) expect(authorized_collection.find(_id: { '$in'=> [ 0, 1, 2 ]}).count).to eq(3) end context 'when there is a write failure' do let(:requests) do [{ insert_one: { _id: 1 }}, { insert_one: { _id: 1 }}] end it 'raises a BulkWriteError' do expect { bulk_write.execute }.to raise_error(Mongo::Error::BulkWriteError) end end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:extra_options) do {session: session} end let(:client) do collection_invalid_write_concern.client end let(:failed_operation) do bulk_write_invalid_write_concern.execute end it_behaves_like 'a failed operation using a session' end end end context 'when provided a single delete one' do let(:requests) do [{ delete_one: { filter: { _id: 0 }}}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_one({ _id: 0 }) end it 'deletes the document' do expect(result.deleted_count).to eq(1) expect(authorized_collection.find(_id: 0).count).to eq(0) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? 
do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:extra_options) do {session: session} end let(:client) do collection_invalid_write_concern.client end let(:failed_operation) do bulk_write_invalid_write_concern.execute end it_behaves_like 'a failed operation using a session' end context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ delete_one: { filter: { name: 'BANG' }, collation: collation } }] end context 'when the server selected supports collations', if: collation_enabled? do let!(:result) do bulk_write.execute end it 'applies the collation' do expect(authorized_collection.find(name: 'bang').count).to eq(0) end it 'reports the deleted count' do expect(result.deleted_count).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ delete_one: { filter: { name: 'BANG' }, 'collation' => collation } }] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do before do authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ delete_one: { filter: { name: 'BANG' }}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(name: 'bang').count).to eq(1) end it 'reports the deleted count' do expect(result.deleted_count).to eq(0) end end end context 'when bulk executing update_one' do context 'when the write has specified arrayFilters' do before do authorized_collection.insert_one(_id: 1, x: [{ y: 1 }, { y: 2 }, { y: 3 }]) end let(:requests) do [{ update_one: { filter: { _id: 1 }, update: { '$set' => { 'x.$[i].y' => 5 } }, array_filters: array_filters, } }] end context 'when the server selected supports arrayFilters', if: array_filters_enabled? do let!(:result) do bulk_write.execute end it 'applies the arrayFilters' do expect(result.matched_count).to eq(1) expect(result.modified_count).to eq(1) expect(authorized_collection.find(_id: 1).first['x'].last['y']).to eq(5) end end context 'when the server selected does not support arrayFilters', unless: array_filters_enabled? do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end end end context 'when bulk executing update_many' do context 'when the write has specified arrayFilters' do before do authorized_collection.insert_many([{ _id: 1, x: [ { y: 1 }, { y: 2 }, { y: 3 } ] }, { _id: 2, x: [ { y: 3 }, { y: 2 }, { y: 1 } ] }]) end let(:selector) do { '$or' => [{ _id: 1 }, { _id: 2 }]} end let(:requests) do [{ update_many: { filter: { '$or' => [{ _id: 1 }, { _id: 2 }]}, update: { '$set' => { 'x.$[i].y' => 5 } }, array_filters: array_filters, } }] end context 'when the server selected supports arrayFilters', if: array_filters_enabled? 
do let!(:result) do bulk_write.execute end it 'applies the arrayFilters' do expect(result.matched_count).to eq(2) expect(result.modified_count).to eq(2) docs = authorized_collection.find(selector, sort: { _id: 1 }).to_a expect(docs[0]['x']).to eq ([{ 'y' => 1 }, { 'y' => 2 }, { 'y' => 5}]) expect(docs[1]['x']).to eq ([{ 'y' => 5 }, { 'y' => 2 }, { 'y' => 1}]) end end context 'when the server selected does not support arrayFilters', unless: array_filters_enabled? do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end end end context 'when multiple documents match delete selector' do before do authorized_collection.insert_many([{ a: 1 }, { a: 1 }]) end let(:requests) do [{ delete_one: { filter: { a: 1 }}}] end it 'reports n_removed correctly' do expect(bulk_write.execute.deleted_count).to eq(1) end it 'deletes only matching documents' do bulk_write.execute expect(authorized_collection.find(a: 1).count).to eq(1) end end end context 'when provided multiple delete ones' do let(:requests) do [ { delete_one: { filter: { _id: 0 }}}, { delete_one: { filter: { _id: 1 }}}, { delete_one: { filter: { _id: 2 }}} ] end let(:result) do bulk_write.execute end before do authorized_collection.insert_many([ { _id: 0 }, { _id: 1 }, { _id: 2 } ]) end it 'deletes the documents' do expect(result.deleted_count).to eq(3) expect(authorized_collection.find(_id: { '$in'=> [ 0, 1, 2 ]}).count).to eq(0) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:extra_options) do {session: session} end let(:client) do collection_invalid_write_concern.client end let(:failed_operation) do bulk_write_invalid_write_concern.execute end it_behaves_like 'a failed operation using a session' end end context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'doink') end let(:requests) do [{ delete_one: { filter: { name: 'BANG' }, collation: collation }}, { delete_one: { filter: { name: 'DOINK' }, collation: collation }}] end context 'when the server selected supports collations', if: collation_enabled? do let!(:result) do bulk_write.execute end it 'applies the collation' do expect(authorized_collection.find(name: { '$in' => ['bang', 'doink']}).count).to eq(0) end it 'reports the deleted count' do expect(result.deleted_count).to eq(2) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ delete_one: { filter: { name: 'BANG' }, 'collation' => collation }}, { delete_one: { filter: { name: 'DOINK' }, 'collation' => collation }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the write does not have a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'doink') end let(:requests) do [{ delete_one: { filter: { name: 'BANG' }}}, { delete_one: { filter: { name: 'DOINK' }}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(name: { '$in' => ['bang', 'doink']}).count).to eq(2) end it 'reports the deleted count' do expect(result.deleted_count).to eq(0) end end end context 'when provided a single delete many' do let(:requests) do [{ delete_many: { filter: { _id: 0 }}}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_one({ _id: 0 }) end it 'deletes the documents' do expect(result.deleted_count).to eq(1) expect(authorized_collection.find(_id: 0).count).to eq(0) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:extra_options) do {session: session} end let(:client) do collection_invalid_write_concern.client end let(:failed_operation) do bulk_write_invalid_write_concern.execute end it_behaves_like 'a failed operation using a session' end end context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ delete_many: { filter: { name: 'BANG' }, collation: collation }}] end context 'when the server selected supports collations', if: collation_enabled? do let!(:result) do bulk_write.execute end it 'applies the collation' do expect(authorized_collection.find(name: 'bang').count).to eq(0) end it 'reports the deleted count' do expect(result.deleted_count).to eq(2) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ delete_many: { filter: { name: 'BANG' }, 'collation' => collation }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ delete_many: { filter: { name: 'BANG' }}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(name: 'bang').count).to eq(2) end it 'reports the deleted count' do expect(result.deleted_count).to eq(0) end end end context 'when provided multiple delete many ops' do let(:requests) do [ { delete_many: { filter: { _id: 0 }}}, { delete_many: { filter: { _id: 1 }}}, { delete_many: { filter: { _id: 2 }}} ] end let(:result) do bulk_write.execute end before do authorized_collection.insert_many([ { _id: 0 }, { _id: 1 }, { _id: 2 } ]) end it 'deletes the documents' do expect(result.deleted_count).to eq(3) expect(authorized_collection.find(_id: { '$in'=> [ 0, 1, 2 ]}).count).to eq(0) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do {session: session} end it_behaves_like 'an operation using a session' end end context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'doink') end let(:requests) do [{ delete_many: { filter: { name: 'BANG' }, collation: collation }}, { delete_many: { filter: { name: 'DOINK' }, collation: collation }}] end context 'when the server selected supports collations', if: collation_enabled? do let!(:result) do bulk_write.execute end it 'applies the collation' do expect(authorized_collection.find(name: { '$in' => ['bang', 'doink'] }).count).to eq(0) end it 'reports the deleted count' do expect(result.deleted_count).to eq(3) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ delete_many: { filter: { name: 'BANG' }, 'collation' => collation }}, { delete_many: { filter: { name: 'DOINK' }, 'collation' => collation }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'doink') end let(:requests) do [{ delete_many: { filter: { name: 'BANG' }}}, { delete_many: { filter: { name: 'DOINK' }}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(name: { '$in' => ['bang', 'doink'] }).count).to eq(3) end it 'reports the deleted count' do expect(result.deleted_count).to eq(0) end end end context 'when providing a single replace one' do let(:requests) do [{ replace_one: { filter: { _id: 0 }, replacement: { name: 'test' }}}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_one({ _id: 0 }) end it 'replaces the document' do expect(result.modified_count).to eq(1) expect(authorized_collection.find(_id: 0).first[:name]).to eq('test') end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:extra_options) do {session: session} end let(:client) do collection_invalid_write_concern.client end let(:failed_operation) do bulk_write_invalid_write_concern.execute end it_behaves_like 'a failed operation using a session' end end context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ replace_one: { filter: { name: 'BANG' }, replacement: { other: 'pong' }, collation: collation }}] end context 'when the server selected supports collations' do let!(:result) do bulk_write.execute end it 'applies the collation', if: collation_enabled? do expect(authorized_collection.find(other: 'pong').count).to eq(1) end it 'reports the upserted id', if: collation_enabled? do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count', if: collation_enabled? do expect(result.upserted_count).to eq(0) end it 'reports the modified count', if: collation_enabled? do expect(result.modified_count).to eq(1) end it 'reports the matched count', if: collation_enabled? do expect(result.matched_count).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ replace_one: { filter: { name: 'BANG' }, replacement: { other: 'pong' }, 'collation' => collation }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the write does not have a collation specified' do before do authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ replace_one: { filter: { name: 'BANG' }, replacement: { other: 'pong' }}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(other: 'pong').count).to eq(0) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to eq(0) end end end context 'when providing a single update one' do context 'when upsert is false' do let(:requests) do [{ update_one: { filter: { _id: 0 }, update: { "$set" => { name: 'test' }}}}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_one({ _id: 0 }) end it 'updates the document' do result expect(authorized_collection.find(_id: 0).first[:name]).to eq('test') end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(1) end it 'reports the matched count' do expect(result.matched_count).to eq(1) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end context 'when documents match but are not modified' do before do authorized_collection.insert_one({ a: 0 }) end let(:requests) do [{ update_one: { filter: { a: 0 }, update: { "$set" => { a: 0 }}}}] end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to eq(1) end end context 'when the number of updates exceeds the max batch size' do let(:batch_size) do 11 end before do allow(client.cluster.next_primary).to receive(:max_write_batch_size).and_return(batch_size - 1) end let(:requests) do batch_size.times.collect do |i| { update_one: { filter: { a: i }, update: { "$set" => { a: i, b: 3 }}, upsert: true }} end end it 'updates the documents and reports the correct number of upserted ids' do expect(result.upserted_ids.size).to eq(batch_size) expect(authorized_collection.find(b: 3).count).to eq(batch_size) end end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? 
do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end context 'when a session is provided' do let(:extra_options) do {session: session} end let(:client) do collection_invalid_write_concern.client end let(:failed_operation) do bulk_write_invalid_write_concern.execute end it_behaves_like 'a failed operation using a session' end end end context 'when upsert is true' do let(:requests) do [{ update_one: { filter: { _id: 0 }, update: { "$set" => { name: 'test' } }, upsert: true }}] end let(:result) do bulk_write.execute end it 'updates the document' do result expect(authorized_collection.find(_id: 0).first[:name]).to eq('test') end it 'reports the upserted count' do expect(result.upserted_count).to eq(1) end it 'reports the modified_count count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to eq(0) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([0]) end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ update_one: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}, collation: collation }}] end context 'when the server selected supports collations' do let!(:result) do bulk_write.execute end it 'applies the collation', if: collation_enabled? do expect(authorized_collection.find(name: 'pong').count).to eq(1) end it 'reports the upserted id', if: collation_enabled? do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count', if: collation_enabled? do expect(result.upserted_count).to eq(0) end it 'reports the modified count', if: collation_enabled? do expect(result.modified_count).to eq(1) end it 'reports the matched count', if: collation_enabled? do expect(result.matched_count).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ update_one: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}, 'collation' => collation }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the write does not have a collation specified' do before do authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ update_one: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(name: 'pong').count).to eq(0) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to eq(0) end end end context 'when providing multiple update ones' do context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'doink') end let(:requests) do [{ update_one: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}, collation: collation }}, { update_one: { filter: { name: 'DOINK' }, update: { "$set" => { name: 'pong' }}, collation: collation }}] end context 'when the server selected supports collations' do let!(:result) do bulk_write.execute end it 'applies the collation', if: collation_enabled? do expect(authorized_collection.find(name: 'pong').count).to eq(2) end it 'reports the upserted id', if: collation_enabled? do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count', if: collation_enabled? do expect(result.upserted_count).to eq(0) end it 'reports the modified count', if: collation_enabled? do expect(result.modified_count).to eq(2) end it 'reports the matched count', if: collation_enabled? do expect(result.matched_count).to eq(2) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ update_one: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}, 'collation' => collation }}, { update_one: { filter: { name: 'DOINK' }, update: { "$set" => { name: 'pong' }}, 'collation' => collation }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the write does not have a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'doink') end let(:requests) do [{ update_one: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}}}, { update_one: { filter: { name: 'DOINK' }, update: { "$set" => { name: 'pong' }}}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(name: 'pong').count).to eq(0) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to eq(0) end end context 'when upsert is false' do let(:requests) do [{ update_one: { filter: { _id: 0 }, update: { "$set" => { name: 'test' }}}}, { update_one: { filter: { _id: 1 }, update: { "$set" => { name: 'test' }}}}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_many([{ _id: 0 }, { _id: 1 }]) end it 'updates the document' do result expect(authorized_collection.find(name: 'test').count).to eq(2) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(2) end it 'reports the matched count' do expect(result.matched_count).to eq(2) end context 'when there is a mix of updates and matched without an update' do let(:requests) do [{ update_one: { filter: { a: 0 }, update: { "$set" => { a: 1 }}}}, { update_one: { filter: { a: 2 }, update: { "$set" => { a: 2 }}}}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_many([{ a: 0 }, { a: 2 }]) end it 'updates the document' do result expect(authorized_collection.find(a: { '$lt' => 3 }).count).to eq(2) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(1) end it 'reports the matched count' do expect(result.matched_count).to eq(2) end end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? 
do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when upsert is true' do let(:requests) do [{ update_one: { filter: { _id: 0 }, update: { "$set" => { name: 'test' }}, upsert: true }}, { update_one: { filter: { _id: 1 }, update: { "$set" => { name: 'test1' }}, upsert: true }}] end let(:result) do bulk_write.execute end it 'updates the document' do expect(result.modified_count).to eq(0) expect(authorized_collection.find(name: { '$in' => ['test', 'test1'] }).count).to eq(2) end it 'reports the upserted count' do expect(result.upserted_count).to eq(2) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to eq(0) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([0, 1]) end context 'when there is a mix of updates, upsert, and matched without an update' do let(:requests) do [{ update_one: { filter: { a: 0 }, update: { "$set" => { a: 1 }}}}, { update_one: { filter: { a: 2 }, update: { "$set" => { a: 2 }}}}, { update_one: { filter: { _id: 3 }, update: { "$set" => { a: 4 }}, upsert: true }}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_many([{ a: 0 }, { a: 2 }]) end it 'updates the documents' do result expect(authorized_collection.find(a: { '$lt' => 3 }).count).to eq(2) expect(authorized_collection.find(a: 4).count).to eq(1) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([3]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(1) end it 'reports the modified count' do expect(result.modified_count).to eq(1) end it 'reports the matched count' do expect(result.matched_count).to eq(2) end end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end end end end context 'when providing a single update many' do context 'when the write has a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ update_many: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}, collation: collation }}] end context 'when the server selected supports collations' do let!(:result) do bulk_write.execute end it 'applies the collation', if: collation_enabled? do expect(authorized_collection.find(name: 'pong').count).to eq(2) end it 'reports the upserted id', if: collation_enabled? do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count', if: collation_enabled? do expect(result.upserted_count).to eq(0) end it 'reports the modified count', if: collation_enabled? do expect(result.modified_count).to eq(2) end it 'reports the matched count', if: collation_enabled? do expect(result.matched_count).to eq(2) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:requests) do [{ update_many: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}, 'collation' => collation }}] end it 'raises an exception' do expect { bulk_write.execute }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the write does not have a collation specified' do before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end let(:requests) do [{ update_many: { filter: { name: 'BANG' }, update: { "$set" => { name: 'pong' }}}}] end let!(:result) do bulk_write.execute end it 'does not apply the collation' do expect(authorized_collection.find(name: 'pong').count).to eq(0) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the matched count' do expect(result.matched_count).to be(0) end end context 'when upsert is false' do let(:requests) do [{ update_many: { filter: { a: 0 }, update: { "$set" => { name: 'test' }}}}] end let(:result) do bulk_write.execute end before do authorized_collection.insert_many([{ a: 0 }, { a: 0 }]) end it 'updates the documents' do expect(authorized_collection.find(a: 0).count).to eq(2) end it 'reports the upserted ids' do expect(result.upserted_ids).to eq([]) end it 'reports the upserted count' do expect(result.upserted_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(2) end it 'reports the matched count' do expect(result.matched_count).to eq(2) end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when upsert is true' do let(:requests) do [{ update_many: { filter: { _id: 0 }, update: { "$set" => { name: 'test' }}, upsert: true }}] end let(:result) do bulk_write.execute end it 'updates the document' do result expect(authorized_collection.find(name: 'test').count).to eq(1) end it 'reports the upserted count' do expect(result.upserted_count).to eq(1) end it 'reports the matched count' do expect(result.matched_count).to eq(0) end it 'reports the modified count' do expect(result.modified_count).to eq(0) end it 'reports the upserted id' do expect(result.upserted_ids).to eq([0]) end context 'when there is a write concern error' do it 'raises an OperationFailure', if: standalone? 
do expect { bulk_write_invalid_write_concern.execute }.to raise_error(Mongo::Error::OperationFailure) end end end end end context 'when the operations need to be split' do let(:batch_size) do 11 end before do allow(client.cluster.next_primary).to receive(:max_write_batch_size).and_return(batch_size - 1) end context 'when a write error occurs' do let(:requests) do batch_size.times.map do |i| { insert_one: { _id: i }} end end let(:error) do begin bulk_write.execute rescue => e e end end it 'raises an exception' do expect { requests.push({ insert_one: { _id: 5 }}) bulk_write.execute }.to raise_error(Mongo::Error::BulkWriteError) end it 'sets the document index on the error' do requests.push({ insert_one: { _id: 5 }}) expect(error.result[Mongo::Error::WRITE_ERRORS].first['index']).to eq(batch_size) end end context 'when no write errors occur' do let(:requests) do batch_size.times.map do |i| { insert_one: { _id: i }} end end let(:result) do bulk_write.execute end it 'inserts the documents' do expect(result.inserted_count).to eq(batch_size) end it 'combines the inserted ids' do expect(result.inserted_ids.size).to eq(batch_size) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end context 'when retryable writes are supported', if: test_sessions? do let(:client) do authorized_client_with_retry_writes end let(:collection) do client[authorized_collection.name] end let!(:result) do bulk_write.execute end let(:first_txn_number) do EventSubscriber.started_events[-2].command['txnNumber'].instance_variable_get(:@integer) end let(:second_txn_number) do EventSubscriber.started_events[-1].command['txnNumber'].instance_variable_get(:@integer) end it 'inserts the documents' do expect(result.inserted_count).to eq(batch_size) end it 'combines the inserted ids' do expect(result.inserted_ids.size).to eq(batch_size) end it 'increments the transaction number' do expect(first_txn_number + 1). 
to eq(second_txn_number) end end end end context 'when an operation exceeds the max bson size' do let(:requests) do 5.times.map do |i| { insert_one: { _id: i, x: 'y' * 4000000 }} end end let(:result) do bulk_write.execute end it 'inserts the documents' do expect(result.inserted_count).to eq(5) end context 'when a session is provided' do let(:operation) do result end let(:client) do authorized_client end let(:extra_options) do { session: session } end it_behaves_like 'an operation using a session' end end end context 'when the bulk write is unordered' do let(:bulk_write) do described_class.new(collection, requests, options) end let(:options) do { ordered: false }.merge(extra_options) end let(:extra_options) do {} end let(:bulk_write_invalid_write_concern) do described_class.new(collection_invalid_write_concern, requests, options) end it_behaves_like 'an executable bulk write' end context 'when the bulk write is ordered' do let(:bulk_write) do described_class.new(collection, requests, options) end let(:options) do { ordered: true }.merge(extra_options) end let(:extra_options) do {} end let(:bulk_write_invalid_write_concern) do described_class.new(collection_invalid_write_concern, requests, options) end it_behaves_like 'an executable bulk write' end end describe '#initialize' do let(:requests) do [{ insert_one: { _id: 0 }}] end shared_examples_for 'a bulk write initializer' do it 'sets the collection' do expect(bulk_write.collection).to eq(authorized_collection) end it 'sets the requests' do expect(bulk_write.requests).to eq(requests) end end context 'when no options are provided' do let(:bulk_write) do described_class.new(authorized_collection, requests) end it 'sets empty options' do expect(bulk_write.options).to be_empty end it_behaves_like 'a bulk write initializer' end context 'when options are provided' do let(:bulk_write) do described_class.new(authorized_collection, requests, ordered: true) end it 'sets the options' do expect(bulk_write.options).to eq(ordered: true) end end context 'when nil options are provided' do let(:bulk_write) do described_class.new(authorized_collection, requests, nil) end it 'sets empty options' do expect(bulk_write.options).to be_empty end end end describe '#ordered?' do context 'when no option provided' do let(:bulk_write) do described_class.new(authorized_collection, []) end it 'returns true' do expect(bulk_write).to be_ordered end end context 'when the option is provided' do context 'when the option is true' do let(:bulk_write) do described_class.new(authorized_collection, [], ordered: true) end it 'returns true' do expect(bulk_write).to be_ordered end end context 'when the option is false' do let(:bulk_write) do described_class.new(authorized_collection, [], ordered: false) end it 'returns false' do expect(bulk_write).to_not be_ordered end end end end describe 'when the collection has a validator', if: find_command_enabled? 
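# Hedged sketch of the ordered/unordered split covered above. Mongo::BulkWrite
# defaults to ordered execution and #ordered? reflects the option. Assumes the
# same hypothetical local deployment as the other sketches here.
require 'mongo'

client     = Mongo::Client.new(['127.0.0.1:27017'], database: 'examples')
collection = client[:bulk_write_demo]

ordered_bulk   = Mongo::BulkWrite.new(collection, [{ insert_one: { _id: 1 } }], ordered: true)
unordered_bulk = Mongo::BulkWrite.new(collection, [{ insert_one: { _id: 2 } }], ordered: false)
default_bulk   = Mongo::BulkWrite.new(collection, [{ insert_one: { _id: 3 } }])

ordered_bulk.ordered?   # => true
unordered_bulk.ordered? # => false
default_bulk.ordered?   # => true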
do before do collection_with_validator.insert_many([{ :a => 1 }, { :a => 2 }]) end after do collection_with_validator.delete_many end context 'when the documents are invalid' do let(:ops) do [ { insert_one: { :x => 1 } }, { update_one: { filter: { :a => 1 }, update: { '$unset' => { :a => '' } } } }, { replace_one: { filter: { :a => 2 }, replacement: { :x => 2 } } } ] end context 'when bypass_document_validation is not set' do let(:result) do collection_with_validator.bulk_write(ops) end it 'raises BulkWriteError' do expect { result }.to raise_exception(Mongo::Error::BulkWriteError) end end context 'when bypass_document_validation is true' do let(:result2) do collection_with_validator.bulk_write( ops, :bypass_document_validation => true) end it 'executes successfully' do expect(result2.modified_count).to eq(2) expect(result2.inserted_count).to eq(1) end end end end end mongo-2.5.1/spec/mongo/connection_string_spec.rb0000644000004100000410000000636313257253113022035 0ustar www-datawww-datarequire 'spec_helper' describe 'ConnectionString' do include Mongo::ConnectionString CONNECTION_STRING_TESTS.each do |file| spec = Mongo::ConnectionString::Spec.new(file) context(spec.description) do before(:all) do module Mongo class Address private alias :original_initialize_resolver! :initialize_resolver! def initialize_resolver!(timeout, ssl_options) family = (host == 'localhost') ? ::Socket::AF_INET : ::Socket::AF_UNSPEC info = ::Socket.getaddrinfo(host, nil, family, ::Socket::SOCK_STREAM) FAMILY_MAP[info.first[4]].new(info[3], port, host) end end class Server # The constructor keeps the same API, but does not instantiate a # monitor and run it. alias :original_initialize :initialize def initialize(address, cluster, monitoring, event_listeners, options = {}) @address = address @cluster = cluster @monitoring = monitoring @options = options.freeze @monitor = Monitor.new(address, event_listeners, options) end # Disconnect simply needs to return true since we have no monitor and # no connection. alias :original_disconnect! :disconnect! def disconnect!; true; end end end end after(:all) do module Mongo # Return the implementations to their originals for the other # tests in the suite. class Address alias :initialize_resolver! :original_initialize_resolver! remove_method(:original_initialize_resolver!) end class Server alias :initialize :original_initialize remove_method(:original_initialize) alias :disconnect! :original_disconnect! remove_method(:original_disconnect!) end end end spec.tests.each_with_index do |test, index| context "when a #{test.description} is provided" do context 'when the uri is invalid', unless: test.valid? do it 'raises an error' do expect{ test.uri }.to raise_exception(Mongo::Error::InvalidURI) end end context 'when the uri should warn', if: test.warn? do before do expect(Mongo::Logger.logger).to receive(:warn) end it 'warns' do expect(test.client).to be_a(Mongo::Client) end end context 'when the uri is valid', if: test.valid? 
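# Sketch of the document validation behaviour tested above, assuming a
# hypothetical 'validating' collection created with a validator (for example
# one requiring an 'a' field). Without bypass_document_validation the writes
# are rejected with BulkWriteError; with it set to true they are applied.
require 'mongo'

client    = Mongo::Client.new(['127.0.0.1:27017'], database: 'examples')
validated = client[:validating]
ops = [
  { insert_one: { x: 1 } },
  { update_one: { filter: { a: 1 }, update: { '$unset' => { a: '' } } } }
]

begin
  validated.bulk_write(ops)
rescue Mongo::Error::BulkWriteError => e
  puts "rejected by the validator: #{e.message}"
end

validated.bulk_write(ops, bypass_document_validation: true)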
do it 'does not raise an exception' do expect(test.uri).to be_a(Mongo::URI) end it 'creates a client with the correct hosts' do expect(test.client).to have_hosts(test) end it 'creates a client with the correct authentication properties' do expect(test.client).to match_auth(test) end it 'creates a client with the correct options' do expect(test.client).to match_options(test) end end end end end end end mongo-2.5.1/spec/mongo/server_selector_spec.rb0000644000004100000410000002236013257253113021511 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::ServerSelector do include_context 'server selector' describe '.get' do let(:selector) do described_class.get(:mode => name, :tag_sets => tag_sets) end context 'when a server selector object is passed' do let(:name) do :primary end it 'returns the object' do expect(described_class.get(selector)).to be(selector) end end context 'when the mode is primary' do let(:name) do :primary end it 'returns a read preference of class Primary' do expect(selector).to be_a(Mongo::ServerSelector::Primary) end end context 'when the mode is primary_preferred' do let(:name) do :primary_preferred end it 'returns a read preference of class PrimaryPreferred' do expect(selector).to be_a(Mongo::ServerSelector::PrimaryPreferred) end end context 'when the mode is secondary' do let(:name) do :secondary end it 'returns a read preference of class Secondary' do expect(selector).to be_a(Mongo::ServerSelector::Secondary) end end context 'when the mode is secondary_preferred' do let(:name) do :secondary_preferred end it 'returns a read preference of class SecondaryPreferred' do expect(selector).to be_a(Mongo::ServerSelector::SecondaryPreferred) end end context 'when the mode is nearest' do let(:name) do :nearest end it 'returns a read preference of class Nearest' do expect(selector).to be_a(Mongo::ServerSelector::Nearest) end end context 'when a mode is not provided' do let(:selector) { described_class.get } it 'returns a read preference of class Primary' do expect(selector).to be_a(Mongo::ServerSelector::Primary) end end context 'when tag sets are provided' do let(:selector) do described_class.get(:mode => :secondary, :tag_sets => tag_sets) end let(:tag_sets) do [{ 'test' => 'tag' }] end it 'sets tag sets on the read preference object' do expect(selector.tag_sets).to eq(tag_sets) end end context 'when server_selection_timeout is specified' do let(:selector) do described_class.get(:mode => :secondary, :server_selection_timeout => 1) end it 'sets server selection timeout on the read preference object' do expect(selector.server_selection_timeout).to eq(1) end end context 'when server_selection_timeout is not specified' do let(:selector) do described_class.get(:mode => :secondary) end it 'sets server selection timeout to the default' do expect(selector.server_selection_timeout).to eq(Mongo::ServerSelector::SERVER_SELECTION_TIMEOUT) end end context 'when local_threshold is specified' do let(:selector) do described_class.get(:mode => :secondary, :local_threshold => 0.010) end it 'sets local_threshold on the read preference object' do expect(selector.local_threshold).to eq(0.010) end end context 'when local_threshold is not specified' do let(:selector) do described_class.get(:mode => :secondary) end it 'sets local threshold to the default' do expect(selector.local_threshold).to eq(Mongo::ServerSelector::LOCAL_THRESHOLD) end end end describe "#select_server" do context 'when #select returns a list of nils' do let(:servers) do [ make_server(:primary) ] end let(:cluster) do 
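# Illustrative use of Mongo::ServerSelector.get as exercised above: :mode picks
# the selector class, while :tag_sets, :server_selection_timeout and
# :local_threshold are carried on the returned preference object. No running
# server is needed just to build a selector.
require 'mongo'

selector = Mongo::ServerSelector.get(
  mode: :secondary,
  tag_sets: [{ 'data_center' => 'nyc' }],
  server_selection_timeout: 1,
  local_threshold: 0.010
)

selector.class                    # => Mongo::ServerSelector::Secondary
selector.tag_sets                 # => [{ 'data_center' => 'nyc' }]
selector.server_selection_timeout # => 1
selector.local_threshold          # => 0.01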
double('cluster').tap do |c| allow(c).to receive(:topology).and_return(topology) allow(c).to receive(:servers).and_return(servers) allow(c).to receive(:single?).and_return(false) allow(c).to receive(:sharded?).and_return(false) allow(c).to receive(:unknown?).and_return(false) allow(c).to receive(:scan!).and_return(true) allow(c).to receive(:options).and_return(server_selection_timeout: 0.1) end end let(:read_pref) do described_class.get(mode: :primary).tap do |pref| allow(pref).to receive(:select).and_return([ nil, nil ]) end end it 'raises a NoServerAvailable error' do expect do read_pref.select_server(cluster) end.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when the cluster has a server_selection_timeout set' do let(:servers) do [ make_server(:secondary), make_server(:primary) ] end let(:cluster) do double('cluster').tap do |c| allow(c).to receive(:topology).and_return(topology) allow(c).to receive(:servers).and_return(servers) allow(c).to receive(:single?).and_return(false) allow(c).to receive(:sharded?).and_return(false) allow(c).to receive(:unknown?).and_return(false) allow(c).to receive(:scan!).and_return(true) allow(c).to receive(:options).and_return(server_selection_timeout: 0) end end let(:read_pref) do described_class.get(mode: :nearest) end it 'uses the server_selection_timeout of the cluster' do expect{ read_pref.select_server(cluster) }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when the cluster has a local_threshold set' do let(:near_server) do make_server(:secondary).tap do |s| allow(s).to receive(:connectable?).and_return(true) allow(s).to receive(:average_round_trip_time).and_return(100) allow(s).to receive(:check_driver_support!).and_return(true) end end let(:far_server) do make_server(:secondary).tap do |s| allow(s).to receive(:connectable?).and_return(true) allow(s).to receive(:average_round_trip_time).and_return(200) allow(s).to receive(:check_driver_support!).and_return(true) end end let(:servers) do [ near_server, far_server ] end let(:cluster) do double('cluster').tap do |c| allow(c).to receive(:topology).and_return(topology) allow(c).to receive(:servers).and_return(servers) allow(c).to receive(:single?).and_return(false) allow(c).to receive(:sharded?).and_return(false) allow(c).to receive(:unknown?).and_return(false) allow(c).to receive(:scan!).and_return(true) allow(c).to receive(:options).and_return(local_threshold: 0.050) end end let(:read_pref) do described_class.get(mode: :nearest) end it 'uses the local_threshold of the cluster' do expect(read_pref.select_server(cluster)).to eq(near_server) end end end shared_context 'a ServerSelector' do context 'when cluster#servers is empty' do let(:servers) do [] end let(:cluster) do double('cluster').tap do |c| allow(c).to receive(:topology).and_return(topology) allow(c).to receive(:servers).and_return(servers) allow(c).to receive(:single?).and_return(single) allow(c).to receive(:sharded?).and_return(sharded) allow(c).to receive(:unknown?).and_return(false) allow(c).to receive(:scan!).and_return(true) allow(c).to receive(:options).and_return(server_selection_timeout: 0.1) end end let(:read_pref) do described_class.get(mode: :primary) end it 'raises a NoServerAvailable error' do expect do read_pref.select_server(cluster) end.to raise_exception(Mongo::Error::NoServerAvailable) end end end context 'when the cluster has a Single topology' do let(:single) { true } let(:sharded) { false } it_behaves_like 'a ServerSelector' end context 'when the cluster has a ReplicaSet 
topology' do let(:single) { false } let(:sharded) { false } it_behaves_like 'a ServerSelector' end context 'when the cluster has a Sharded topology' do let(:single) { false } let(:sharded) { true } it_behaves_like 'a ServerSelector' end describe '#inspect' do let(:options) do {} end let(:read_pref) do described_class.get({ mode: mode }.merge(options)) end context 'when the mode is primary' do let(:mode) do :primary end it 'includes the mode in the inspect string' do expect(read_pref.inspect).to match(/#{mode.to_s}/i) end end context 'when there are tag sets' do let(:mode) do :secondary end let(:options) do { tag_sets: [{ 'data_center' => 'nyc' }] } end it 'includes the tag sets in the inspect string' do expect(read_pref.inspect).to include(options[:tag_sets].inspect) end end context 'when there is a max staleness set' do let(:mode) do :secondary end let(:options) do { max_staleness: 123 } end it 'includes the tag sets in the inspect string' do expect(read_pref.inspect).to match(/max_staleness/i) expect(read_pref.inspect).to match(/123/) end end end end mongo-2.5.1/spec/mongo/error/0000755000004100000410000000000013257253113016072 5ustar www-datawww-datamongo-2.5.1/spec/mongo/error/parser_spec.rb0000644000004100000410000000451113257253113020726 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Error::Parser do describe '#message' do let(:parser) do described_class.new(document) end context 'when the document contains no error message' do let(:document) do { 'ok' => 1 } end it 'returns an empty string' do expect(parser.message).to be_empty end end context 'when the document contains an errmsg' do let(:document) do { 'errmsg' => 'no such command: notacommand', 'code'=>59 } end it 'returns the message' do expect(parser.message).to eq('no such command: notacommand (59)') end end context 'when the document contains writeErrors' do context 'when only a single error exists' do let(:document) do { 'writeErrors' => [{ 'code' => 9, 'errmsg' => 'Unknown modifier: $st' }]} end it 'returns the message' do expect(parser.message).to eq('Unknown modifier: $st (9)') end end context 'when multiple errors exist' do let(:document) do { 'writeErrors' => [ { 'code' => 9, 'errmsg' => 'Unknown modifier: $st' }, { 'code' => 9, 'errmsg' => 'Unknown modifier: $bl' } ] } end it 'returns the messages concatenated' do expect(parser.message).to eq( 'Unknown modifier: $st (9), Unknown modifier: $bl (9)' ) end end end context 'when the document contains $err' do let(:document) do { '$err' => 'not authorized for query', 'code' => 13 } end it 'returns the message' do expect(parser.message).to eq('not authorized for query (13)') end end context 'when the document contains err' do let(:document) do { 'err' => 'not authorized for query', 'code' => 13 } end it 'returns the message' do expect(parser.message).to eq('not authorized for query (13)') end end context 'when the document contains a writeConcernError' do let(:document) do { 'writeConcernError' => { 'code' => 100, 'errmsg' => 'Not enough data-bearing nodes' } } end it 'returns the message' do expect(parser.message).to eq('Not enough data-bearing nodes (100)') end end end end mongo-2.5.1/spec/mongo/client_spec.rb0000644000004100000410000012252313257253113017563 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Client do describe '#==' do let(:client) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :database => TEST_DB ) end after do client.close end context 'when the other is a client' do context 'when the options and 
cluster are equal' do let(:other) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :database => TEST_DB ) end it 'returns true' do expect(client).to eq(other) end end context 'when the options and cluster are not equal' do let(:other) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :secondary }, :database => TEST_DB ) end it 'returns true' do expect(client).not_to eq(other) end end end context 'when the other is not a client' do it 'returns false' do expect(client).not_to eq('test') end end end describe '#[]' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => TEST_DB) end shared_examples_for 'a collection switching object' do before do client.use(:dbtest) end it 'returns the new collection' do expect(collection.name).to eq('users') end end context 'when provided a string' do let(:collection) do client['users'] end it_behaves_like 'a collection switching object' end context 'when provided a symbol' do let(:collection) do client[:users] end it_behaves_like 'a collection switching object' end end describe '#eql' do let(:client) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :database => TEST_DB ) end context 'when the other is a client' do context 'when the options and cluster are equal' do let(:other) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :database => TEST_DB ) end it 'returns true' do expect(client).to eql(other) end end context 'when the options and cluster are not equal' do let(:other) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :secondary }, :database => TEST_DB ) end it 'returns true' do expect(client).not_to eql(other) end end end context 'when the other is not a client' do let(:client) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :database => TEST_DB ) end it 'returns false' do expect(client).not_to eql('test') end end end describe '#hash' do let(:client) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :local_threshold => 0.010, :server_selection_timeout => 10000, :database => TEST_DB ) end let(:options) do Mongo::Options::Redacted.new(:read => { :mode => :primary }, :local_threshold => 0.010, :server_selection_timeout => 10000, :database => TEST_DB) end let(:expected) do [client.cluster, options].hash end it 'returns a hash of the cluster and options' do expect(client.hash).to eq(expected) end end describe '#inspect' do let(:client) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :database => TEST_DB ) end it 'returns the cluster information' do expect(client.inspect).to include( " { :mode => :primary }, :database => TEST_DB, :password => 'some_password', :user => 'emily' ) end it 'does not print out sensitive data' do expect(client.inspect).not_to match('some_password') end end end describe '#initialize' do context 'when providing options' do context 'when retry_writes is defined' do let(:options) do { retry_writes: true } end let(:client) do described_class.new([default_address.seed], authorized_client.options.merge(options)) end after do client.close end it 'sets the option' do expect(client.options['retry_writes']).to eq(options[:retry_writes]) end end context 'when compressors are provided' do let(:client) do described_class.new([default_address.seed], authorized_client.options.merge(options)) end after do client.close end context 'when the compressor is supported' do let(:options) do { compressors: ['zlib'] } end it 'sets the 
compressor' do expect(client.options['compressors']).to eq(options[:compressors]) end it 'sends the compressor in the compression key of the handshake document' do expect(client.cluster.app_metadata.send(:document)[:compression]).to eq(options[:compressors]) end it 'uses compression for messages', if: testing_compression? do expect(Mongo::Protocol::Compressed).to receive(:new).and_call_original client[TEST_COLL].find({}, limit: 1).first end it 'does not use compression for authentication messages' do expect(Mongo::Protocol::Compressed).not_to receive(:new) client.cluster.next_primary.send(:with_connection) do |conn| conn.send(:authenticate!) end end end context 'when the compressor is not supported by the driver' do let(:options) do { compressors: ['snoopy'] } end it 'does not set the compressor and warns' do expect(Mongo::Logger.logger).to receive(:warn) expect(client.options['compressors']).to be_nil end it 'sets the compression key of the handshake document to an empty array' do expect(client.cluster.app_metadata.send(:document)[:compression]).to eq([]) end context 'when one supported compressor and one unsupported compressor are provided', if: compression_enabled? do let(:options) do { compressors: ['zlib', 'snoopy'] } end it 'does not set the unsupported compressor and warns' do expect(Mongo::Logger.logger).to receive(:warn).at_least(:once) expect(client.options['compressors']).to eq(['zlib']) end it 'sets the compression key of the handshake document to the list of supported compressors' do expect(client.cluster.app_metadata.send(:document)[:compression]).to eq(['zlib']) end end end context 'when the compressor is not supported by the server', unless: collation_enabled? do let(:options) do { compressors: ['zlib'] } end it 'does not set the compressor and warns' do expect(Mongo::Logger.logger).to receive(:warn).at_least(:once) expect(client.cluster.next_primary.monitor.compressor).to be_nil end end end context 'when compressors are not provided', unless: compression_enabled? do let(:client) do authorized_client end it 'does not set the compressor' do expect(client.options['compressors']).to be_nil end it 'sets the compression key of the handshake document to an empty array' do expect(client.cluster.app_metadata.send(:document)[:compression]).to eq([]) end it 'does not use compression for messages' do client[TEST_COLL].find({}, limit: 1).first expect(Mongo::Protocol::Compressed).not_to receive(:new) end end context 'when a zlib_compression_level option is provided', if: testing_compression? 
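# Sketch of the compressor negotiation above: 'zlib' is the compressor the
# specs treat as supported, unknown names are dropped with a warning, and the
# accepted list is advertised in the handshake. Assumes a hypothetical local
# server; whether compression is used on the wire also depends on the server.
require 'mongo'

zlib_client = Mongo::Client.new(['127.0.0.1:27017'],
                                database: 'examples',
                                compressors: ['zlib'])
zlib_client.options['compressors']  # => ["zlib"]

mixed_client = Mongo::Client.new(['127.0.0.1:27017'],
                                 database: 'examples',
                                 compressors: ['zlib', 'snoopy'])
mixed_client.options['compressors'] # => ["zlib"], after a warning about 'snoopy'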
do let(:client) do described_class.new([default_address.seed], TEST_OPTIONS.merge(zlib_compression_level: 1)) end it 'sets the option on the client' do expect(client.options[:zlib_compression_level]).to eq(1) end end context 'when ssl options are provided' do let(:options) do { :ssl => true, :ssl_ca_cert => CA_PEM, :ssl_ca_cert_string => 'ca cert string', :ssl_ca_cert_object => 'ca cert object', :ssl_cert => CLIENT_CERT_PEM, :ssl_cert_string => 'cert string', :ssl_cert_object => 'cert object', :ssl_key => CLIENT_KEY_PEM, :ssl_key_string => 'key string', :ssl_key_object => 'key object', :ssl_key_pass_phrase => 'passphrase', :ssl_verify => true } end let(:client) do described_class.new(['127.0.0.1:27017'], TEST_OPTIONS.merge(options)) end it 'sets the ssl option' do expect(client.options[:ssl]).to eq(options[:ssl]) end it 'sets the ssl_ca_cert option' do expect(client.options[:ssl_ca_cert]).to eq(options[:ssl_ca_cert]) end it 'sets the ssl_ca_cert_string option' do expect(client.options[:ssl_ca_cert_string]).to eq(options[:ssl_ca_cert_string]) end it 'sets the ssl_ca_cert_object option' do expect(client.options[:ssl_ca_cert_object]).to eq(options[:ssl_ca_cert_object]) end it 'sets the ssl_cert option' do expect(client.options[:ssl_cert]).to eq(options[:ssl_cert]) end it 'sets the ssl_cert_string option' do expect(client.options[:ssl_cert_string]).to eq(options[:ssl_cert_string]) end it 'sets the ssl_cert_object option' do expect(client.options[:ssl_cert_object]).to eq(options[:ssl_cert_object]) end it 'sets the ssl_key option' do expect(client.options[:ssl_key]).to eq(options[:ssl_key]) end it 'sets the ssl_key_string option' do expect(client.options[:ssl_key_string]).to eq(options[:ssl_key_string]) end it 'sets the ssl_key_object option' do expect(client.options[:ssl_key_object]).to eq(options[:ssl_key_object]) end it 'sets the ssl_key_pass_phrase option' do expect(client.options[:ssl_key_pass_phrase]).to eq(options[:ssl_key_pass_phrase]) end it 'sets the ssl_verify option' do expect(client.options[:ssl_verify]).to eq(options[:ssl_verify]) end end context 'when no database is provided' do let(:client) do described_class.new(['127.0.0.1:27017'], :read => { :mode => :secondary }) end it 'defaults the database to admin' do expect(client.database.name).to eq('admin') end end context 'when a database is provided' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => :testdb) end it 'sets the current database' do expect(client[:users].name).to eq('users') end end context 'when providing a custom logger' do let(:logger) do Logger.new($stdout).tap do |l| l.level = Logger::FATAL end end let(:client) do authorized_client.with(logger: logger) end after do client.close end it 'does not use the global logger' do expect(client.cluster.logger).to_not eq(Mongo::Logger.logger) end end context 'when providing a heartbeat_frequency' do let(:client) do described_class.new(['127.0.0.1:27017'], :heartbeat_frequency => 2) end it 'sets the heartbeat frequency' do expect(client.cluster.options[:heartbeat_frequency]).to eq(client.options[:heartbeat_frequency]) end end context 'when min_pool_size is provided' do let(:client) do described_class.new(['127.0.0.1:27017'], options) end context 'when max_pool_size is provided' do context 'when the min_pool_size is greater than the max_pool_size' do let(:options) do { :min_pool_size => 20, :max_pool_size => 10 } end it 'raises an Exception' do expect { client }.to raise_exception(Mongo::Error::InvalidMinPoolSize) end end context 'when the min_pool_size is 
less than the max_pool_size' do let(:options) do { :min_pool_size => 10, :max_pool_size => 20 } end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(options[:min_pool_size]) expect(client.options[:max_pool_size]).to eq(options[:max_pool_size]) end end context 'when the min_pool_size is equal to the max_pool_size' do let(:options) do { :min_pool_size => 10, :max_pool_size => 10 } end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(options[:min_pool_size]) expect(client.options[:max_pool_size]).to eq(options[:max_pool_size]) end end end context 'when max_pool_size is not provided' do context 'when the min_pool_size is greater than the default max_pool_size' do let(:options) do { :min_pool_size => 10 } end it 'raises an Exception' do expect { client }.to raise_exception(Mongo::Error::InvalidMinPoolSize) end end context 'when the min_pool_size is less than the default max_pool_size' do let(:options) do { :min_pool_size => 3 } end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(options[:min_pool_size]) end end context 'when the min_pool_size is equal to the max_pool_size' do let(:options) do { :min_pool_size => Mongo::Server::ConnectionPool::Queue::MAX_SIZE } end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(options[:min_pool_size]) end end end end context 'when max_pool_size and min_pool_size are both nil' do let(:client) do described_class.new(['127.0.0.1:27017'], options) end let(:options) do { :min_pool_size => nil, :max_pool_size => nil } end it 'does not set either option' do expect(client.options[:max_pool_size]).to be_nil expect(client.options[:min_pool_size]).to be_nil end end context 'when platform details are specified' do let(:app_metadata) do client.cluster.app_metadata end let(:client) do described_class.new(['127.0.0.1:27017'], :platform => 'mongoid-6.0.2') end it 'includes the platform info in the app metadata' do expect(app_metadata.send(:full_client_document)[:platform]).to match(/mongoid-6\.0\.2/) end end context 'when platform details are not specified' do let(:app_metadata) do client.cluster.app_metadata end let(:client) do described_class.new(['127.0.0.1:27017']) end let(:platform_string) do [ RUBY_VERSION, RUBY_PLATFORM, RbConfig::CONFIG['build'] ].join(', ') end it 'does not include the platform info in the app metadata' do expect(app_metadata.send(:full_client_document)[:platform]).to eq(platform_string) end end end context 'when providing a connection string' do context 'when the string uses the SRV Protocol', if: test_connecting_externally? 
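# Sketch of the pool size validation above: min_pool_size may not exceed
# max_pool_size (or, when max_pool_size is omitted, the default maximum,
# Mongo::Server::ConnectionPool::Queue::MAX_SIZE), otherwise construction
# raises Mongo::Error::InvalidMinPoolSize. Assumes a hypothetical local server.
require 'mongo'

Mongo::Client.new(['127.0.0.1:27017'], database: 'examples',
                  min_pool_size: 10, max_pool_size: 20)  # accepted

begin
  Mongo::Client.new(['127.0.0.1:27017'], database: 'examples',
                    min_pool_size: 20, max_pool_size: 10)
rescue Mongo::Error::InvalidMinPoolSize => e
  puts e.message
end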
do let!(:uri) do 'mongodb+srv://test5.test.build.10gen.cc/testdb' end let(:client) do described_class.new(uri) end it 'sets the database' do expect(client.options[:database]).to eq('testdb') end end context 'when a database is provided' do let!(:uri) do 'mongodb://127.0.0.1:27017/testdb' end let(:client) do described_class.new(uri) end it 'sets the database' do expect { client[:users] }.to_not raise_error end end context 'when a database is not provided' do let!(:uri) do 'mongodb://127.0.0.1:27017' end let(:client) do described_class.new(uri) end it 'defaults the database to admin' do expect(client.database.name).to eq('admin') end end context 'when options are provided' do let!(:uri) do 'mongodb://127.0.0.1:27017/testdb?w=3' end let(:client) do described_class.new(uri) end let(:expected_options) do Mongo::Options::Redacted.new(:write => { :w => 3 }, :database => 'testdb') end it 'sets the options' do expect(client.options).to eq(expected_options) end context 'when min_pool_size is provided' do context 'when max_pool_size is provided' do context 'when the min_pool_size is greater than the max_pool_size' do let(:uri) do 'mongodb://127.0.0.1:27017/?minPoolSize=20&maxPoolSize=10' end it 'raises an Exception' do expect { client }.to raise_exception(Mongo::Error::InvalidMinPoolSize) end end context 'when the min_pool_size is less than the max_pool_size' do let(:uri) do 'mongodb://127.0.0.1:27017/?minPoolSize=10&maxPoolSize=20' end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(10) expect(client.options[:max_pool_size]).to eq(20) end end context 'when the min_pool_size is equal to the max_pool_size' do let(:uri) do 'mongodb://127.0.0.1:27017/?minPoolSize=10&maxPoolSize=10' end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(10) expect(client.options[:max_pool_size]).to eq(10) end end end context 'when max_pool_size is not provided' do context 'when the min_pool_size is greater than the default max_pool_size' do let(:uri) do 'mongodb://127.0.0.1:27017/?minPoolSize=10' end it 'raises an Exception' do expect { client }.to raise_exception(Mongo::Error::InvalidMinPoolSize) end end context 'when the min_pool_size is less than the default max_pool_size' do let(:uri) do 'mongodb://127.0.0.1:27017/?minPoolSize=3' end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(3) end end context 'when the min_pool_size is equal to the max_pool_size' do let(:uri) do 'mongodb://127.0.0.1:27017/?minPoolSize=5' end it 'sets the option' do expect(client.options[:min_pool_size]).to eq(5) end end end end end context 'when options are provided not in the string' do let!(:uri) do 'mongodb://127.0.0.1:27017/testdb' end let(:client) do described_class.new(uri, :write => { :w => 3 }) end let(:expected_options) do Mongo::Options::Redacted.new(:write => { :w => 3 }, :database => 'testdb') end it 'sets the options' do expect(client.options).to eq(expected_options) end end context 'when options are provided in the string and explicitly' do let!(:uri) do 'mongodb://127.0.0.1:27017/testdb?w=3' end let(:client) do described_class.new(uri, :write => { :w => 4 }) end let(:expected_options) do Mongo::Options::Redacted.new(:write => { :w => 4 }, :database => 'testdb') end it 'allows explicit options to take preference' do expect(client.options).to eq(expected_options) end end context 'when a replica set name is provided' do let!(:uri) do 'mongodb://127.0.0.1:27017/testdb?replicaSet=testing' end let(:client) do described_class.new(uri) end it 'sets the correct cluster topology' 
do expect(client.cluster.topology).to be_a(Mongo::Cluster::Topology::ReplicaSet) end end context 'when an invalid option is provided' do let(:client) do described_class.new(['127.0.0.1:27017'], :ssl => false, :invalid => :test) end it 'does not set the option' do expect(client.options.keys).not_to include('invalid') end it 'sets the valid options' do expect(client.options.keys).to include('ssl') end it 'warns that an invalid option has been specified' do expect(Mongo::Logger.logger).to receive(:warn) expect(client.options.keys).not_to include('invalid') end end end end describe '#server_selector' do context 'when there is a read preference set' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => TEST_DB, :read => mode, :server_selection_timeout => 2) end let(:server_selector) do client.server_selector end context 'when mode is primary' do let(:mode) do { :mode => :primary } end it 'returns a primary server selector' do expect(server_selector).to be_a(Mongo::ServerSelector::Primary) end it 'passes the options to the cluster' do expect(client.cluster.options[:server_selection_timeout]).to eq(2) end end context 'when mode is primary_preferred' do let(:mode) do { :mode => :primary_preferred } end it 'returns a primary preferred server selector' do expect(server_selector).to be_a(Mongo::ServerSelector::PrimaryPreferred) end end context 'when mode is secondary' do let(:mode) do { :mode => :secondary } end it 'uses a Secondary server selector' do expect(server_selector).to be_a(Mongo::ServerSelector::Secondary) end end context 'when mode is secondary preferred' do let(:mode) do { :mode => :secondary_preferred } end it 'uses a Secondary server selector' do expect(server_selector).to be_a(Mongo::ServerSelector::SecondaryPreferred) end end context 'when mode is nearest' do let(:mode) do { :mode => :nearest } end it 'uses a Secondary server selector' do expect(server_selector).to be_a(Mongo::ServerSelector::Nearest) end end context 'when no mode provided' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => TEST_DB, :server_selection_timeout => 2) end it 'returns a primary server selector' do expect(server_selector).to be_a(Mongo::ServerSelector::Primary) end end context 'when the read preference is printed' do let(:client) do described_class.new([ default_address.to_s ], options) end let(:options) do { user: 'Emily', password: 'sensitive_data', server_selection_timeout: 0.1 } end before do allow(client.database.cluster).to receive(:single?).and_return(false) end let(:error) do begin client.database.command(ping: 1) rescue => e e end end it 'redacts sensitive client options' do expect(error.message).not_to match(options[:password]) end end end end describe '#read_preference' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => TEST_DB, :read => mode, :server_selection_timeout => 2) end let(:preference) do client.read_preference end context 'when mode is primary' do let(:mode) do { :mode => :primary } end it 'returns a primary read preference' do expect(preference).to eq(BSON::Document.new(mode)) end end context 'when mode is primary_preferred' do let(:mode) do { :mode => :primary_preferred } end it 'returns a primary preferred read preference' do expect(preference).to eq(BSON::Document.new(mode)) end end context 'when mode is secondary' do let(:mode) do { :mode => :secondary } end it 'returns a secondary read preference' do expect(preference).to eq(BSON::Document.new(mode)) end end context 'when mode is secondary preferred' do let(:mode) do 
{ :mode => :secondary_preferred } end it 'returns a secondary preferred read preference' do expect(preference).to eq(BSON::Document.new(mode)) end end context 'when mode is nearest' do let(:mode) do { :mode => :nearest } end it 'returns a nearest read preference' do expect(preference).to eq(BSON::Document.new(mode)) end end context 'when no mode provided' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => TEST_DB, :server_selection_timeout => 2) end it 'returns nil' do expect(preference).to be_nil end end end describe '#use' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => TEST_DB) end shared_examples_for 'a database switching object' do it 'returns the new client' do expect(client.send(:database).name).to eq('ruby-driver') end it 'keeps the same cluster' do expect(database.cluster).to equal(client.cluster) end end context 'when provided a string' do let(:database) do client.use('testdb') end it_behaves_like 'a database switching object' end context 'when provided a symbol' do let(:database) do client.use(:testdb) end it_behaves_like 'a database switching object' end context 'when providing nil' do it 'raises an exception' do expect { client.use(nil) }.to raise_error(Mongo::Error::InvalidDatabaseName) end end end describe '#with' do let(:client) do described_class.new(['127.0.0.1:27017'], :database => TEST_DB) end context 'when providing nil' do it 'returns the cloned client' do expect(client.with(nil)).to eq(client) end end context 'when the app_name is changed' do let(:client) do authorized_client end let!(:original_options) do client.options end let(:new_options) do { app_name: 'reports' } end let!(:new_client) do authorized_client.with(new_options) end it 'returns a new client' do expect(new_client).not_to equal(client) end it 'replaces the existing options' do expect(new_client.options).to eq(client.options.merge(new_options)) end it 'does not modify the original client' do expect(client.options).to eq(original_options) end it 'does not keep the same cluster' do expect(new_client.cluster).not_to be(client.cluster) end end context 'when the write concern is not changed' do let(:client) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :secondary }, :write => { :w => 1 }, :database => TEST_DB ) end let!(:new_client) do client.with(:read => { :mode => :primary }) end let(:new_options) do Mongo::Options::Redacted.new(:read => { :mode => :primary }, :write => { :w => 1 }, :database => TEST_DB) end let(:original_options) do Mongo::Options::Redacted.new(:read => { :mode => :secondary }, :write => { :w => 1 }, :database => TEST_DB) end it 'returns a new client' do expect(new_client).not_to equal(client) end it 'replaces the existing options' do expect(new_client.options).to eq(new_options) end it 'does not modify the original client' do expect(client.options).to eq(original_options) end it 'keeps the same cluster' do expect(new_client.cluster).to be(client.cluster) end end context 'when the write concern is changed' do let(:client) do described_class.new(['127.0.0.1:27017'], :write => { :w => 1 }, :database => TEST_DB) end context 'when the write concern has not been accessed' do let!(:new_client) do client.with(:write => { :w => 0 }) end let(:get_last_error) do new_client.write_concern.get_last_error end it 'returns the correct write concern' do expect(get_last_error).to be_nil end end context 'when the write concern has been accessed' do let!(:new_client) do client.write_concern client.with(:write => { :w => 0 }) end 
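# Sketch of Client#use and Client#with as exercised above: #use returns a new
# client bound to another database on the same cluster, and #with clones the
# client with merged options, sharing the cluster when the change does not
# require new connections. Assumes a hypothetical local server.
require 'mongo'

client = Mongo::Client.new(['127.0.0.1:27017'], database: 'examples',
                           write: { w: 1 })

other_db = client.use(:testdb)
other_db.cluster.equal?(client.cluster)       # => true

primary_reads = client.with(read: { mode: :primary })
primary_reads.options[:write]                 # the original { :w => 1 } is kept
primary_reads.cluster.equal?(client.cluster)  # => true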
let(:get_last_error) do new_client.write_concern.get_last_error end it 'returns the correct write concern' do expect(get_last_error).to be_nil end end end context 'when an invalid option is provided' do let(:new_client) do client.with(invalid: :option, ssl: false) end it 'does not set the invalid option' do expect(new_client.options.keys).not_to include('invalid') end it 'sets the valid options' do expect(new_client.options.keys).to include('ssl') end it 'warns that an invalid option has been specified' do expect(Mongo::Logger.logger).to receive(:warn) expect(new_client.options.keys).not_to include('invalid') end end end describe '#write_concern' do let(:concern) { client.write_concern } context 'when no option was provided to the client' do let(:client) { described_class.new(['127.0.0.1:27017'], :database => TEST_DB) } it 'does not set the write concern' do expect(concern).to be_nil end end context 'when an option is provided' do context 'when the option is acknowledged' do let(:client) do described_class.new(['127.0.0.1:27017'], :write => { :j => true }, :database => TEST_DB) end it 'returns a acknowledged write concern' do expect(concern.get_last_error).to eq(:getlasterror => 1, :j => true) end end context 'when the option is unacknowledged' do context 'when the w is 0' do let(:client) do described_class.new(['127.0.0.1:27017'], :write => { :w => 0 }, :database => TEST_DB) end it 'returns an unacknowledged write concern' do expect(concern.get_last_error).to be_nil end end context 'when the w is -1' do let(:client) do described_class.new(['127.0.0.1:27017'], :write => { :w => -1 }, :database => TEST_DB) end it 'raises an error' do expect { concern }.to raise_error(Mongo::Error::InvalidWriteConcern) end end end end end describe '#database_names' do it 'returns a list of database names' do expect(root_authorized_client.database_names).to include( 'admin' ) end context 'when filter criteria is present', if: sessions_enabled? do let(:result) do root_authorized_client.database_names(filter) end let(:filter) do { name: TEST_DB } end it 'returns a filtered list of database names' do expect(result.length).to eq(1) expect(result.first).to eq(filter[:name]) end end end describe '#list_databases' do it 'returns a list of database info documents' do expect( root_authorized_client.list_databases.collect do |i| i['name'] end).to include('admin') end context 'when filter criteria is present', if: sessions_enabled? do let(:result) do root_authorized_client.list_databases(filter) end let(:filter) do { name: TEST_DB } end it 'returns a filtered list of database info documents' do expect(result.length).to eq(1) expect(result[0]['name']).to eq(filter[:name]) end end context 'when name_only is true' do let(:client_options) do root_authorized_client.options.merge(heartbeat_frequency: 100, monitoring: true) end let(:client) do Mongo::Client.new(ADDRESSES, client_options).tap do |cl| cl.subscribe(Mongo::Monitoring::COMMAND, EventSubscriber.clear_events!) 
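# Sketch of the database listing helpers above, assuming a hypothetical user
# with permission to run listDatabases: #database_names returns plain names,
# #list_databases returns info documents, and passing true as the second
# argument sends the command with the nameOnly flag.
require 'mongo'

admin_client = Mongo::Client.new(['127.0.0.1:27017'], database: 'admin')
admin_client.database_names                              # => ["admin", "local", ...]
admin_client.list_databases.collect { |info| info['name'] } # includes "admin"
admin_client.list_databases({}, true)                    # listDatabases with nameOnly: true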
end end let(:command) do EventSubscriber.started_events.find { |c| c.command_name == :listDatabases }.command end before do client.list_databases({}, true) end after do client.close end it 'sends the command with the nameOnly flag set to true' do expect(command[:nameOnly]).to be(true) end end end describe '#list_mongo_databases' do let(:options) do { read: { mode: :secondary } } end let(:client) do root_authorized_client.with(options) end let(:result) do client.list_mongo_databases end it 'returns a list of Mongo::Database objects' do expect(result).to all(be_a(Mongo::Database)) end it 'creates database with specified options' do expect(result.first.options[:read]).to eq(BSON::Document.new(options)[:read]) end context 'when filter criteria is present', if: sessions_enabled? do let(:result) do client.list_mongo_databases(filter) end let(:filter) do { name: TEST_DB } end it 'returns a filtered list of Mongo::Database objects' do expect(result.length).to eq(1) expect(result.first.name).to eq(filter[:name]) end end end describe '#close' do let(:client) do described_class.new(['127.0.0.1:27017']) end before do expect(client.cluster).to receive(:disconnect!).and_call_original end it 'disconnects the cluster and returns true' do expect(client.close).to be(true) end end describe '#reconnect' do let(:client) do described_class.new(['127.0.0.1:27017']) end before do expect(client.cluster).to receive(:reconnect!).and_call_original end it 'reconnects the cluster and returns true' do expect(client.reconnect).to be(true) end end describe '#dup' do let(:client) do described_class.new( ['127.0.0.1:27017'], :read => { :mode => :primary }, :database => TEST_DB ) end it 'creates a client with Redacted options' do expect(client.dup.options).to be_a(Mongo::Options::Redacted) end end describe '#collections' do before do authorized_client.database[:users].create end after do authorized_client.database[:users].drop end let(:collection) do Mongo::Collection.new(authorized_client.database, 'users') end it 'refers the current database collections' do expect(authorized_client.collections).to include(collection) expect(authorized_client.collections).to all(be_a(Mongo::Collection)) end end describe '#start_session' do let(:session) do authorized_client.start_session end context 'when sessions are supported', if: test_sessions? 
do it 'creates a session' do expect(session).to be_a(Mongo::Session) end it 'sets the last use field to the current time' do expect(session.instance_variable_get(:@server_session).last_use).to be_within(0.2).of(Time.now) end context 'when options are provided' do let(:options) do { causal_consistency: true } end let(:session) do authorized_client.start_session(options) end it 'sets the options on the session' do expect(session.options[:causal_consistency]).to eq(options[:causal_consistency]) end end context 'when options are not provided' do it 'does not set options on the session' do expect(session.options).to eq({ implicit: false }) end end context 'when a session is checked out and checked back in' do let!(:session_a) do authorized_client.start_session end let!(:session_b) do authorized_client.start_session end let!(:session_a_server_session) do session_a.instance_variable_get(:@server_session) end let!(:session_b_server_session) do session_b.instance_variable_get(:@server_session) end before do session_a_server_session.next_txn_num session_a_server_session.next_txn_num session_b_server_session.next_txn_num session_b_server_session.next_txn_num session_a.end_session session_b.end_session end it 'is returned to the front of the queue' do expect(authorized_client.start_session.instance_variable_get(:@server_session)).to be(session_b_server_session) expect(authorized_client.start_session.instance_variable_get(:@server_session)).to be(session_a_server_session) end it 'preserves the transaction numbers on the server sessions' do expect(authorized_client.start_session.next_txn_num).to be(2) expect(authorized_client.start_session.next_txn_num).to be(2) end end context 'when an implicit session is used' do before do authorized_client.database.command(ping: 1) end let(:pool) do authorized_client.cluster.session_pool end let!(:before_last_use) do pool.instance_variable_get(:@queue)[0].last_use end it 'uses the session and updates the last use time' do authorized_client.database.command(ping: 1) expect(before_last_use).to be < (pool.instance_variable_get(:@queue)[0].last_use) end end end context 'when two clients have the same cluster', if: test_sessions? do let(:client) do authorized_client.with(read: { mode: :secondary }) end let(:session) do authorized_client.start_session end it 'allows the session to be used across the clients' do client[TEST_COLL].insert_one({ a: 1 }, session: session) end end context 'when two clients have different clusters', if: test_sessions? do let(:client) do authorized_client_with_retry_writes end let(:session) do authorized_client.start_session end it 'raises an exception' do expect { client[TEST_COLL].insert_one({ a: 1 }, session: session) }.to raise_exception(Mongo::Error::InvalidSession) end end context 'when sessions are not supported', unless: sessions_enabled? do it 'raises an exception' do expect { session }.to raise_exception(Mongo::Error::InvalidSession) end end end end mongo-2.5.1/spec/mongo/session/0000755000004100000410000000000013257253113016424 5ustar www-datawww-datamongo-2.5.1/spec/mongo/session/session_pool_spec.rb0000644000004100000410000001326713257253113022510 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Session::SessionPool, if: test_sessions? 
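# Sketch of the session API covered above, assuming a hypothetical deployment
# that supports sessions (the specs guard on test_sessions?). Sessions are
# drawn from a per-cluster pool and may be passed to CRUD methods on any
# client that shares that cluster.
require 'mongo'

client  = Mongo::Client.new(['127.0.0.1:27017'], database: 'examples')
session = client.start_session(causal_consistency: true)
session.options[:causal_consistency]  # => true

client[:session_demo].insert_one({ a: 1 }, session: session)
session.end_session                   # returns the server session to the pool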
do let(:cluster) do authorized_client.cluster end describe '.create' do let!(:pool) do described_class.create(cluster) end it 'creates a session pool' do expect(pool).to be_a(Mongo::Session::SessionPool) end it 'adds the pool as an instance variable on the cluster' do expect(cluster.session_pool).to eq(pool) end end describe '#initialize' do let(:pool) do described_class.new(cluster) end it 'sets the cluster' do expect(pool.instance_variable_get(:@cluster)).to be(authorized_client.cluster) end end describe '#inspect' do let(:pool) do described_class.new(cluster) end before do s = pool.checkout pool.checkin(s) end it 'includes the Ruby object_id in the formatted string' do expect(pool.inspect).to include(pool.object_id.to_s) end it 'includes the pool size in the formatted string' do expect(pool.inspect).to include('current_size=1') end end describe 'checkout' do let(:pool) do described_class.new(cluster) end context 'when a session is checked out' do let!(:session_a) do pool.checkout end let!(:session_b) do pool.checkout end before do pool.checkin(session_a) pool.checkin(session_b) end it 'is returned to the front of the queue' do expect(pool.checkout).to be(session_b) expect(pool.checkout).to be(session_a) end end context 'when there are sessions about to expire in the queue' do let(:old_session_a) do pool.checkout end let(:old_session_b) do pool.checkout end before do pool.checkin(old_session_a) pool.checkin(old_session_b) allow(old_session_a).to receive(:last_use).and_return(Time.now - 1800) allow(old_session_b).to receive(:last_use).and_return(Time.now - 1800) end context 'when a session is checked out' do let(:checked_out_session) do pool.checkout end it 'disposes of the old session and returns a new one' do expect(checked_out_session).not_to be(old_session_a) expect(checked_out_session).not_to be(old_session_b) expect(pool.instance_variable_get(:@queue)).to be_empty end end end context 'when a sessions that is about to expire is checked in' do let(:old_session_a) do pool.checkout end let(:old_session_b) do pool.checkout end before do allow(old_session_a).to receive(:last_use).and_return(Time.now - 1800) allow(old_session_b).to receive(:last_use).and_return(Time.now - 1800) pool.checkin(old_session_a) pool.checkin(old_session_b) end it 'disposes of the old sessions instead of adding them to the pool' do expect(pool.checkout).not_to be(old_session_a) expect(pool.checkout).not_to be(old_session_b) expect(pool.instance_variable_get(:@queue)).to be_empty end end end describe '#end_sessions' do let(:pool) do described_class.create(client.cluster) end let!(:session_a) do pool.checkout end let!(:session_b) do pool.checkout end let(:client) do subscribed_client end after do client.close end context 'when the number of ids is not larger than 10,000' do before do client.database.command(ping: 1) pool.checkin(session_a) pool.checkin(session_b) end let!(:cluster_time) do client.cluster.cluster_time end let(:end_sessions_command) do pool.end_sessions EventSubscriber.started_events.find { |c| c.command_name == :endSessions} end it 'sends the endSessions command with all the session ids' do end_sessions_command expect(end_sessions_command.command[:endSessions]).to include(BSON::Document.new(session_a.session_id)) expect(end_sessions_command.command[:endSessions]).to include(BSON::Document.new(session_b.session_id)) end context 'when talking to a replica set or mongos' do it 'sends the endSessions command with all the session ids and cluster time' do end_sessions_command 
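# Sketch of the (internal) session pool behaviour above: checked-in server
# sessions are reused most-recently-used first, sessions close to the server's
# session timeout are discarded rather than reused, and #end_sessions sends a
# best-effort endSessions command for the pooled ids. These are driver
# internals, shown only to mirror the tests; assumes a hypothetical local server.
require 'mongo'

client = Mongo::Client.new(['127.0.0.1:27017'], database: 'examples')
pool   = Mongo::Session::SessionPool.create(client.cluster)
a = pool.checkout
b = pool.checkout
pool.checkin(a)
pool.checkin(b)
pool.checkout.equal?(b) # => true, the last session checked in comes back first
pool.end_sessions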
expect(end_sessions_command.command[:endSessions]).to include(BSON::Document.new(session_a.session_id)) expect(end_sessions_command.command[:endSessions]).to include(BSON::Document.new(session_b.session_id)) expect(end_sessions_command.command[:$clusterTime]).to eq(client.cluster.cluster_time) end end end context 'when the number of ids is larger than 10_000' do let(:ids) do 10_001.times.map do |i| bytes = [SecureRandom.uuid.gsub(/\-/, '')].pack('H*') BSON::Document.new(id: BSON::Binary.new(bytes, :uuid)) end end before do queue = [] ids.each do |id| queue << double('session', session_id: id) end pool.instance_variable_set(:@queue, queue) expect(Mongo::Operation::Commands::Command).to receive(:new).at_least(:twice).and_call_original end let(:end_sessions_commands) do EventSubscriber.started_events.select { |c| c.command_name == :endSessions} end it 'sends the command more than once' do pool.end_sessions expect(end_sessions_commands.size).to eq(2) expect(end_sessions_commands[0].command[:endSessions]).to eq(ids[0...10_000]) expect(end_sessions_commands[1].command[:endSessions]).to eq([ids[10_000]]) end end end end mongo-2.5.1/spec/mongo/session/server_session_spec.rb0000644000004100000410000000302013257253113023027 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Session::ServerSession do describe '#initialize' do it 'sets the last use variable to the current time' do expect(described_class.new.last_use).to be_within(0.2).of(Time.now) end it 'sets a UUID as the session id' do expect(described_class.new.instance_variable_get(:@session_id)).to be_a(BSON::Document) expect(described_class.new.session_id).to be_a(BSON::Document) expect(described_class.new.session_id[:id]).to be_a(BSON::Binary) end end describe '#next_txn_number' do it 'advances and returns the next transaction number' do expect(described_class.new.next_txn_num).to be(0) end context 'when the method is called multiple times' do let(:server_session) do described_class.new end before do server_session.next_txn_num server_session.next_txn_num end it 'advances and returns the next transaction number' do expect(server_session.next_txn_num).to be(2) end end end describe '#inspect' do let(:session) do described_class.new end it 'includes the Ruby object_id in the formatted string' do expect(session.inspect).to include(session.object_id.to_s) end it 'includes the session_id in the formatted string' do expect(session.inspect).to include(session.session_id.to_s) end it 'includes the last_use in the formatted string' do expect(session.inspect).to include(session.last_use.to_s) end end end mongo-2.5.1/spec/mongo/logger_spec.rb0000644000004100000410000000177013257253113017564 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Logger do let(:logger) do described_class.logger end describe '.logger' do context 'when no logger has been set' do let(:test_logger) do Mongo::Logger.logger end before do Mongo::Logger.logger = nil end after do Mongo::Logger.logger = test_logger end it 'returns the default logger' do expect(logger.level).to eq(Logger::DEBUG) end end context 'when a logger has been set' do let(:info) do Logger.new($stdout).tap do |log| log.level = Logger::INFO end end let(:debug) do Logger.new($stdout).tap do |log| log.level = Logger::DEBUG end end before do described_class.logger = debug end after do described_class.logger = info end it 'returns the provided logger' do expect(logger.level).to eq(Logger::DEBUG) end end end end mongo-2.5.1/spec/mongo/cursor/0000755000004100000410000000000013257253113016256 5ustar 
www-datawww-datamongo-2.5.1/spec/mongo/cursor/builder/0000755000004100000410000000000013257253113017704 5ustar www-datawww-datamongo-2.5.1/spec/mongo/cursor/builder/op_get_more_spec.rb0000644000004100000410000000177713257253113023556 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cursor::Builder::OpGetMore do describe '#specification' do let(:reply) do Mongo::Protocol::Reply.allocate end let(:result) do Mongo::Operation::Result.new(reply) end let(:view) do Mongo::Collection::View.new( authorized_collection, {}, tailable: true, max_time_ms: 100 ) end let(:cursor) do Mongo::Cursor.new(view, result, authorized_primary) end let(:builder) do described_class.new(cursor) end let(:specification) do builder.specification end it 'includes to return' do expect(specification[:to_return]).to eq(0) end it 'includes the cursor id' do expect(specification[:cursor_id]).to eq(cursor.id) end it 'includes the database name' do expect(specification[:db_name]).to eq(TEST_DB) end it 'includes the collection name' do expect(specification[:coll_name]).to eq(TEST_COLL) end end end mongo-2.5.1/spec/mongo/cursor/builder/get_more_command_spec.rb0000644000004100000410000001013313257253113024540 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Cursor::Builder::GetMoreCommand do describe '#specification' do let(:reply) do Mongo::Protocol::Reply.allocate end let(:result) do Mongo::Operation::Result.new(reply) end let(:cursor) do Mongo::Cursor.new(view, result, authorized_primary) end let(:builder) do described_class.new(cursor) end let(:specification) do builder.specification end let(:selector) do specification[:selector] end context 'when the operation has a session' do let(:view) do Mongo::Collection::View.new(authorized_collection) end let(:session) do double('session') end let(:builder) do described_class.new(cursor, session) end it 'adds the session to the specification' do expect(builder.specification[:session]).to be(session) end end shared_examples_for 'a getmore command builder' do it 'includes the database name' do expect(specification[:db_name]).to eq(TEST_DB) end it 'includes getmore with cursor id' do expect(selector[:getMore]).to eq(cursor.id) end it 'includes the collection name' do expect(selector[:collection]).to eq(TEST_COLL) end end context 'when the query is standard' do let(:view) do Mongo::Collection::View.new(authorized_collection) end it_behaves_like 'a getmore command builder' it 'does not include max time' do expect(selector[:maxTimeMS]).to be_nil end it 'does not include batch size' do expect(selector[:batchSize]).to be_nil end end context 'when the query has a batch size' do let(:view) do Mongo::Collection::View.new(authorized_collection, {}, batch_size: 10) end it_behaves_like 'a getmore command builder' it 'does not include max time' do expect(selector[:maxTimeMS]).to be_nil end it 'includes batch size' do expect(selector[:batchSize]).to eq(10) end end context 'when a max await time is specified' do context 'when the cursor is not tailable' do let(:view) do Mongo::Collection::View.new(authorized_collection, {}, max_await_time_ms: 100) end it_behaves_like 'a getmore command builder' it 'does not include max time' do expect(selector[:maxTimeMS]).to be_nil end it 'does not include max await time' do expect(selector[:maxAwaitTimeMS]).to be_nil end it 'does not include batch size' do expect(selector[:batchSize]).to be_nil end end context 'when the cursor is tailable' do context 'when await data is true' do let(:view) do Mongo::Collection::View.new( authorized_collection, 
{}, await_data: true, tailable: true, max_await_time_ms: 100 ) end it_behaves_like 'a getmore command builder' it 'includes max time' do expect(selector[:maxTimeMS]).to eq(100) end it 'does not include max await time' do expect(selector[:maxAwaitTimeMS]).to be_nil end it 'does not include batch size' do expect(selector[:batchSize]).to be_nil end end context 'when await data is false' do let(:view) do Mongo::Collection::View.new( authorized_collection, {}, tailable: true, max_await_time_ms: 100 ) end it_behaves_like 'a getmore command builder' it 'does not include max time' do expect(selector[:maxTimeMS]).to be_nil end it 'does not include max await time' do expect(selector[:maxAwaitTimeMS]).to be_nil end it 'does not include batch size' do expect(selector[:batchSize]).to be_nil end end end end end end mongo-2.5.1/spec/mongo/server_selection_spec.rb0000644000004100000410000000676413257253113021670 0ustar www-datawww-datarequire 'spec_helper' describe 'Server Selection' do include Mongo::ServerSelection::Read SERVER_SELECTION_TESTS.each do |file| spec = Mongo::ServerSelection::Read::Spec.new(file) context(spec.description) do let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:topology) do spec.type.new({}, monitoring) end let(:listeners) do Mongo::Event::Listeners.new end let(:cluster) do double('cluster').tap do |c| allow(c).to receive(:topology).and_return(topology) allow(c).to receive(:single?).and_return(topology.single?) allow(c).to receive(:sharded?).and_return(topology.sharded?) allow(c).to receive(:replica_set?).and_return(topology.replica_set?) allow(c).to receive(:unknown?).and_return(topology.unknown?) allow(c).to receive(:app_metadata).and_return(app_metadata) end end let(:candidate_servers) do spec.candidate_servers.collect do |server| address = Mongo::Address.new(server['address']) Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS).tap do |s| allow(s).to receive(:average_round_trip_time).and_return(server['avg_rtt_ms'] / 1000.0) allow(s).to receive(:tags).and_return(server['tags']) allow(s).to receive(:secondary?).and_return(server['type'] == 'RSSecondary') allow(s).to receive(:primary?).and_return(server['type'] == 'RSPrimary') allow(s).to receive(:connectable?).and_return(true) allow(s).to receive(:check_driver_support!).and_return(true) end end end let(:in_latency_window) do spec.in_latency_window.collect do |server| address = Mongo::Address.new(server['address']) Mongo::Server.new(address, cluster, monitoring, listeners, TEST_OPTIONS).tap do |s| allow(s).to receive(:average_round_trip_time).and_return(server['avg_rtt_ms'] / 1000.0) allow(s).to receive(:tags).and_return(server['tags']) allow(s).to receive(:connectable?).and_return(true) allow(s).to receive(:check_driver_support!).and_return(true) end end end let(:server_selector) do Mongo::ServerSelector.get(:mode => spec.read_preference['mode'], :tag_sets => spec.read_preference['tag_sets']) end before do allow(cluster).to receive(:servers).and_return(candidate_servers) allow(cluster).to receive(:options).and_return(server_selection_timeout: 0.2) allow(cluster).to receive(:scan!).and_return(true) allow(cluster).to receive(:app_metadata).and_return(app_metadata) end context 'Valid read preference and matching server available', if: spec.server_available? do it 'Finds all suitable servers in the latency window', if: spec.replica_set? 
do expect(server_selector.send(:select, cluster.servers)).to match_array(in_latency_window) end it 'Finds the most suitable server in the latency window' do expect(in_latency_window).to include(server_selector.select_server(cluster)) end end context 'No matching server available', if: !spec.server_available? do it 'Raises exception' do expect do server_selector.select_server(cluster) end.to raise_exception(Mongo::Error::NoServerAvailable) end end end end end mongo-2.5.1/spec/mongo/collection_spec.rb0000644000004100000410000035065413257253113020450 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Collection do after do authorized_collection.delete_many end let(:collection_invalid_write_concern) do authorized_collection.client.with(write: INVALID_WRITE_CONCERN)[authorized_collection.name] end let(:collection_with_validator) do authorized_client[:validating] end let(:client) do authorized_client end describe '#==' do let(:database) do Mongo::Database.new(authorized_client, :test) end let(:collection) do described_class.new(database, :users) end context 'when the names are the same' do context 'when the databases are the same' do let(:other) do described_class.new(database, :users) end it 'returns true' do expect(collection).to eq(other) end end context 'when the databases are not the same' do let(:other_db) do Mongo::Database.new(authorized_client, :testing) end let(:other) do described_class.new(other_db, :users) end it 'returns false' do expect(collection).to_not eq(other) end end context 'when the options are the same' do let(:other) do described_class.new(database, :users) end it 'returns true' do expect(collection).to eq(other) end end context 'when the options are not the same' do let(:other) do described_class.new(database, :users, :capped => true) end it 'returns false' do expect(collection).to_not eq(other) end end end context 'when the names are not the same' do let(:other) do described_class.new(database, :sounds) end it 'returns false' do expect(collection).to_not eq(other) end end context 'when the object is not a collection' do it 'returns false' do expect(collection).to_not eq('test') end end end describe '#with' do let(:client) do Mongo::Client.new(ADDRESSES, TEST_OPTIONS) end let(:database) do Mongo::Database.new(client, :test) end let(:collection) do database.collection(:users) end let(:new_collection) do collection.with(new_options) end context 'when new read options are provided' do let(:new_options) do { read: { mode: :secondary } } end it 'returns a new collection' do expect(new_collection).not_to be(collection) end it 'sets the new read options on the new collection' do expect(new_collection.read_preference).to eq(new_options[:read]) end context 'when the client has a server selection timeout setting' do let(:client) do Mongo::Client.new(ADDRESSES, TEST_OPTIONS.merge(server_selection_timeout: 2)) end it 'passes the the server_selection_timeout to the cluster' do expect(client.cluster.options[:server_selection_timeout]).to eq(client.options[:server_selection_timeout]) end end context 'when the client has a read preference set' do let(:client) do Mongo::Client.new(ADDRESSES, TEST_OPTIONS.merge(read: { mode: :primary_preferred })) end it 'sets the new read options on the new collection' do expect(new_collection.read_preference).to eq(new_options[:read]) expect(new_collection.read_preference).not_to eq(client.read_preference) end end context 'when the client has a read preference and server selection timeout set' do let(:client) do Mongo::Client.new(ADDRESSES, 
TEST_OPTIONS.merge(read: { mode: :primary_preferred }, server_selection_timeout: 2)) end it 'sets the new read options on the new collection' do expect(new_collection.read_preference).to eq(new_options[:read]) end it 'passes the server_selection_timeout setting to the cluster' do expect(client.cluster.options[:server_selection_timeout]).to eq(client.options[:server_selection_timeout]) end end end context 'when new write options are provided' do let(:new_options) do { write: { w: 5 } } end it 'returns a new collection' do expect(new_collection).not_to be(collection) end it 'sets the new write options on the new collection' do expect(new_collection.write_concern.options).to eq(Mongo::WriteConcern.get(new_options[:write]).options) end context 'when the client has a write concern set' do let(:client) do Mongo::Client.new(ADDRESSES, TEST_OPTIONS.merge(write: INVALID_WRITE_CONCERN)) end it 'sets the new write options on the new collection' do expect(new_collection.write_concern.options).to eq(Mongo::WriteConcern.get(new_options[:write]).options) end end end context 'when new read and write options are provided' do let(:new_options) do { read: { mode: :secondary }, write: { w: 4} } end it 'returns a new collection' do expect(new_collection).not_to be(collection) end it 'sets the new read options on the new collection' do expect(new_collection.read_preference).to eq(new_options[:read]) end it 'sets the new write options on the new collection' do expect(new_collection.write_concern.options).to eq(Mongo::WriteConcern.get(new_options[:write]).options) end context 'when the client has a server selection timeout setting' do let(:client) do Mongo::Client.new(ADDRESSES, TEST_OPTIONS.merge(server_selection_timeout: 2)) end it 'passes the server_selection_timeout setting to the cluster' do expect(client.cluster.options[:server_selection_timeout]).to eq(client.options[:server_selection_timeout]) end end context 'when the client has a read preference set' do let(:client) do Mongo::Client.new(ADDRESSES, TEST_OPTIONS.merge(read: { mode: :primary_preferred })) end it 'sets the new read options on the new collection' do expect(new_collection.read_preference).to eq(new_options[:read]) expect(new_collection.read_preference).not_to be(client.read_preference) end end end context 'when neither read nor write options are provided' do let(:new_options) do { some_option: 'invalid' } end it 'raises an error' do expect { new_collection }.to raise_exception(Mongo::Error::UnchangeableCollectionOption) end end end describe '#read_preference' do let(:collection) do described_class.new(authorized_client.database, :users, options) end let(:options) { {} } context 'when a read preference is set in the options' do let(:options) do { read: { mode: :secondary } } end it 'returns the read preference' do expect(collection.read_preference).to eq(options[:read]) end end context 'when a read preference is not set in the options' do context 'when the database has a read preference set' do let(:client) do authorized_client.with(read: { mode: :secondary_preferred }) end let(:collection) do described_class.new(client.database, :users, options) end it 'returns the database read preference' do expect(collection.read_preference).to eq(BSON::Document.new({ mode: :secondary_preferred })) end end context 'when the database does not have a read preference' do it 'returns nil' do expect(collection.read_preference).to be_nil end end end end describe '#server_selector' do let(:collection) do described_class.new(authorized_client.database, :users, 
options) end let(:options) { {} } context 'when a read preference is set in the options' do let(:options) do { read: { mode: :secondary } } end it 'returns the server selector for that read preference' do expect(collection.server_selector).to be_a(Mongo::ServerSelector::Secondary) end end context 'when a read preference is not set in the options' do context 'when the database has a read preference set' do let(:client) do authorized_client.with(read: { mode: :secondary_preferred }) end let(:collection) do described_class.new(client.database, :users, options) end it 'returns the server selector for that read preference' do expect(collection.server_selector).to be_a(Mongo::ServerSelector::SecondaryPreferred) end end context 'when the database does not have a read preference' do it 'returns a primary server selector' do expect(collection.server_selector).to be_a(Mongo::ServerSelector::Primary) end end end end describe '#capped?' do let(:database) do authorized_client.database end context 'when the collection is capped' do let(:collection) do described_class.new(database, :specs, :capped => true, :size => 1024) end before do collection.create end after do collection.drop end it 'returns true' do expect(collection).to be_capped end end context 'when the collection is not capped' do let(:collection) do described_class.new(database, :specs) end before do collection.create end after do collection.drop end it 'returns false' do expect(collection).to_not be_capped end end end describe '#create' do let(:database) do authorized_client.database end context 'when the collection has no options' do let(:collection) do described_class.new(database, :specs) end let!(:response) do collection.create end after do collection.drop end it 'executes the command' do expect(response).to be_successful end it 'creates the collection in the database' do expect(database.collection_names).to include('specs') end end context 'when the collection has options' do context 'when the collection is capped' do shared_examples 'a capped collection command' do let!(:response) do collection.create end let(:options) do { :capped => true, :size => 1024 } end after do collection.drop end it 'executes the command' do expect(response).to be_successful end it 'sets the collection as capped' do expect(collection).to be_capped end it 'creates the collection in the database' do expect(database.collection_names).to include('specs') end end shared_examples 'a validated collection command' do let!(:response) do collection.create end let(:options) do { :validator => { fieldName: { '$gte' => 1024 } }, :validationLevel => 'strict' } end let(:collection_info) do database.list_collections.find { |i| i['name'] == 'specs' } end after do collection.drop end it 'executes the command' do expect(response).to be_successful end it 'sets the collection with validators' do expect(collection_info['options']['validator']).to eq({ 'fieldName' => { '$gte' => 1024 } }) end it 'creates the collection in the database' do expect(database.collection_names).to include('specs') end end context 'when instantiating a collection directly' do let(:collection) do described_class.new(database, :specs, options) end it_behaves_like 'a capped collection command' context 'when validators can be set', if: find_command_enabled? 
do it_behaves_like 'a validated collection command' end end context 'when instantiating a collection through the database' do let(:collection) do authorized_client[:specs, options] end it_behaves_like 'a capped collection command' context 'when validators can be set', if: find_command_enabled? do it_behaves_like 'a validated collection command' end end end context 'when the collection has a write concern' do after do database[:specs].drop end let(:options) do { write: INVALID_WRITE_CONCERN } end let(:collection) do described_class.new(database, :specs, options) end context 'when the server supports write concern on the create command', if: replica_set? && collation_enabled? do it 'applies the write concern' do expect{ collection.create }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not support write concern on the create command', unless: collation_enabled? do it 'does not apply the write concern' do expect(collection.create).to be_successful end end end context 'when the collection has a collation' do shared_examples 'a collection command with a collation option' do let(:response) do collection.create end let(:options) do { :collation => { locale: 'fr' } } end let(:collection_info) do database.list_collections.find { |i| i['name'] == 'specs' } end after do collection.drop end context 'when the server supports collations', if: collation_enabled? do it 'executes the command' do expect(response).to be_successful end it 'sets the collection with a collation' do response expect(collection_info['options']['collation']['locale']).to eq('fr') end it 'creates the collection in the database' do response expect(database.collection_names).to include('specs') end end context 'when the server does not support collations', unless: collation_enabled? 
do it 'raises an error' do expect { response }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'fr' } } end it 'raises an exception' do expect { response }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when instantiating a collection directly' do let(:collection) do described_class.new(database, :specs, options) end it_behaves_like 'a collection command with a collation option' end context 'when instantiating a collection through the database' do let(:collection) do authorized_client[:specs, options] end it_behaves_like 'a collection command with a collation option' end end context 'when a session is provided' do let(:collection) do authorized_client[:specs] end let(:operation) do collection.create(session: session) end let(:session) do authorized_client.start_session end let(:client) do authorized_client end let(:failed_operation) do authorized_client[:specs, invalid: true].create(session: session) end after do collection.drop end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end end end describe '#drop' do let(:database) do authorized_client.database end let(:collection) do described_class.new(database, :specs) end context 'when the collection exists' do before do collection.create end context 'when a session is provided' do let(:operation) do collection.drop(session: session) end let(:failed_operation) do collection.with(write: INVALID_WRITE_CONCERN).drop(session: session) end let(:session) do authorized_client.start_session end let(:client) do authorized_client end after do collection.drop end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when the collection does not have a write concern set' do let!(:response) do collection.drop end it 'executes the command' do expect(response).to be_successful end it 'drops the collection from the database' do expect(database.collection_names).to_not include('specs') end context 'when the collection does not exist' do it 'does not raise an error' do expect(database['non-existent-coll'].drop).to be(false) end end end context 'when the collection has a write concern' do let(:write_options) do { write: INVALID_WRITE_CONCERN } end let(:collection_with_write_options) do collection.with(write_options) end after do collection.drop end context 'when the server supports write concern on the drop command', if: collation_enabled? do it 'applies the write concern' do expect{ collection_with_write_options.drop }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not support write concern on the drop command', unless: collation_enabled? 
do it 'does not apply the write concern' do expect(collection_with_write_options.drop).to be_successful end end end end context 'when the collection does not exist' do it 'returns false' do expect(collection.drop).to be(false) end end end describe '#find' do describe 'updating cluster time' do let(:operation) do client[TEST_COLL].find.first end let(:operation_with_session) do client[TEST_COLL].find({}, session: session).first end let(:second_operation) do client[TEST_COLL].find({}, session: session).first end it_behaves_like 'an operation updating cluster time' end context 'when provided a filter' do let(:view) do authorized_collection.find(name: 1) end it 'returns a authorized_collection view for the filter' do expect(view.filter).to eq('name' => 1) end end context 'when provided no filter' do let(:view) do authorized_collection.find end it 'returns a authorized_collection view with an empty filter' do expect(view.filter).to be_empty end end context 'when providing a bad filter' do let(:view) do authorized_collection.find('$or' => []) end it 'raises an exception when iterating' do expect { view.to_a }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when iterating the authorized_collection view' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let(:view) do authorized_collection.find end it 'iterates over the documents' do view.each do |document| expect(document).to_not be_nil end end end context 'when the user is not authorized', if: auth_enabled? do let(:view) do unauthorized_collection.find end it 'iterates over the documents' do expect { view.each{ |document| document } }.to raise_error(Mongo::Error::OperationFailure) end end context 'when documents contain potential error message fields' do [ Mongo::Error::ERRMSG, Mongo::Error::ERROR, Mongo::Operation::Result::OK ].each do |field| context "when the document contains a '#{field}' field" do let(:value) do 'testing' end let(:view) do authorized_collection.find end before do authorized_collection.insert_one({ field => value }) end it 'iterates over the documents' do view.each do |document| expect(document[field]).to eq(value) end end end end end context 'when provided options' do context 'when a session is provided' do let(:operation) do authorized_collection.find({}, session: session).to_a end let(:session) do authorized_client.start_session end let(:failed_operation) do client[authorized_collection.name].find({ '$._id' => 1 }, session: session).to_a end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'session id', if: test_sessions? 
do let(:options) do { session: session } end let(:client) do subscribed_client end let(:session) do client.start_session end let(:view) do Mongo::Collection::View.new(client[TEST_COLL], selector, view_options) end let(:command) do client[TEST_COLL].find({}, session: session).explain EventSubscriber.started_events.find { |c| c.command_name == :explain }.command end it 'sends the session id' do expect(command['lsid']).to eq(session.session_id) end end context 'when a session supporting causal consistency is used' do let(:operation) do collection.find({}, session: session).to_a end let(:command) do operation EventSubscriber.started_events.find { |cmd| cmd.command_name == 'find' }.command end it_behaves_like 'an operation supporting causally consistent reads' end let(:view) do authorized_collection.find({}, options) end context 'when provided :allow_partial_results' do let(:options) do { allow_partial_results: true } end it 'returns a view with :allow_partial_results set' do expect(view.options[:allow_partial_results]).to be(options[:allow_partial_results]) end end context 'when provided :batch_size' do let(:options) do { batch_size: 100 } end it 'returns a view with :batch_size set' do expect(view.options[:batch_size]).to eq(options[:batch_size]) end end context 'when provided :comment' do let(:options) do { comment: 'slow query' } end it 'returns a view with :comment set' do expect(view.modifiers[:$comment]).to eq(options[:comment]) end end context 'when provided :cursor_type' do let(:options) do { cursor_type: :tailable } end it 'returns a view with :cursor_type set' do expect(view.options[:cursor_type]).to eq(options[:cursor_type]) end end context 'when provided :max_time_ms' do let(:options) do { max_time_ms: 500 } end it 'returns a view with :max_time_ms set' do expect(view.modifiers[:$maxTimeMS]).to eq(options[:max_time_ms]) end end context 'when provided :modifiers' do let(:options) do { modifiers: { '$orderby' => Mongo::Index::ASCENDING } } end it 'returns a view with modifiers set' do expect(view.modifiers).to eq(options[:modifiers]) end it 'dups the modifiers hash' do expect(view.modifiers).not_to be(options[:modifiers]) end end context 'when provided :no_cursor_timeout' do let(:options) do { no_cursor_timeout: true } end it 'returns a view with :no_cursor_timeout set' do expect(view.options[:no_cursor_timeout]).to eq(options[:no_cursor_timeout]) end end context 'when provided :oplog_replay' do let(:options) do { oplog_replay: false } end it 'returns a view with :oplog_replay set' do expect(view.options[:oplog_replay]).to eq(options[:oplog_replay]) end end context 'when provided :projection' do let(:options) do { projection: { 'x' => 1 } } end it 'returns a view with :projection set' do expect(view.options[:projection]).to eq(options[:projection]) end end context 'when provided :skip' do let(:options) do { skip: 5 } end it 'returns a view with :skip set' do expect(view.options[:skip]).to eq(options[:skip]) end end context 'when provided :sort' do let(:options) do { sort: { 'x' => Mongo::Index::ASCENDING } } end it 'returns a view with :sort set' do expect(view.modifiers[:$orderby]).to eq(options[:sort]) end end context 'when provided :collation' do let(:options) do { collation: { 'locale' => 'en_US' } } end it 'returns a view with :collation set' do expect(view.options[:collation]).to eq(options[:collation]) end end end end describe '#insert_many' do let(:result) do authorized_collection.insert_many([{ name: 'test1' }, { name: 'test2' }]) end it 'inserts the documents into the 
collection' do expect(result.inserted_count).to eq(2) end it 'contains the ids in the result' do expect(result.inserted_ids.size).to eq(2) end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.insert_many([{ name: 'test1' }, { name: 'test2' }], session: session) end let(:failed_operation) do authorized_collection.insert_many([{ _id: 'test1' }, { _id: 'test1' }], session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:operation) do collection_with_unacknowledged_write_concern.insert_many([{ name: 'test1' }, { name: 'test2' }], session: session) end it_behaves_like 'a causally consistent client session with an unacknowledged write' end context 'when a document contains invalid keys' do let(:docs) do [ { 'first.name' => 'test1' }, { name: 'test2' } ] end it 'raises a BSON::String::IllegalKey exception' do expect { authorized_collection.insert_many(docs) }.to raise_exception(BSON::String::IllegalKey) end end context 'when the client has a custom id generator' do let(:generator) do Class.new do def generate 1 end end.new end let(:custom_client) do authorized_client.with(id_generator: generator) end let(:custom_collection) do custom_client[TEST_COLL] end before do custom_collection.insert_many([{ name: 'testing' }]) end after do custom_client.close end it 'inserts with the custom id' do expect(custom_collection.find.first[:_id]).to eq(1) end end context 'when the inserts fail' do let(:result) do authorized_collection.insert_many([{ _id: 1 }, { _id: 1 }]) end it 'raises an BulkWriteError' do expect { result }.to raise_exception(Mongo::Error::BulkWriteError) end end context "when the documents exceed the max bson size" do let(:documents) do [{ '_id' => 1, 'name' => '1'*17000000 }] end it 'raises a MaxBSONSize error' do expect { authorized_collection.insert_many(documents) }.to raise_error(Mongo::Error::MaxBSONSize) end end context 'when the documents are sent with OP_MSG', if: op_msg_enabled? do let(:client) do subscribed_client end let(:documents) do [{ '_id' => 1, 'name' => '1'*16777191 }, { '_id' => 'y' }] end before do client[TEST_COLL].insert_many(documents) end after do client.close end let(:insert_events) do EventSubscriber.started_events.select { |e| e.command_name == :insert } end it 'sends the documents in one OP_MSG' do expect(insert_events.size).to eq(1) expect(insert_events[0].command['documents']).to eq(documents) end end context 'when collection has a validator', if: find_command_enabled? 
do around(:each) do |spec| authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end spec.run collection_with_validator.drop end context 'when the document is valid' do let(:result) do collection_with_validator.insert_many([{ a: 1 }, { a: 2 }]) end it 'inserts successfully' do expect(result.inserted_count).to eq(2) end end context 'when the document is invalid' do context 'when bypass_document_validation is not set' do let(:result2) do collection_with_validator.insert_many([{ x: 1 }, { x: 2 }]) end it 'raises a BulkWriteError' do expect { result2 }.to raise_exception(Mongo::Error::BulkWriteError) end end context 'when bypass_document_validation is true' do let(:result3) do collection_with_validator.insert_many( [{ x: 1 }, { x: 2 }], :bypass_document_validation => true) end it 'inserts successfully' do expect(result3.inserted_count).to eq(2) end end end end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:result) do collection_with_unacknowledged_write_concern.insert_many([{ _id: 1 }, { _id: 1 }]) end it 'does not raise an exception' do expect(result.inserted_count).to be(0) end end end describe '#insert_one' do describe 'updating cluster time' do let(:operation) do client[TEST_COLL].insert_one({ name: 'testing' }) end let(:operation_with_session) do client[TEST_COLL].insert_one({ name: 'testing' }, session: session) end let(:second_operation) do client[TEST_COLL].insert_one({ name: 'testing' }, session: session) end it_behaves_like 'an operation updating cluster time' end let(:result) do authorized_collection.insert_one({ name: 'testing' }) end it 'inserts the document into the collection'do expect(result.written_count).to eq(1) end it 'contains the id in the result' do expect(result.inserted_id).to_not be_nil end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.insert_one({ name: 'testing' }, session: session) end let(:failed_operation) do authorized_collection.insert_one({ _id: 'testing' }) authorized_collection.insert_one({ _id: 'testing' }, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:operation) do collection_with_unacknowledged_write_concern.insert_one({ name: 'testing' }, session: session) end it_behaves_like 'a causally consistent client session with an unacknowledged write' end context 'when the document contains invalid keys' do let(:doc) do { 'testing.test' => 'value' } end it 'raises a BSON::String::IllegalKey exception' do expect { authorized_collection.insert_one(doc) }.to raise_exception(BSON::String::IllegalKey) end end context 'when the insert fails' do let(:result) do authorized_collection.insert_one(_id: 1) authorized_collection.insert_one(_id: 1) end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the client has a custom id generator' do let(:generator) do Class.new do def generate 1 end end.new end let(:custom_client) do authorized_client.with(id_generator: generator) end let(:custom_collection) do custom_client[TEST_COLL] end before do custom_collection.insert_one({ name: 'testing' }) end 
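# Note: the :id_generator option passed to Client#with presumably only needs to
# respond to #generate; the anonymous stub defined above always returns 1, which is
# why the example below expects the inserted document's _id to equal 1. A
# hypothetical real-world generator might look like:
#   class SequentialIdGenerator
#     def initialize; @counter = 0; end
#     def generate; @counter += 1; end
#   end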
after do custom_client.close end it 'inserts with the custom id' do expect(custom_collection.find.first[:_id]).to eq(1) end end context 'when collection has a validator', if: find_command_enabled? do around(:each) do |spec| authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end spec.run collection_with_validator.drop end context 'when the document is valid' do let(:result) do collection_with_validator.insert_one({ a: 1 }) end it 'inserts successfully' do expect(result.written_count).to eq(1) end end context 'when the document is invalid' do context 'when bypass_document_validation is not set' do let(:result2) do collection_with_validator.insert_one({ x: 1 }) end it 'raises a OperationFailure' do expect { result2 }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when bypass_document_validation is true' do let(:result3) do collection_with_validator.insert_one( { x: 1 }, :bypass_document_validation => true) end it 'inserts successfully' do expect(result3.written_count).to eq(1) end end end end end describe '#inspect' do it 'includes the object id' do expect(authorized_collection.inspect).to include(authorized_collection.object_id.to_s) end it 'includes the namespace' do expect(authorized_collection.inspect).to include(authorized_collection.namespace) end end describe '#indexes' do let(:index_spec) do { name: 1 } end let(:batch_size) { nil } let(:index_names) do authorized_collection.indexes(batch_size: batch_size).collect { |i| i['name'] } end before do authorized_collection.indexes.create_one(index_spec, unique: true) end after do authorized_collection.indexes.drop_one('name_1') end it 'returns a list of indexes' do expect(index_names).to include(*'name_1', '_id_') end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.indexes(batch_size: batch_size, session: session).collect { |i| i['name'] } end let(:failed_operation) do authorized_collection.indexes(batch_size: -100, session: session).collect { |i| i['name'] } end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when batch size is specified' do let(:batch_size) { 1 } it 'returns a list of indexes' do expect(index_names).to include(*'name_1', '_id_') end end end describe '#aggregate' do describe 'updating cluster time' do let(:operation) do client[TEST_COLL].aggregate([]).first end let(:operation_with_session) do client[TEST_COLL].aggregate([], session: session).first end let(:second_operation) do client[TEST_COLL].aggregate([], session: session).first end it_behaves_like 'an operation updating cluster time' end context 'when a session supporting causal consistency is used' do let(:operation) do collection.aggregate([], session: session).first end let(:command) do operation EventSubscriber.started_events.find { |cmd| cmd.command_name == 'aggregate' }.command end it_behaves_like 'an operation supporting causally consistent reads' end it 'returns an Aggregation object' do expect(authorized_collection.aggregate([])).to be_a(Mongo::Collection::View::Aggregation) end context 'when options are provided' do let(:options) do { :allow_disk_use => true, :bypass_document_validation => true } end it 'sets the options on the Aggregation object' do expect(authorized_collection.aggregate([], options).options).to eq(BSON::Document.new(options)) end context 'when the :comment option is provided' do 
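# The :comment option is expected to be carried through unchanged onto the
# Aggregation object's options, hence the comparison against
# BSON::Document.new(options) in the example below.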
let(:options) do { :comment => 'testing' } end it 'sets the options on the Aggregation object' do expect(authorized_collection.aggregate([], options).options).to eq(BSON::Document.new(options)) end end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.aggregate([], session: session).to_a end let(:failed_operation) do authorized_collection.aggregate([ { '$invalid' => 1 }], session: session).to_a end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when a hint is provided' do let(:options) do { 'hint' => { 'y' => 1 } } end it 'sets the options on the Aggregation object' do expect(authorized_collection.aggregate([], options).options).to eq(options) end end context 'when collation is provided' do before do authorized_collection.insert_many([ { name: 'bang' }, { name: 'bang' }]) end let(:pipeline) do [{ "$match" => { "name" => "BANG" } }] end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end let(:result) do authorized_collection.aggregate(pipeline, options).collect { |doc| doc['name']} end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result).to eq(['bang', 'bang']) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end end end describe '#count' do let(:documents) do (1..10).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end it 'returns an integer count' do expect(authorized_collection.count).to eq(10) end context 'when options are provided' do it 'passes the options to the count' do expect(authorized_collection.count({}, limit: 5)).to eq(5) end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.count({}, session: session) end let(:failed_operation) do authorized_collection.count({ '$._id' => 1 }, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when a session supporting causal consistency is used' do let(:operation) do collection.count({}, session: session) end let(:command) do operation EventSubscriber.started_events.find { |cmd| cmd.command_name == :count }.command end it_behaves_like 'an operation supporting causally consistent reads' end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.count(selector, options) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation to the count' do expect(result).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end end end describe '#distinct' do let(:documents) do (1..3).map{ |i| { field: "test#{i}" }} end before do authorized_collection.insert_many(documents) end it 'returns the distinct values' do expect(authorized_collection.distinct(:field).sort).to eq([ 'test1', 'test2', 'test3' ]) end context 'when a selector is provided' do it 'returns the distinct values' do expect(authorized_collection.distinct(:field, field: 'test1')).to eq([ 'test1' ]) end end context 'when options are provided' do it 'passes the options to the distinct command' do expect(authorized_collection.distinct(:field, {}, max_time_ms: 100).sort).to eq([ 'test1', 'test2', 'test3' ]) end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.distinct(:field, {}, session: session) end let(:failed_operation) do authorized_collection.distinct(:field, { '$._id' => 1 }, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end end context 'when a session supporting causal consistency is used' do let(:operation) do collection.distinct(:field, {}, session: session) end let(:command) do operation EventSubscriber.started_events.find { |cmd| cmd.command_name == :distinct }.command end it_behaves_like 'an operation supporting causally consistent reads' end context 'when a collation is specified' do let(:result) do authorized_collection.distinct(:name, {}, options) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'BANG') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation to the distinct' do expect(result).to eq(['bang']) end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:result) do authorized_collection.distinct(:name) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'BANG') end it 'does not apply the collation to the distinct' do expect(result).to match_array(['bang', 'BANG']) end end end describe '#delete_one' do context 'when a selector was provided' do let(:selector) do { field: 'test1' } end before do authorized_collection.insert_many([ { field: 'test1' }, { field: 'test1' }, { field: 'test1' } ]) end let(:response) do authorized_collection.delete_one(selector) end it 'deletes the first matching document in the collection' do expect(response.deleted_count).to eq(1) end end context 'when no selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end let(:response) do authorized_collection.delete_one end it 'deletes the first document in the collection' do expect(response.deleted_count).to eq(1) end end context 'when the delete fails', if: standalone? do let(:result) do collection_invalid_write_concern.delete_one end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.delete_one({}, session: session) end let(:failed_operation) do authorized_collection.delete_one({ '$._id' => 1}, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:operation) do collection_with_unacknowledged_write_concern.delete_one({}, session: session) end it_behaves_like 'a causally consistent client session with an unacknowledged write' end context 'when a collation is provided' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.delete_one(selector, options) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(name: 'bang').count).to eq(0) end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:result) do collection_with_unacknowledged_write_concern.delete_one(selector, options) end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.delete_one(selector) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) expect(authorized_collection.find(name: 'bang').count).to eq(1) end end end describe '#delete_many' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test2' }]) end context 'when a selector was provided' do let(:selector) do { field: 'test1' } end it 'deletes the matching documents in the collection' do expect(authorized_collection.delete_many(selector).deleted_count).to eq(1) end end context 'when no selector was provided' do it 'deletes all the documents in the collection' do expect(authorized_collection.delete_many.deleted_count).to eq(2) end end context 'when the deletes fail', if: standalone? do let(:result) do collection_invalid_write_concern.delete_many end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when a session is provided' do let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.delete_many({}, session: session) end let(:failed_operation) do authorized_collection.delete_many({ '$._id' => 1}, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:operation) do collection_with_unacknowledged_write_concern.delete_many({ '$._id' => 1}, session: session) end it_behaves_like 'a causally consistent client session with an unacknowledged write' end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.delete_many(selector, options) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(2) expect(authorized_collection.find(name: 'bang').count).to eq(0) end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:result) do collection_with_unacknowledged_write_concern.delete_many(selector, options) end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.delete_many(selector) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) expect(authorized_collection.find(name: 'bang').count).to eq(2) end end end describe '#parallel_scan', unless: sharded? do let(:documents) do (1..200).map do |i| { name: "testing-scan-#{i}" } end end before do authorized_collection.insert_many(documents) end let(:cursors) do authorized_collection.parallel_scan(2) end it 'returns an array of cursors' do cursors.each do |cursor| expect(cursor.class).to be(Mongo::Cursor) end end it 'returns the correct number of documents' do expect( cursors.reduce(0) { |total, cursor| total + cursor.to_a.size } ).to eq(200) end context 'when a session is provided' do let(:cursors) do authorized_collection.parallel_scan(2, session: session) end let(:operation) do cursors.reduce(0) { |total, cursor| total + cursor.to_a.size } end let(:failed_operation) do authorized_collection.parallel_scan(-2, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when a session supporting causal consistency is used' do let(:cursors) do collection.parallel_scan(2, session: session) end let(:operation) do cursors.reduce(0) { |total, cursor| total + cursor.to_a.size } end let(:command) do operation EventSubscriber.started_events.find { |cmd| cmd.command_name == :parallelCollectionScan }.command end it_behaves_like 'an operation supporting causally consistent reads' end context 'when a read concern is provided', if: find_command_enabled? do let(:result) do authorized_collection.with(options).parallel_scan(2) end context 'when the read concern is valid' do let(:options) do { read_concern: { level: 'local' }} end it 'sends the read concern' do expect { result }.to_not raise_error end end context 'when the read concern is not valid' do let(:options) do { read_concern: { level: 'idontknow' }} end it 'raises an exception' do expect { result }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when the collection has a read preference', unless: sharded? do before do allow(collection.client.cluster).to receive(:single?).and_return(false) end after do client.close end let(:client) do authorized_client.with(server_selection_timeout: 0.2) end let(:collection) do client[authorized_collection.name, read: { :mode => :secondary, :tag_sets => [{ 'non' => 'existent' }] }] end let(:result) do collection.parallel_scan(2) end it 'uses that read preference' do expect { result }.to raise_exception(Mongo::Error::NoServerAvailable) end end context 'when a max time ms value is provided', if: !sharded? 
do let(:result) do authorized_collection.parallel_scan(2, options) end context 'when the read concern is valid' do let(:options) do { max_time_ms: 5 } end it 'sends the max time ms value' do expect { result }.to_not raise_error end end context 'when the max time ms is not valid' do let(:options) do { max_time_ms: 0.1 } end it 'raises an exception' do expect { result }.to raise_error(Mongo::Error::OperationFailure) end end end end describe '#replace_one' do let(:selector) do { field: 'test1' } end context 'when a selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test1' }]) end let!(:response) do authorized_collection.replace_one(selector, { field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').first end it 'updates the first matching document in the collection' do expect(response.modified_count).to eq(1) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is false' do let!(:response) do authorized_collection.replace_one(selector, { field: 'test1' }, upsert: false) end let(:updated) do authorized_collection.find(field: 'test1').to_a end it 'reports that no documents were written' do expect(response.modified_count).to eq(0) end it 'does not insert the document' do expect(updated).to be_empty end end context 'when upsert is true' do let!(:response) do authorized_collection.replace_one(selector, { field: 'test1' }, upsert: true) end let(:updated) do authorized_collection.find(field: 'test1').first end it 'reports that a document was written' do expect(response.written_count).to eq(1) end it 'inserts the document' do expect(updated[:field]).to eq('test1') end end context 'when upsert is not specified' do let!(:response) do authorized_collection.replace_one(selector, { field: 'test1' }) end let(:updated) do authorized_collection.find(field: 'test1').to_a end it 'reports that no documents were written' do expect(response.modified_count).to eq(0) end it 'does not insert the document' do expect(updated).to be_empty end end context 'when the replace fails' do let(:result) do authorized_collection.replace_one(selector, { '$s' => 'test1' }) end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when collection has a validator', if: find_command_enabled? 
do around(:each) do |spec| authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end spec.run collection_with_validator.drop end before do collection_with_validator.insert_one({ a: 1 }) end context 'when the document is valid' do let(:result) do collection_with_validator.replace_one({ a: 1 }, { a: 5 }) end it 'replaces successfully' do expect(result.modified_count).to eq(1) end end context 'when the document is invalid' do context 'when bypass_document_validation is not set' do let(:result2) do collection_with_validator.replace_one({ a: 1 }, { x: 5 }) end it 'raises OperationFailure' do expect { result2 }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when bypass_document_validation is true' do let(:result3) do collection_with_validator.replace_one( { a: 1 }, { x: 1 }, :bypass_document_validation => true) end it 'replaces successfully' do expect(result3.written_count).to eq(1) end end end end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.replace_one(selector, { name: 'doink' }, options) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(name: 'doink').count).to eq(1) end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:result) do collection_with_unacknowledged_write_concern.replace_one(selector, { name: 'doink' }, options) end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.replace_one(selector, { name: 'doink' }) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) expect(authorized_collection.find(name: 'bang').count).to eq(1) end end context 'when a session is provided' do let(:selector) do { name: 'BANG' } end before do authorized_collection.insert_one(name: 'bang') end let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.replace_one(selector, { name: 'doink' }, session: session) end let(:failed_operation) do authorized_collection.replace_one({ '$._id' => 1 }, { name: 'doink' }, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:operation) do collection_with_unacknowledged_write_concern.replace_one({ a: 1 }, { x: 5 }, session: session) end it_behaves_like 'a causally consistent client session with an unacknowledged write' end end describe '#update_many' do let(:selector) do { field: 'test' } end context 'when a selector was provided' do before do authorized_collection.insert_many([{ field: 'test' }, { field: 'test' }]) end let!(:response) do authorized_collection.update_many(selector, '$set'=> { field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').to_a.last end it 'returns the number updated' do expect(response.modified_count).to eq(2) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is false' do let(:response) do authorized_collection.update_many(selector, { '$set'=> { field: 'testing' } }, upsert: false) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' do expect(response.modified_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when upsert is true' do let!(:response) do authorized_collection.update_many(selector, { '$set'=> { field: 'testing' } }, upsert: true) end let(:updated) do authorized_collection.find.to_a.last end it 'reports that a document was written' do expect(response.written_count).to eq(1) end it 'inserts a document into the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is not specified' do let(:response) do authorized_collection.update_many(selector, { '$set'=> { field: 'testing' } }) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' do expect(response.modified_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when arrayFilters is provided' do let(:selector) do { '$or' => [{ _id: 0 }, { _id: 1 }]} end context 'when the server supports arrayFilters', if: array_filters_enabled? 
do before do authorized_collection.insert_many([{ _id: 0, x: [ { y: 1 }, { y: 2 }, { y: 3 } ] }, { _id: 1, x: [ { y: 3 }, { y: 2 }, { y: 1 } ] }]) end let(:result) do authorized_collection.update_many(selector, { '$set' => { 'x.$[i].y' => 5 } }, options) end context 'when a Symbol key is used' do let(:options) do { array_filters: [{ 'i.y' => 3 }] } end it 'applies the arrayFilters' do expect(result.matched_count).to eq(2) expect(result.modified_count).to eq(2) docs = authorized_collection.find(selector, sort: { _id: 1 }).to_a expect(docs[0]['x']).to eq ([{ 'y' => 1 }, { 'y' => 2 }, { 'y' => 5 }]) expect(docs[1]['x']).to eq ([{ 'y' => 5 }, { 'y' => 2 }, { 'y' => 1 }]) end end context 'when a String key is used' do let(:options) do { 'array_filters' => [{ 'i.y' => 3 }] } end it 'applies the arrayFilters' do expect(result.matched_count).to eq(2) expect(result.modified_count).to eq(2) docs = authorized_collection.find({}, sort: { _id: 1 }).to_a expect(docs[0]['x']).to eq ([{ 'y' => 1 }, { 'y' => 2 }, { 'y' => 5 }]) expect(docs[1]['x']).to eq ([{ 'y' => 5 }, { 'y' => 2 }, { 'y' => 1 }]) end end end context 'when the server does not support arrayFilters', unless: array_filters_enabled? do let(:result) do authorized_collection.update_many(selector, { '$set' => { 'x.$[i].y' => 5 } }, options) end context 'when a Symbol key is used' do let(:options) do { array_filters: [{ 'i.y' => 3 }] } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end context 'when a String key is used' do let(:options) do { 'array_filters' => [{ 'i.y' => 3 }] } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end end end context 'when the updates fail' do let(:result) do authorized_collection.update_many(selector, { '$s'=> { field: 'testing' } }) end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when collection has a validator', if: find_command_enabled? 
do around(:each) do |spec| authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end spec.run collection_with_validator.drop end before do collection_with_validator.insert_many([{ a: 1 }, { a: 2 }]) end context 'when the document is valid' do let(:result) do collection_with_validator.update_many( { :a => { '$gt' => 0 } }, '$inc' => { :a => 1 } ) end it 'updates successfully' do expect(result.modified_count).to eq(2) end end context 'when the document is invalid' do context 'when bypass_document_validation is not set' do let(:result2) do collection_with_validator.update_many( { :a => { '$gt' => 0 } }, '$unset' => { :a => '' }) end it 'raises OperationFailure' do expect { result2 }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when bypass_document_validation is true' do let(:result3) do collection_with_validator.update_many( { :a => { '$gt' => 0 } }, { '$unset' => { :a => '' } }, :bypass_document_validation => true) end it 'updates successfully' do expect(result3.written_count).to eq(2) end end end end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.update_many(selector, { '$set' => { other: 'doink' } }, options) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'baNG') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(2) expect(authorized_collection.find(other: 'doink').count).to eq(2) end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:result) do collection_with_unacknowledged_write_concern.update_many(selector, { '$set' => { other: 'doink' } }, options) end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is not specified' do let(:selector) do {name: 'BANG'} end let(:result) do authorized_collection.update_many(selector, { '$set' => {other: 'doink'} }) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'baNG') end it 'does not apply the collation' do expect(result.written_count).to eq(0) end end context 'when a session is provided' do let(:selector) do { name: 'BANG' } end let(:operation) do authorized_collection.update_many(selector, { '$set' => {other: 'doink'} }, session: session) end before do authorized_collection.insert_one(name: 'bang') authorized_collection.insert_one(name: 'baNG') end let(:session) do authorized_client.start_session end let(:failed_operation) do authorized_collection.update_many({ '$._id' => 1 }, { '$set' => {other: 'doink'} }, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:operation) do collection_with_unacknowledged_write_concern.update_many({a: 1}, { '$set' => {x: 1} }, session: session) end it_behaves_like 'a causally consistent client session with an unacknowledged write' end end describe '#update_one' do let(:selector) do { field: 'test1' } end context 'when a selector was provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test1' }]) end let!(:response) do authorized_collection.update_one(selector, '$set'=> { field: 'testing' }) end let(:updated) do authorized_collection.find(field: 'testing').first end it 'updates the first matching document in the collection' do expect(response.modified_count).to eq(1) end it 'updates the documents in the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is false' do let(:response) do authorized_collection.update_one(selector, { '$set'=> { field: 'testing' } }, upsert: false) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' do expect(response.modified_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when upsert is true' do let!(:response) do authorized_collection.update_one(selector, { '$set'=> { field: 'testing' } }, upsert: true) end let(:updated) do authorized_collection.find.first end it 'reports that a document was written' do expect(response.written_count).to eq(1) end it 'inserts a document into the collection' do expect(updated[:field]).to eq('testing') end end context 'when upsert is not specified' do let(:response) do authorized_collection.update_one(selector, { '$set'=> { field: 'testing' } }) end let(:updated) do authorized_collection.find.to_a end it 'reports that no documents were updated' do expect(response.modified_count).to eq(0) end it 'updates no documents in the collection' do expect(updated).to be_empty end end context 'when the update fails' do let(:result) do authorized_collection.update_one(selector, { '$s'=> { field: 'testing' } }) end it 'raises 
an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when collection has a validator', if: find_command_enabled? do around(:each) do |spec| authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end spec.run collection_with_validator.drop end before do collection_with_validator.insert_one({ a: 1 }) end context 'when the document is valid' do let(:result) do collection_with_validator.update_one( { :a => { '$gt' => 0 } }, '$inc' => { :a => 1 } ) end it 'updates successfully' do expect(result.modified_count).to eq(1) end end context 'when the document is invalid' do context 'when bypass_document_validation is not set' do let(:result2) do collection_with_validator.update_one( { :a => { '$gt' => 0 } }, '$unset' => { :a => '' }) end it 'raises OperationFailure' do expect { result2 }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when bypass_document_validation is true' do let(:result3) do collection_with_validator.update_one( { :a => { '$gt' => 0 } }, { '$unset' => { :a => '' } }, :bypass_document_validation => true) end it 'updates successfully' do expect(result3.written_count).to eq(1) end end end end context 'when there is a collation specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.update_one(selector, { '$set' => { other: 'doink' } }, options) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result.written_count).to eq(1) expect(authorized_collection.find(other: 'doink').count).to eq(1) end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:result) do collection_with_unacknowledged_write_concern.update_one(selector, { '$set' => { other: 'doink' } }, options) end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when a collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.update_one(selector, { '$set' => { other: 'doink' } }) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result.written_count).to eq(0) end end context 'when arrayFilters is provided' do let(:selector) do { _id: 0} end context 'when the server supports arrayFilters', if: array_filters_enabled? 
do before do authorized_collection.insert_one(_id: 0, x: [{ y: 1 }, { y: 2 }, {y: 3 }]) end let(:result) do authorized_collection.update_one(selector, { '$set' => { 'x.$[i].y' => 5 } }, options) end context 'when a Symbol key is used' do let(:options) do { array_filters: [{ 'i.y' => 3 }] } end it 'applies the arrayFilters' do expect(result.matched_count).to eq(1) expect(result.modified_count).to eq(1) expect(authorized_collection.find(selector).first['x'].last['y']).to eq(5) end end context 'when a String key is used' do let(:options) do { 'array_filters' => [{ 'i.y' => 3 }] } end it 'applies the arrayFilters' do expect(result.matched_count).to eq(1) expect(result.modified_count).to eq(1) expect(authorized_collection.find(selector).first['x'].last['y']).to eq(5) end end end context 'when the server does not support arrayFilters', unless: array_filters_enabled? do let(:result) do authorized_collection.update_one(selector, { '$set' => { 'x.$[i].y' => 5 } }, options) end context 'when a Symbol key is used' do let(:options) do { array_filters: [{ 'i.y' => 3 }] } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end context 'when a String key is used' do let(:options) do { 'array_filters' => [{ 'i.y' => 3 }] } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end end end context 'when the documents are sent with OP_MSG', if: op_msg_enabled? do let(:client) do subscribed_client end let(:documents) do [{ '_id' => 1, 'name' => '1'*16777191 }, { '_id' => 'y' }] end before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test1' }]) client[TEST_COLL].update_one({ a: 1 }, {'$set' => { 'name' => '1'*16777149 }}) end after do client.close end let(:update_events) do EventSubscriber.started_events.select { |e| e.command_name == :update } end it 'sends the documents in one OP_MSG' do expect(update_events.size).to eq(1) end end context 'when a session is provided' do before do authorized_collection.insert_many([{ field: 'test1' }, { field: 'test1' }]) end let(:session) do authorized_client.start_session end let(:operation) do authorized_collection.update_one({ field: 'test' }, { '$set'=> { field: 'testing' } }, session: session) end let(:failed_operation) do authorized_collection.update_one({ '$._id' => 1 }, { '$set'=> { field: 'testing' } }, session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when unacknowledged writes is used' do let(:collection_with_unacknowledged_write_concern) do authorized_collection.with(write: { w: 0 }) end let(:operation) do collection_with_unacknowledged_write_concern.update_one({a: 1}, { '$set' => {x: 1} }, session: session) end it_behaves_like 'a causally consistent client session with an unacknowledged write' end end describe '#find_one_and_delete' do before do authorized_collection.insert_many([{ field: 'test1' }]) end let(:selector) do { field: 'test1' } end context 'when a matching document is found' do context 'when a session is provided' do let(:operation) do authorized_collection.find_one_and_delete(selector, session: session) end let(:failed_operation) do authorized_collection.find_one_and_delete({ '$._id' => 1 }, session: session) end let(:session) do authorized_client.start_session end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using 
a session' end context 'when no options are provided' do let!(:document) do authorized_collection.find_one_and_delete(selector) end it 'deletes the document from the database' do expect(authorized_collection.find.to_a).to be_empty end it 'returns the document' do expect(document['field']).to eq('test1') end end context 'when a projection is provided' do let!(:document) do authorized_collection.find_one_and_delete(selector, projection: { _id: 1 }) end it 'deletes the document from the database' do expect(authorized_collection.find.to_a).to be_empty end it 'returns the document with limited fields' do expect(document['field']).to be_nil expect(document['_id']).to_not be_nil end end context 'when a sort is provided' do let!(:document) do authorized_collection.find_one_and_delete(selector, sort: { field: 1 }) end it 'deletes the document from the database' do expect(authorized_collection.find.to_a).to be_empty end it 'returns the document' do expect(document['field']).to eq('test1') end end context 'when max_time_ms is provided' do it 'includes the max_time_ms value in the command' do expect { authorized_collection.find_one_and_delete(selector, max_time_ms: 0.1) }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when no matching document is found' do let(:selector) do { field: 'test5' } end let!(:document) do authorized_collection.find_one_and_delete(selector) end it 'returns nil' do expect(document).to be_nil end end context 'when the operation fails' do let(:result) do authorized_collection.find_one_and_delete(selector, max_time_ms: 0.1) end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when write_concern is provided', if: find_command_enabled? && standalone? do it 'uses the write concern' do expect { authorized_collection.find_one_and_delete(selector, write_concern: { w: 2 }) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the collection has a write concern', if: find_command_enabled? && standalone? do let(:collection) do authorized_collection.with(write: { w: 2 }) end it 'uses the write concern' do expect { collection.find_one_and_delete(selector, write_concern: { w: 2 }) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.find_one_and_delete(selector, options) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') expect(authorized_collection.find(name: 'bang').count).to eq(0) end end context 'when the server selected does not support collations', unless: collation_enabled?
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.find_one_and_delete(selector) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result).to be_nil end end end describe '#find_one_and_update' do let(:selector) do { field: 'test1' } end before do authorized_collection.insert_many([{ field: 'test1' }]) end context 'when a matching document is found' do context 'when no options are provided' do let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}) end it 'returns the original document' do expect(document['field']).to eq('test1') end end context 'when a session is provided' do let(:operation) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, session: session) end let(:failed_operation) do authorized_collection.find_one_and_update({ '$._id' => 1 }, { '$set' => { field: 'testing' }}, session: session) end let(:session) do authorized_client.start_session end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when no options are provided' do let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}) end it 'returns the original document' do expect(document['field']).to eq('test1') end end context 'when return_document options are provided' do context 'when return_document is :after' do let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, :return_document => :after) end it 'returns the new document' do expect(document['field']).to eq('testing') end end context 'when return_document is :before' do let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, :return_document => :before) end it 'returns the original document' do expect(document['field']).to eq('test1') end end end context 'when a projection is provided' do let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, projection: { _id: 1 }) end it 'returns the document with limited fields' do expect(document['field']).to be_nil expect(document['_id']).to_not be_nil end end context 'when a sort is provided' do let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, sort: { field: 1 }) end it 'returns the original document' do expect(document['field']).to eq('test1') end end end context 'when max_time_ms is provided' do it 'includes the max_time_ms value in the command' do expect { authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, max_time_ms: 0.1) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when no matching document is found' do let(:selector) do { field: 'test5' } end let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}) end it 'returns nil' do expect(document).to be_nil end end context 'when no matching document is found' do context 'when no upsert 
options are provided' do let(:selector) do { field: 'test5' } end let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}) end it 'returns nil' do expect(document).to be_nil end end context 'when upsert options are provided' do let(:selector) do { field: 'test5' } end let(:document) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, :upsert => true, :return_document => :after) end it 'returns the new document' do expect(document['field']).to eq('testing') end end end context 'when the operation fails' do let(:result) do authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, max_time_ms: 0.1) end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when collection has a validator', if: find_command_enabled? do around(:each) do |spec| authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end spec.run collection_with_validator.drop end before do collection_with_validator.insert_one({ a: 1 }) end context 'when the document is valid' do let(:result) do collection_with_validator.find_one_and_update( { a: 1 }, { '$inc' => { :a => 1 } }, :return_document => :after) end it 'updates successfully' do expect(result['a']).to eq(2) end end context 'when the document is invalid' do context 'when bypass_document_validation is not set' do let(:result2) do collection_with_validator.find_one_and_update( { a: 1 }, { '$unset' => { :a => '' } }, :return_document => :after) end it 'raises OperationFailure' do expect { result2 }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when bypass_document_validation is true' do let(:result3) do collection_with_validator.find_one_and_update( { a: 1 }, { '$unset' => { :a => '' } }, :bypass_document_validation => true, :return_document => :after) end it 'updates successfully' do expect(result3['a']).to be_nil end end end end context 'when write_concern is provided', if: find_command_enabled? && standalone? do it 'uses the write concern' do expect { authorized_collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, write_concern: { w: 2 }) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the collection has a write concern', if: find_command_enabled? && standalone? do let(:collection) do authorized_collection.with(write: { w: 2 }) end it 'uses the write concern' do expect { collection.find_one_and_update(selector, { '$set' => { field: 'testing' }}, write_concern: { w: 2 }) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when a collation is specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.find_one_and_update(selector, { '$set' => { other: 'doink' } }, options) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') expect(authorized_collection.find({ name: 'bang' }, limit: -1).first['other']).to eq('doink') end end context 'when the server selected does not support collations', unless: collation_enabled? 
do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when there is no collation specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.find_one_and_update(selector, { '$set' => { other: 'doink' } }) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result).to be_nil end end context 'when arrayFilters is provided' do let(:selector) do { _id: 0 } end context 'when the server supports arrayFilters', if: array_filters_enabled? do before do authorized_collection.insert_one(_id: 0, x: [{ y: 1 }, { y: 2 }, { y: 3 }]) end let(:result) do authorized_collection.find_one_and_update(selector, { '$set' => { 'x.$[i].y' => 5 } }, options) end context 'when a Symbol key is used' do let(:options) do { array_filters: [{ 'i.y' => 3 }] } end it 'applies the arrayFilters' do expect(result['x']).to eq([{ 'y' => 1 }, { 'y' => 2 }, { 'y' => 3 }]) expect(authorized_collection.find(selector).first['x'].last['y']).to eq(5) end end context 'when a String key is used' do let(:options) do { 'array_filters' => [{ 'i.y' => 3 }] } end it 'applies the arrayFilters' do expect(result['x']).to eq([{ 'y' => 1 }, { 'y' => 2 }, { 'y' => 3 }]) expect(authorized_collection.find(selector).first['x'].last['y']).to eq(5) end end end context 'when the server selected does not support arrayFilters', unless: array_filters_enabled? do let(:result) do authorized_collection.find_one_and_update(selector, { '$set' => { 'x.$[i].y' => 5 } }, options) end context 'when a Symbol key is used' do let(:options) do { array_filters: [{ 'i.y' => 3 }] } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end context 'when a String key is used' do let(:options) do { 'array_filters' => [{ 'i.y' => 3 }] } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedArrayFilters) end end end end end describe '#find_one_and_replace' do before do authorized_collection.insert_many([{ field: 'test1', other: 'sth' }]) end let(:selector) do { field: 'test1' } end context 'when a matching document is found' do context 'when no options are provided' do let(:document) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }) end it 'returns the original document' do expect(document['field']).to eq('test1') end end context 'when a session is provided' do let(:operation) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }, session: session) end let(:failed_operation) do authorized_collection.find_one_and_replace({ '$._id' => 1}, { field: 'testing' }, session: session) end let(:session) do authorized_client.start_session end let(:client) do authorized_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' end context 'when return_document options are provided' do context 'when return_document is :after' do let(:document) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }, :return_document => :after) end it 'returns the new document' do expect(document['field']).to eq('testing') end end context 'when return_document is :before' do let(:document) do 
authorized_collection.find_one_and_replace(selector, { field: 'testing' }, :return_document => :before) end it 'returns the original document' do expect(document['field']).to eq('test1') end end end context 'when a projection is provided' do let(:document) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }, projection: { _id: 1 }) end it 'returns the document with limited fields' do expect(document['field']).to be_nil expect(document['_id']).to_not be_nil end end context 'when a sort is provided' do let(:document) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }, :sort => { field: 1 }) end it 'returns the original document' do expect(document['field']).to eq('test1') end end end context 'when no matching document is found' do context 'when no upsert options are provided' do let(:selector) do { field: 'test5' } end let(:document) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }) end it 'returns nil' do expect(document).to be_nil end end context 'when upsert options are provided' do let(:selector) do { field: 'test5' } end let(:document) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }, :upsert => true, :return_document => :after) end it 'returns the new document' do expect(document['field']).to eq('testing') end end end context 'when max_time_ms is provided' do it 'includes the max_time_ms value in the command' do expect { authorized_collection.find_one_and_replace(selector, { field: 'testing' }, max_time_ms: 0.1) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the operation fails' do let(:result) do authorized_collection.find_one_and_replace(selector, { field: 'testing' }, max_time_ms: 0.1) end it 'raises an OperationFailure' do expect { result }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when collection has a validator', if: find_command_enabled? do around(:each) do |spec| authorized_client[:validating, :validator => { :a => { '$exists' => true } }].tap do |c| c.create end spec.run collection_with_validator.drop end before do collection_with_validator.insert_one({ a: 1 }) end context 'when the document is valid' do let(:result) do collection_with_validator.find_one_and_replace( { a: 1 }, { a: 5 }, :return_document => :after) end it 'replaces successfully when document is valid' do expect(result[:a]).to eq(5) end end context 'when the document is invalid' do context 'when bypass_document_validation is not set' do let(:result2) do collection_with_validator.find_one_and_replace( { a: 1 }, { x: 5 }, :return_document => :after) end it 'raises OperationFailure' do expect { result2 }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when bypass_document_validation is true' do let(:result3) do collection_with_validator.find_one_and_replace( { a: 1 }, { x: 1 }, :bypass_document_validation => true, :return_document => :after) end it 'replaces successfully' do expect(result3[:x]).to eq(1) expect(result3[:a]).to be_nil end end end end context 'when write_concern is provided', if: find_command_enabled? && standalone? do it 'uses the write concern' do expect { authorized_collection.find_one_and_replace(selector, { field: 'testing' }, write_concern: { w: 2 }) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the collection has a write concern', if: find_command_enabled? && standalone? 
do let(:collection) do authorized_collection.with(write: { w: 2 }) end it 'uses the write concern' do expect { collection.find_one_and_replace(selector, { field: 'testing' }, write_concern: { w: 2 }) }.to raise_error(Mongo::Error::OperationFailure) end end context 'when collation is provided' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.find_one_and_replace(selector, { name: 'doink' }, options) end before do authorized_collection.insert_one(name: 'bang') end let(:options) do { collation: { locale: 'en_US', strength: 2 } } end context 'when the server selected supports collations', if: collation_enabled? do it 'applies the collation' do expect(result['name']).to eq('bang') expect(authorized_collection.find(name: 'doink').count).to eq(1) end end context 'when the server selected does not support collations', unless: collation_enabled? do it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end context 'when a String key is used' do let(:options) do { 'collation' => { locale: 'en_US', strength: 2 } } end it 'raises an exception' do expect { result }.to raise_exception(Mongo::Error::UnsupportedCollation) end end end end context 'when collation is not specified' do let(:selector) do { name: 'BANG' } end let(:result) do authorized_collection.find_one_and_replace(selector, { name: 'doink' }) end before do authorized_collection.insert_one(name: 'bang') end it 'does not apply the collation' do expect(result).to be_nil end end end describe '#watch' do context 'when change streams can be tested', if: test_change_streams? do let(:change_stream) do authorized_collection.watch end let(:enum) do change_stream.to_enum end before do change_stream authorized_collection.insert_one(a: 1) end context 'when no options are provided' do context 'when the operation type is an insert' do it 'returns the change' do expect(enum.next[:fullDocument][:a]).to eq(1) end end context 'when the operation type is an update' do before do authorized_collection.update_one({ a: 1 }, { '$set' => { a: 2 } }) end let(:change_doc) do enum.next enum.next end it 'returns the change' do expect(change_doc[:operationType]).to eq('update') expect(change_doc[:updateDescription][:updatedFields]).to eq('a' => 2) end end end context 'when options are provided' do context 'when full_document is updateLookup' do let(:change_stream) do authorized_collection.watch([], full_document: 'updateLookup').to_enum end before do authorized_collection.update_one({ a: 1 }, { '$set' => { a: 2 } }) end let(:change_doc) do enum.next enum.next end it 'returns the change' do expect(change_doc[:operationType]).to eq('update') expect(change_doc[:fullDocument][:a]).to eq(2) end end context 'when batch_size is provided' do before do Thread.new do sleep 1 authorized_collection.insert_one(a: 2) authorized_collection.insert_one(a: 3) end end let(:change_stream) do authorized_collection.watch([], batch_size: 2) end it 'returns the documents in the batch size specified' do expect(change_stream.instance_variable_get(:@cursor)).to receive(:get_more).once.and_call_original enum.next end end context 'when collation is provided' do before do authorized_collection.update_one({ a: 1 }, { '$set' => { a: 2 } }) end let(:change_doc) do enum.next end let(:change_stream) do authorized_collection.watch([ { '$match' => { operationType: 'UPDATE'}}], collation: { locale: 'en_US', strength: 2 } ).to_enum end it 'returns the change' do expect(change_doc['operationType']).to eq('update') 
expect(change_doc['updateDescription']['updatedFields']['a']).to eq(2) end end end end end end mongo-2.5.1/spec/mongo/event/0000755000004100000410000000000013257253113016062 5ustar www-datawww-datamongo-2.5.1/spec/mongo/event/subscriber_spec.rb0000644000004100000410000000117113257253113021564 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Event::Subscriber do let(:listeners) do Mongo::Event::Listeners.new end let(:klass) do Class.new do include Mongo::Event::Subscriber def initialize(listeners) @event_listeners = listeners end end end describe '#subscribe_to' do let(:listener) do double('listener') end let(:subscriber) do klass.new(listeners) end it 'subscribes the listener to the publisher' do expect(listeners).to receive(:add_listener).with('test', listener) subscriber.subscribe_to('test', listener) end end end mongo-2.5.1/spec/mongo/event/publisher_spec.rb0000644000004100000410000000175713257253113021430 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Event::Publisher do describe '#publish' do let(:listeners) do Mongo::Event::Listeners.new end let(:klass) do Class.new do include Mongo::Event::Publisher def initialize(listeners) @event_listeners = listeners end end end let(:publisher) do klass.new(listeners) end let(:listener) do double('listener') end context 'when the event has listeners' do before do listeners.add_listener('test', listener) listeners.add_listener('test', listener) end it 'handles the event for each listener' do expect(listener).to receive(:handle).with('test').twice publisher.publish('test', 'test') end end context 'when the event has no listeners' do it 'does not handle anything' do expect(listener).to receive(:handle).never publisher.publish('test', 'test') end end end end mongo-2.5.1/spec/mongo/write_concern/0000755000004100000410000000000013257253113017602 5ustar www-datawww-datamongo-2.5.1/spec/mongo/write_concern/unacknowledged_spec.rb0000644000004100000410000000067013257253113024136 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::WriteConcern::Unacknowledged do let(:concern) do described_class.new(:w => 0) end describe '#get_last_error' do it 'returns nil' do expect(concern.get_last_error).to be_nil end end describe '#acknowledged?' do let(:concern) do described_class.new(:w => 0) end it 'returns false' do expect(concern.acknowledged?).to be(false) end end end mongo-2.5.1/spec/mongo/write_concern/acknowledged_spec.rb0000644000004100000410000000217013257253113023570 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::WriteConcern::Acknowledged do describe '#acknowledged?'
do let(:concern) do described_class.new(:w => :majority) end it 'returns true' do expect(concern.acknowledged?).to be(true) end end describe '#get_last_error' do let(:get_last_error) do concern.get_last_error end context 'when the options are symbols' do let(:concern) do described_class.new(:w => :majority) end it 'converts the values to strings' do expect(get_last_error).to eq(:getlasterror => 1, :w => 'majority') end end context 'when the options are strings' do let(:concern) do described_class.new(:w => 'majority') end it 'keeps the values as strings' do expect(get_last_error).to eq(:getlasterror => 1, :w => 'majority') end end context 'when the options are numbers' do let(:concern) do described_class.new(:w => 3) end it 'keeps the values as numbers' do expect(get_last_error).to eq(:getlasterror => 1, :w => 3) end end end end mongo-2.5.1/spec/mongo/server_selector/0000755000004100000410000000000013257253113020147 5ustar www-datawww-datamongo-2.5.1/spec/mongo/server_selector/nearest_spec.rb0000644000004100000410000002173113257253113023153 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::ServerSelector::Nearest do let(:name) { :nearest } include_context 'server selector' it_behaves_like 'a server selector mode' do let(:slave_ok) { true } end it_behaves_like 'a server selector accepting tag sets' it_behaves_like 'a server selector with sensitive data in its options' describe '#initialize' do context 'when max_staleness is provided' do let(:options) do { max_staleness: 95 } end it 'sets the max_staleness option' do expect(selector.max_staleness).to eq(options[:max_staleness]) end end end describe '#==' do context 'when max staleness is the same' do let(:options) do { max_staleness: 95 } end let(:other) do described_class.new(options) end it 'returns true' do expect(selector).to eq(other) end end context 'when max staleness is different' do let(:other_options) do { max_staleness: 100 } end let(:other) do described_class.new(other_options) end it 'returns false' do expect(selector).not_to eq(other) end end end describe '#to_mongos' do context 'tag set not provided' do let(:expected) do { :mode => 'nearest' } end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end context 'tag set provided' do let(:tag_sets) do [tag_set] end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq( { :mode => 'nearest', :tags => tag_sets } ) end end context 'max staleness not provided' do let(:expected) do { :mode => 'nearest' } end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end context 'max staleness provided' do let(:max_staleness) do 100 end let(:expected) do { :mode => 'nearest', maxStalenessSeconds: 100 } end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end end describe '#select' do context 'no candidates' do let(:candidates) { [] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'single primary candidates' do let(:candidates) { [primary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'single secondary candidate' do let(:candidates) { [secondary] } it 'returns an array with the secondary' do expect(selector.send(:select, candidates)).to eq([secondary]) end end context 'primary and secondary candidates' do let(:candidates) { [primary, secondary] } it 'returns an array with 
the primary and secondary' do expect(selector.send(:select, candidates)).to match_array([primary, secondary]) end end context 'multiple secondary candidates' do let(:candidates) { [secondary, secondary] } it 'returns an array with the secondaries' do expect(selector.send(:select, candidates)).to match_array([secondary, secondary]) end end context 'tag sets provided' do let(:tag_sets) { [tag_set] } let(:matching_primary) do make_server(:primary, :tags => server_tags, address: default_address) end let(:matching_secondary) do make_server(:secondary, :tags => server_tags, address: default_address) end context 'single candidate' do context 'primary' do let(:candidates) { [primary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'matching primary' do let(:candidates) { [matching_primary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([matching_primary]) end end context 'secondary' do let(:candidates) { [secondary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'matching secondary' do let(:candidates) { [matching_secondary] } it 'returns an array with the matching secondary' do expect(selector.send(:select, candidates)).to eq([matching_secondary]) end end end context 'multiple candidates' do context 'no matching servers' do let(:candidates) { [primary, secondary, secondary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'one matching primary' do let(:candidates) { [matching_primary, secondary, secondary] } it 'returns an array with the matching primary' do expect(selector.send(:select, candidates)).to eq([matching_primary]) end end context 'one matching secondary' do let(:candidates) { [primary, matching_secondary, secondary] } it 'returns an array with the matching secondary' do expect(selector.send(:select, candidates)).to eq([matching_secondary]) end end context 'two matching secondaries' do let(:candidates) { [primary, matching_secondary, matching_secondary] } let(:expected) { [matching_secondary, matching_secondary] } it 'returns an array with the matching secondaries' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'one matching primary and one matching secondary' do let(:candidates) { [matching_primary, matching_secondary, secondary] } let(:expected) { [matching_primary, matching_secondary] } it 'returns an array with the matching primary and secondary' do expect(selector.send(:select, candidates)).to match_array(expected) end end end end context 'high latency servers' do let(:far_primary) { make_server(:primary, :average_round_trip_time => 0.113, address: default_address) } let(:far_secondary) { make_server(:secondary, :average_round_trip_time => 0.114, address: default_address) } context 'single candidate' do context 'far primary' do let(:candidates) { [far_primary] } it 'returns array with far primary' do expect(selector.send(:select, candidates)).to eq([far_primary]) end end context 'far secondary' do let(:candidates) { [far_secondary] } it 'returns array with far secondary' do expect(selector.send(:select, candidates)).to eq([far_secondary]) end end end context 'multiple candidates' do context 'local primary, local secondary' do let(:candidates) { [primary, secondary] } it 'returns array with primary and secondary' do expect(selector.send(:select, candidates)).to match_array( [primary, secondary] ) end end context 'local primary, far secondary'
do let(:candidates) { [primary, far_secondary] } it 'returns array with local primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'far primary, local secondary' do let(:candidates) { [far_primary, secondary] } it 'returns array with local secondary' do expect(selector.send(:select, candidates)).to eq([secondary]) end end context 'far primary, far secondary' do let(:candidates) { [far_primary, far_secondary] } let(:expected) { [far_primary, far_secondary] } it 'returns array with both servers' do expect(selector.send(:select, candidates)).to match_array(expected) end end context 'two local servers, one far server' do context 'local primary, local secondary' do let(:candidates) { [primary, secondary, far_secondary] } let(:expected) { [primary, secondary] } it 'returns array with local primary and local secondary' do expect(selector.send(:select, candidates)).to match_array(expected) end end context 'two near secondaries' do let(:candidates) { [far_primary, secondary, secondary] } let(:expected) { [secondary, secondary] } it 'returns array with the two local secondaries' do expect(selector.send(:select, candidates)).to match_array(expected) end end end end end end end mongo-2.5.1/spec/mongo/server_selector/primary_spec.rb0000644000004100000410000000717213257253113023200 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::ServerSelector::Primary do let(:name) { :primary } include_context 'server selector' it_behaves_like 'a server selector mode' do let(:slave_ok) { false } end it_behaves_like 'a server selector with sensitive data in its options' describe '#initialize' do context 'when max_staleness is provided' do let(:options) do { max_staleness: 100 } end it 'raises an exception' do expect { selector }.to raise_exception(Mongo::Error::InvalidServerPreference) end end end describe '#tag_sets' do context 'tags not provided' do it 'returns an empty array' do expect(selector.tag_sets).to be_empty end end context 'tag sets provided' do let(:tag_sets) do [ tag_set ] end it 'raises an error' do expect { selector.tag_sets }.to raise_error(Mongo::Error::InvalidServerPreference) end end end describe '#to_mongos' do it 'returns nil' do expect(selector.to_mongos).to be_nil end context 'max staleness not provided' do it 'returns nil' do expect(selector.to_mongos).to be_nil end end context 'max staleness provided' do let(:max_staleness) do 100 end it 'raises an error' do expect { selector }.to raise_exception(Mongo::Error::InvalidServerPreference) end end end describe '#select' do context 'no candidates' do let(:candidates) { [] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'secondary candidates' do let(:candidates) { [secondary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'primary candidate' do let(:candidates) { [primary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'primary and secondary candidates' do let(:candidates) { [secondary, primary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'high latency candidates' do let(:far_primary) { make_server(:primary, :average_round_trip_time => 0.100, address: default_address) } let(:far_secondary) { make_server(:secondary, :average_round_trip_time => 0.120, address: default_address) } context 'single candidate' do context 'far primary' do let(:candidates) 
{ [far_primary] } it 'returns array with the primary' do expect(selector.send(:select, candidates)).to eq([far_primary]) end end context 'far secondary' do let(:candidates) { [far_secondary] } it 'returns empty array' do expect(selector.send(:select, candidates)).to be_empty end end end context 'multiple candidates' do context 'far primary, far secondary' do let(:candidates) { [far_primary, far_secondary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([far_primary]) end end context 'far primary, local secondary' do let(:candidates) { [far_primary, far_secondary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([far_primary]) end end end end end end mongo-2.5.1/spec/mongo/server_selector/primary_preferred_spec.rb0000644000004100000410000002375613257253113025244 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::ServerSelector::PrimaryPreferred do let(:name) { :primary_preferred } include_context 'server selector' it_behaves_like 'a server selector mode' do let(:slave_ok) { true } end it_behaves_like 'a server selector accepting tag sets' it_behaves_like 'a server selector with sensitive data in its options' describe '#initialize' do context 'when max_staleness is provided' do let(:options) do { max_staleness: 95 } end it 'sets the max_staleness option' do expect(selector.max_staleness).to eq(options[:max_staleness]) end end end describe '#==' do context 'when max staleness is the same' do let(:options) do { max_staleness: 95 } end let(:other) do described_class.new(options) end it 'returns true' do expect(selector).to eq(other) end end context 'when max staleness is different' do let(:other_options) do { max_staleness: 100 } end let(:other) do described_class.new(other_options) end it 'returns false' do expect(selector).not_to eq(other) end end end describe '#to_mongos' do context 'tag sets not provided' do it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq({ :mode => 'primaryPreferred' }) end end context 'tag set provided' do let(:tag_sets) { [tag_set] } it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq( { :mode => 'primaryPreferred', :tags => tag_sets} ) end end context 'max staleness not provided' do let(:expected) do { :mode => 'primaryPreferred' } end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end context 'max staleness provided' do let(:max_staleness) do 100 end let(:expected) do { :mode => 'primaryPreferred', maxStalenessSeconds: 100 } end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end end describe '#select' do context 'no candidates' do let(:candidates) { [] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'single primary candidate' do let(:candidates) { [primary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq( [primary] ) end end context 'single secondary candidate' do let(:candidates) { [secondary] } it 'returns an array with the secondary' do expect(selector.send(:select, candidates)).to eq( [secondary] ) end end context 'primary and secondary candidates' do let(:candidates) { [secondary, primary] } let(:expected) { [primary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'secondary and primary candidates' do 
let(:candidates) { [secondary, primary] } let(:expected) { [primary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'tag sets provided' do let(:tag_sets) { [tag_set] } let(:matching_primary) do make_server(:primary, :tags => server_tags, address: default_address ) end let(:matching_secondary) do make_server(:secondary, :tags => server_tags, address: default_address ) end context 'single candidate' do context 'primary' do let(:candidates) { [primary] } it 'returns array with primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'matching_primary' do let(:candidates) { [matching_primary] } it 'returns array with matching primary' do expect(selector.send(:select, candidates)).to eq([matching_primary]) end end context 'matching secondary' do let(:candidates) { [matching_secondary] } it 'returns array with matching secondary' do expect(selector.send(:select, candidates)).to eq([matching_secondary]) end end context 'secondary' do let(:candidates) { [secondary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end end context 'multiple candidates' do context 'no matching secondaries' do let(:candidates) { [primary, secondary, secondary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'one matching primary' do let(:candidates) { [matching_primary, secondary, secondary] } it 'returns an array of the primary' do expect(selector.send(:select, candidates)).to eq([matching_primary]) end end context 'one matching secondary' do let(:candidates) { [primary, matching_secondary, secondary] } let(:expected) { [primary] } it 'returns an array of the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'two matching secondaries' do let(:candidates) { [primary, matching_secondary, matching_secondary] } let(:expected) { [primary] } it 'returns an array of the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'one matching primary, one matching secondary' do let(:candidates) { [primary, matching_secondary, secondary] } let(:expected) { [primary] } it 'returns an array of the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end end end context 'high latency servers' do let(:far_primary) { make_server(:primary, :average_round_trip_time => 0.100, address: default_address) } let(:far_secondary) { make_server(:secondary, :average_round_trip_time => 0.113, address: default_address) } context 'single candidate' do context 'far primary' do let(:candidates) { [far_primary] } it 'returns array with far primary' do expect(selector.send(:select, candidates)).to eq([far_primary]) end end context 'far secondary' do let(:candidates) { [far_secondary] } it 'returns array with far secondary' do expect(selector.send(:select, candidates)).to eq([far_secondary]) end end end context 'multiple candidates' do context 'primary available' do context 'local primary, local secondary' do let(:candidates) { [primary, secondary] } let(:expected) { [primary] } it 'returns an array of the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'local primary, far secondary' do let(:candidates) { [primary, far_secondary] } let(:expected) { [primary] } it 'returns an array of the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'far primary, local secondary' do
let(:candidates) { [far_primary, secondary] } let(:expected) { [far_primary] } it 'returns an array of the far primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'far primary, far secondary' do let(:candidates) { [far_primary, far_secondary] } let(:expected) { [far_primary] } it 'returns an array of the far primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'two local servers, one far server' do context 'local primary, local secondary, far secondary' do let(:candidates) { [primary, secondary, far_secondary] } let(:expected) { [primary] } it 'returns an array of the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'two local secondaries' do let(:candidates) { [far_primary, secondary, secondary] } let(:expected) { [far_primary] } it 'returns an array with primary' do expect(selector.send(:select, candidates)).to eq(expected) end end end end context 'primary not available' do context 'one secondary' do let(:candidates) { [secondary] } let(:expected) { [secondary] } it 'returns an array with the secondary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'one local secondary, one far secondary' do let(:candidates) { [secondary, far_secondary] } let(:expected) { [secondary] } it 'returns an array of the secondary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'two local secondaries, one far secondary' do let(:candidates) { [secondary, secondary, far_secondary] } let(:expected) { [secondary, secondary] } it 'returns an array of the secondary' do expect(selector.send(:select, candidates)).to eq(expected) end end end end end end end mongo-2.5.1/spec/mongo/server_selector/secondary_spec.rb0000644000004100000410000001576113257253113023507 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::ServerSelector::Secondary do let(:name) { :secondary } include_context 'server selector' it_behaves_like 'a server selector mode' do let(:slave_ok) { true } end it_behaves_like 'a server selector with sensitive data in its options' it_behaves_like 'a server selector accepting tag sets' describe '#initialize' do context 'when max_staleness is provided' do let(:options) do { max_staleness: 100 } end it 'sets the max_staleness option' do expect(selector.max_staleness).to eq(options[:max_staleness]) end end end describe '#==' do context 'when max staleness is the same' do let(:options) do { max_staleness: 90 } end let(:other) do described_class.new(options) end it 'returns true' do expect(selector).to eq(other) end end context 'when max staleness is different' do let(:other_options) do { max_staleness: 95 } end let(:other) do described_class.new(other_options) end it 'returns false' do expect(selector).not_to eq(other) end end end describe '#to_mongos' do it 'returns read preference formatted for mongos' do expect(selector.to_mongos).to eq( { :mode => 'secondary' } ) end context 'tag sets provided' do let(:tag_sets) { [tag_set] } it 'returns read preference formatted for mongos with tag sets' do expect(selector.to_mongos).to eq( { :mode => 'secondary', :tags => tag_sets} ) end end context 'max staleness not provided' do let(:expected) do { :mode => 'secondary' } end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end context 'max staleness provided' do let(:max_staleness) do 60 end let(:expected) do { :mode => 'secondary', maxStalenessSeconds: 60 } end it 'returns a read preference 
formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end end describe '#select' do context 'no candidates' do let(:candidates) { [] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'single primary candidate' do let(:candidates) { [primary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'single secondary candidate' do let(:candidates) { [secondary] } it 'returns array with secondary' do expect(selector.send(:select, candidates)).to eq([secondary]) end end context 'primary and secondary candidates' do let(:candidates) { [primary, secondary] } it 'returns array with secondary' do expect(selector.send(:select, candidates)).to eq([secondary]) end end context 'multiple secondary candidates' do let(:candidates) { [secondary, secondary, primary] } it 'returns array with all secondaries' do expect(selector.send(:select, candidates)).to eq([secondary, secondary]) end end context 'tag sets provided' do let(:tag_sets) { [tag_set] } let(:matching_secondary) { make_server(:secondary, :tags => server_tags, address: default_address) } context 'single candidate' do context 'primary' do let(:candidates) { [primary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'secondary' do let(:candidates) { [secondary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'matching secondary' do let(:candidates) { [matching_secondary] } it 'returns an array with matching secondary' do expect(selector.send(:select, candidates)).to eq([matching_secondary]) end end end context 'multiple candidates' do context 'no matching candidates' do let(:candidates) { [primary, secondary, secondary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'one matching secondary' do let(:candidates) { [secondary, matching_secondary]} it 'returns array with matching secondary' do expect(selector.send(:select, candidates)).to eq([matching_secondary]) end end context 'two matching secondaries' do let(:candidates) { [matching_secondary, matching_secondary] } it 'returns an array with both matching secondaries' do expect(selector.send(:select, candidates)).to eq([matching_secondary, matching_secondary]) end end end end context 'high latency servers' do let(:far_primary) { make_server(:primary, :average_round_trip_time => 0.100, address: default_address) } let(:far_secondary) { make_server(:secondary, :average_round_trip_time => 0.113, address: default_address) } context 'single candidate' do context 'far primary' do let(:candidates) { [far_primary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'far secondary' do let(:candidates) { [far_secondary] } it 'returns an array with the secondary' do expect(selector.send(:select, candidates)).to eq([far_secondary]) end end end context 'multiple candidates' do context 'local primary, far secondary' do let(:candidates) { [primary, far_secondary] } it 'returns an array with the secondary' do expect(selector.send(:select, candidates)).to eq([far_secondary]) end end context 'far primary, far secondary' do let(:candidates) { [far_primary, far_secondary] } it 'returns an array with the secondary' do expect(selector.send(:select, candidates)).to eq([far_secondary]) end end context 'two near servers, one far server' do context 'near primary, near and far secondaries' do 
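# Only servers whose average round-trip time falls within the latency window (the
# nearest candidate's RTT plus the local threshold, 15 ms by default per the server
# selection spec) remain eligible, so the far secondary (0.113s RTT) is expected to
# be filtered out of the selection below.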
let(:candidates) { [primary, secondary, far_secondary] } it 'returns an array with near secondary' do expect(selector.send(:select, candidates)).to eq([secondary]) end end context 'far primary and two near secondaries' do let(:candidates) { [far_primary, secondary, secondary] } it 'returns an array with two secondaries' do expect(selector.send(:select, candidates)).to eq([secondary, secondary]) end end end end end end end mongo-2.5.1/spec/mongo/server_selector/secondary_preferred_spec.rb0000644000004100000410000002146513257253113025543 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::ServerSelector::SecondaryPreferred do let(:name) { :secondary_preferred } include_context 'server selector' it_behaves_like 'a server selector mode' do let(:slave_ok) { true } end it_behaves_like 'a server selector with sensitive data in its options' it_behaves_like 'a server selector accepting tag sets' describe '#initialize' do context 'when max_staleness is provided' do let(:options) do { max_staleness: 95 } end it 'sets the max_staleness option' do expect(selector.max_staleness).to eq(options[:max_staleness]) end end end describe '#==' do context 'when max staleness is the same' do let(:options) do { max_staleness: 90 } end let(:other) do described_class.new(options) end it 'returns true' do expect(selector).to eq(other) end end context 'when max staleness is different' do let(:other_options) do { max_staleness: 100 } end let(:other) do described_class.new(other_options) end it 'returns false' do expect(selector).not_to eq(other) end end end describe '#to_mongos' do context 'tag sets provided' do let(:tag_sets) do [ tag_set ] end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq( { :mode => 'secondaryPreferred', :tags => tag_sets } ) end end context 'tag sets not provided' do it 'returns nil' do expect(selector.to_mongos).to be_nil end end context 'max staleness not provided' do let(:expected) do { :mode => 'secondaryPreferred' } end it 'returns nil' do expect(selector.to_mongos).to be_nil end end context 'max staleness provided' do let(:max_staleness) do 60 end let(:expected) do { :mode => 'secondaryPreferred', maxStalenessSeconds: 60 } end it 'returns a read preference formatted for mongos' do expect(selector.to_mongos).to eq(expected) end end end describe '#select' do context 'no candidates' do let(:candidates) { [] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end context 'single primary candidates' do let(:candidates) { [primary] } it 'returns array with primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'single secondary candidate' do let(:candidates) { [secondary] } it 'returns array with secondary' do expect(selector.send(:select, candidates)).to eq([secondary]) end end context 'primary and secondary candidates' do let(:candidates) { [primary, secondary] } let(:expected) { [secondary, primary] } it 'returns array with secondary first, then primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'secondary and primary candidates' do let(:candidates) { [secondary, primary] } let(:expected) { [secondary, primary] } it 'returns array with secondary and primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'tag sets provided' do let(:tag_sets) do [ tag_set ] end let(:matching_primary) do make_server(:primary, :tags => server_tags, address: default_address) end let(:matching_secondary) do 
make_server(:secondary, :tags => server_tags, address: default_address) end context 'single candidate' do context 'primary' do let(:candidates) { [primary] } it 'returns array with primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'matching_primary' do let(:candidates) { [matching_primary] } it 'returns array with matching primary' do expect(selector.send(:select, candidates)).to eq([matching_primary]) end end context 'matching secondary' do let(:candidates) { [matching_secondary] } it 'returns array with matching secondary' do expect(selector.send(:select, candidates)).to eq([matching_secondary]) end end context 'secondary' do let(:candidates) { [secondary] } it 'returns an empty array' do expect(selector.send(:select, candidates)).to be_empty end end end context 'multiple candidates' do context 'no matching secondaries' do let(:candidates) { [primary, secondary, secondary] } it 'returns an array with the primary' do expect(selector.send(:select, candidates)).to eq([primary]) end end context 'one matching secondary' do let(:candidates) { [primary, matching_secondary] } it 'returns an array of the matching secondary, then primary' do expect(selector.send(:select, candidates)).to eq( [matching_secondary, primary] ) end end context 'two matching secondaries' do let(:candidates) { [primary, matching_secondary, matching_secondary] } let(:expected) { [matching_secondary, matching_secondary, primary] } it 'returns an array of the matching secondaries, then primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'one matching secondary and one matching primary' do let(:candidates) { [matching_primary, matching_secondary] } let(:expected) {[matching_secondary, matching_primary] } it 'returns an array of the matching secondary, then the primary' do expect(selector.send(:select, candidates)).to eq(expected) end end end end context 'high latency servers' do let(:far_primary) { make_server(:primary, :average_round_trip_time => 0.100, address: default_address) } let(:far_secondary) { make_server(:secondary, :average_round_trip_time => 0.113, address: default_address) } context 'single candidate' do context 'far primary' do let(:candidates) { [far_primary] } it 'returns array with primary' do expect(selector.send(:select, candidates)).to eq([far_primary]) end end context 'far secondary' do let(:candidates) { [far_secondary] } it 'returns an array with the secondary' do expect(selector.send(:select, candidates)).to eq([far_secondary]) end end end context 'multiple candidates' do context 'local primary, local secondary' do let(:candidates) { [primary, secondary] } it 'returns an array with secondary, then primary' do expect(selector.send(:select, candidates)).to eq([secondary, primary]) end end context 'local primary, far secondary' do let(:candidates) { [primary, far_secondary] } it 'returns an array with the secondary, then primary' do expect(selector.send(:select, candidates)).to eq([far_secondary, primary]) end end context 'local secondary' do let(:candidates) { [far_primary, secondary] } let(:expected) { [secondary, far_primary] } it 'returns an array with secondary, then primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'far primary, far secondary' do let(:candidates) { [far_primary, far_secondary] } let(:expected) { [far_secondary, far_primary] } it 'returns an array with secondary, then primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'two near servers, 
one far secondary' do context 'near primary, near secondary, far secondary' do let(:candidates) { [primary, secondary, far_secondary] } let(:expected) { [secondary, primary] } it 'returns an array with near secondary, then primary' do expect(selector.send(:select, candidates)).to eq(expected) end end context 'two near secondaries, one far primary' do let(:candidates) { [far_primary, secondary, secondary] } let(:expected) { [secondary, secondary, far_primary] } it 'returns an array with secondaries, then primary' do expect(selector.send(:select, candidates)).to eq(expected) end end end end end end end mongo-2.5.1/spec/mongo/bulk_write/0000755000004100000410000000000013257253113017110 5ustar www-datawww-datamongo-2.5.1/spec/mongo/bulk_write/unordered_combiner_spec.rb0000644000004100000410000001361513257253113024322 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::BulkWrite::UnorderedCombiner do describe '#combine' do let(:combiner) do described_class.new(requests) end context 'when provided a series of delete one' do context 'when the documents are valid' do let(:requests) do [ { delete_one: { filter: { _id: 0 }}}, { delete_one: { filter: { _id: 1 }}} ] end it 'returns a single delete one' do expect(combiner.combine).to eq( [ { delete_one: [ { 'q' => { _id: 0 }, 'limit' => 1 }, { 'q' => { _id: 1 }, 'limit' => 1 } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { delete_one: { filter: { _id: 0 }}}, { delete_one: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of delete many' do context 'when the documents are valid' do let(:requests) do [ { delete_many: { filter: { _id: 0 }}}, { delete_many: { filter: { _id: 1 }}} ] end it 'returns a single delete many' do expect(combiner.combine).to eq( [ { delete_many: [ { 'q' => { _id: 0 }, 'limit' => 0 }, { 'q' => { _id: 1 }, 'limit' => 0 } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { delete_many: { filter: { _id: 0 }}}, { delete_many: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of insert one' do context 'when the documents are valid' do let(:requests) do [{ insert_one: { _id: 0 }}, { insert_one: { _id: 1 }}] end it 'returns a single insert one' do expect(combiner.combine).to eq( [{ insert_one: [{ _id: 0 }, { _id: 1 }]}] ) end end context 'when a document is not valid' do let(:requests) do [{ insert_one: { _id: 0 }}, { insert_one: 'whoami' }] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of update one' do context 'when the documents are valid' do let(:requests) do [ { update_one: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_one: { filter: { _id: 1 }, update: { '$set' => { name: 'test' }}}} ] end it 'returns a single update one' do expect(combiner.combine).to eq( [ { update_one: [ { 'q' => { _id: 0 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => false, 'upsert' => false }, { 'q' => { _id: 1 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => false, 'upsert' => false } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { update_one: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_one: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine 
}.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of update many ops' do context 'when the documents are valid' do let(:requests) do [ { update_many: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_many: { filter: { _id: 1 }, update: { '$set' => { name: 'test' }}}} ] end it 'returns a single update many' do expect(combiner.combine).to eq( [ { update_many: [ { 'q' => { _id: 0 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => true, 'upsert' => false }, { 'q' => { _id: 1 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => true, 'upsert' => false } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { update_many: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_many: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a mix of operations' do let(:requests) do [ { insert_one: { _id: 0 }}, { delete_one: { filter: { _id: 0 }}}, { insert_one: { _id: 1 }}, { delete_one: { filter: { _id: 1 }}} ] end it 'returns an unordered mixed grouping' do expect(combiner.combine).to eq( [ { insert_one: [ { _id: 0 }, { _id: 1 } ] }, { delete_one: [ { 'q' => { _id: 0 }, 'limit' => 1 }, { 'q' => { _id: 1 }, 'limit' => 1 } ] } ] ) end end end end mongo-2.5.1/spec/mongo/bulk_write/ordered_combiner_spec.rb0000644000004100000410000001616413257253113023761 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::BulkWrite::OrderedCombiner do describe '#combine' do let(:combiner) do described_class.new(requests) end context 'when provided a series of delete one' do context 'when the documents are valid' do let(:requests) do [ { delete_one: { filter: { _id: 0 }}}, { delete_one: { filter: { _id: 1 }}} ] end it 'returns a single delete one' do expect(combiner.combine).to eq( [ { delete_one: [ { 'q' => { _id: 0 }, 'limit' => 1 }, { 'q' => { _id: 1 }, 'limit' => 1 } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { delete_one: { filter: { _id: 0 }}}, { delete_one: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of delete many' do context 'when the documents are valid' do let(:requests) do [ { delete_many: { filter: { _id: 0 }}}, { delete_many: { filter: { _id: 1 }}} ] end it 'returns a single delete many' do expect(combiner.combine).to eq( [ { delete_many: [ { 'q' => { _id: 0 }, 'limit' => 0 }, { 'q' => { _id: 1 }, 'limit' => 0 } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { delete_many: { filter: { _id: 0 }}}, { delete_many: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of insert one' do context 'when providing only one operation' do let(:requests) do [{ insert_one: { _id: 0 }}] end it 'returns a single insert one' do expect(combiner.combine).to eq( [{ insert_one: [{ _id: 0 }]}] ) end end context 'when the documents are valid' do let(:requests) do [{ insert_one: { _id: 0 }}, { insert_one: { _id: 1 }}] end it 'returns a single insert one' do expect(combiner.combine).to eq( [{ insert_one: [{ _id: 0 }, { _id: 1 }]}] ) end end context 'when a document is not valid' do let(:requests) do [{ insert_one: { _id: 0 }}, { insert_one: 'whoami' }] end it 'raises an exception' do expect { combiner.combine 
}.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of replace one' do context 'when the documents are valid' do let(:requests) do [ { replace_one: { filter: { _id: 0 }, replacement: { name: 'test' }}}, { replace_one: { filter: { _id: 1 }, replacement: { name: 'test' }}} ] end it 'returns a single replace one' do expect(combiner.combine).to eq( [ { replace_one: [ { 'q' => { _id: 0 }, 'u' => { name: 'test' }, 'multi' => false, 'upsert' => false }, { 'q' => { _id: 1 }, 'u' => { name: 'test' }, 'multi' => false, 'upsert' => false } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { replace_one: { filter: { _id: 0 }, replacement: { name: 'test' }}}, { replace_one: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of update one' do context 'when the documents are valid' do let(:requests) do [ { update_one: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_one: { filter: { _id: 1 }, update: { '$set' => { name: 'test' }}}} ] end it 'returns a single update one' do expect(combiner.combine).to eq( [ { update_one: [ { 'q' => { _id: 0 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => false, 'upsert' => false }, { 'q' => { _id: 1 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => false, 'upsert' => false } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { update_one: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_one: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a series of update many ops' do context 'when the documents are valid' do let(:requests) do [ { update_many: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_many: { filter: { _id: 1 }, update: { '$set' => { name: 'test' }}}} ] end it 'returns a single update many' do expect(combiner.combine).to eq( [ { update_many: [ { 'q' => { _id: 0 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => true, 'upsert' => false }, { 'q' => { _id: 1 }, 'u' => { '$set' => { name: 'test' }}, 'multi' => true, 'upsert' => false } ] } ] ) end end context 'when a document is not valid' do let(:requests) do [ { update_many: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, { update_many: 'whoami' } ] end it 'raises an exception' do expect { combiner.combine }.to raise_error(Mongo::Error::InvalidBulkOperation) end end end context 'when provided a mix of operations' do let(:requests) do [ { insert_one: { _id: 0 }}, { delete_one: { filter: { _id: 0 }}}, { insert_one: { _id: 1 }} ] end it 'returns an ordered grouping' do expect(combiner.combine).to eq( [ { insert_one: [{ _id: 0 }]}, { delete_one: [{ 'q' => { _id: 0 }, 'limit' => 1 }]}, { insert_one: [{ _id: 1 }]} ] ) end end end end mongo-2.5.1/spec/mongo/uri/0000755000004100000410000000000013257253113015540 5ustar www-datawww-datamongo-2.5.1/spec/mongo/uri/srv_protocol_spec.rb0000644000004100000410000006575613257253113021655 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::URI::SRVProtocol do let(:scheme) { 'mongodb+srv://' } let(:uri) { described_class.new(string) } describe 'invalid uris' do context 'when there is more than one hostname' do let(:string) { "#{scheme}#{hosts}" } let(:hosts) { 'test5.test.build.10gen.cc,test6.test.build.10gen.cc' } it 'raises an error' do expect { uri }.to 
raise_error(Mongo::Error::InvalidURI) end end context 'when the the hostname has a port' do let(:string) { "#{scheme}#{hosts}" } let(:hosts) { 'test5.test.build.10gen.cc:8123' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'when the host in URI does not have {hostname}, {domainname} and {tld}' do let(:string) { "#{scheme}#{hosts}" } let(:hosts) { '10gen.cc/' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'string is not uri' do let(:string) { 'tyler' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'empty string' do let(:string) { '' } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://' do let(:string) { "#{scheme}" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://localhost::27017/' do let(:string) { "#{scheme}localhost::27017/" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://::' do let(:string) { "#{scheme}::" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://localhost,localhost::' do let(:string) { "#{scheme}localhost,localhost::" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://localhost::27017,abc' do let(:string) { "#{scheme}localhost::27017,abc" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://localhost:-1' do let(:string) { "#{scheme}localhost:-1" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://localhost:0/' do let(:string) { "#{scheme}localhost:0/" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://localhost:65536' do let(:string) { "#{scheme}localhost:65536" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://localhost:foo' do let(:string) { "#{scheme}localhost:foo" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://mongodb://[::1]:-1' do let(:string) { "#{scheme}mongodb://[::1]:-1" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://[::1]:0/' do let(:string) { "#{scheme}[::1]:0/" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://[::1]:65536' do let(:string) { "#{scheme}[::1]:65536" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://[::1]:65536/' do let(:string) { "#{scheme}[::1]:65536/" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://[::1]:foo' do let(:string) { "#{scheme}[::1]:foo" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://example.com?w=1' do let(:string) { "#{scheme}example.com?w=1" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end context 'mongodb+srv://example.com/?w' do let(:string) { "#{scheme}example.com/?w" } it 'raises an error' do expect { uri }.to raise_error(Mongo::Error::InvalidURI) end end end describe 'valid uris', if: 
test_connecting_externally? do describe 'invalid query results' do context 'when there are too many TXT records' do let(:string) { "#{scheme}test6.test.build.10gen.cc/" } it 'raises an error' do expect { uri }.to raise_exception(Mongo::Error::InvalidTXTRecord) end end context 'when the TXT has an invalid option' do let(:string) { "#{scheme}test10.test.build.10gen.cc" } it 'raises an error' do expect { uri }.to raise_exception(Mongo::Error::InvalidTXTRecord) end end context 'when the SRV records domain does not match hostname used for the query' do let(:string) { "#{scheme}test12.test.build.10gen.cc" } it 'raises an error' do expect { uri }.to raise_exception(Mongo::Error::MismatchedDomain) end end context 'when the query returns no SRV records' do let(:string) { "#{scheme}test4.test.build.10gen.cc" } it 'raises an error' do expect { uri }.to raise_exception(Mongo::Error::NoSRVRecords) end end end describe '#servers' do let(:string) { "#{scheme}#{servers}" } context 'single server' do let(:servers) { 'test5.test.build.10gen.cc' } it 'returns an array with the parsed server' do expect(uri.servers).to eq(['localhost.test.build.10gen.cc:27017']) end end end describe '#client_options' do let(:db) { TEST_DB } let(:servers) { 'test5.test.build.10gen.cc' } let(:string) { "#{scheme}#{credentials}@#{servers}/#{db}" } let(:user) { 'tyler' } let(:password) { 's3kr4t' } let(:credentials) { "#{user}:#{password}" } let(:options) do uri.client_options end it 'includes the database in the options' do expect(options[:database]).to eq(TEST_DB) end it 'includes the user in the options' do expect(options[:user]).to eq(user) end it 'includes the password in the options' do expect(options[:password]).to eq(password) end it 'sets ssl to true' do expect(options[:ssl]).to eq(true) end end describe '#credentials' do let(:servers) { 'test5.test.build.10gen.cc' } let(:string) { "#{scheme}#{credentials}@#{servers}" } let(:user) { 'tyler' } context 'username provided' do let(:credentials) { "#{user}:" } it 'returns the username' do expect(uri.credentials[:user]).to eq(user) end end context 'username and password provided' do let(:password) { 's3kr4t' } let(:credentials) { "#{user}:#{password}" } it 'returns the username' do expect(uri.credentials[:user]).to eq(user) end it 'returns the password' do expect(uri.credentials[:password]).to eq(password) end end end describe '#database' do let(:servers) { 'test5.test.build.10gen.cc' } let(:string) { "#{scheme}#{servers}/#{db}" } let(:db) { 'auth-db' } context 'database provided' do it 'returns the database name' do expect(uri.database).to eq(db) end end end describe '#uri_options' do let(:servers) { 'test5.test.build.10gen.cc' } let(:string) { "#{scheme}#{servers}/?#{options}" } context 'when no options were provided' do let(:string) { "#{scheme}#{servers}" } it 'returns an empty hash' do expect(uri.uri_options).to be_empty end end context 'write concern options provided' do context 'numerical w value' do let(:options) { 'w=1' } let(:concern) { Mongo::Options::Redacted.new(:w => 1)} it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'w=majority' do let(:options) { 'w=majority' } let(:concern) { Mongo::Options::Redacted.new(:w => :majority) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do 
expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'journal' do let(:options) { 'journal=true' } let(:concern) { Mongo::Options::Redacted.new(:j => true) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'fsync' do let(:options) { 'fsync=true' } let(:concern) { Mongo::Options::Redacted.new(:fsync => true) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end context 'wtimeoutMS' do let(:timeout) { 1234 } let(:options) { "w=2&wtimeoutMS=#{timeout}" } let(:concern) { Mongo::Options::Redacted.new(:w => 2, :timeout => timeout) } it 'sets the write concern options' do expect(uri.uri_options[:write]).to eq(concern) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:write]).to eq(concern) end end end context 'read preference option provided' do let(:options) { "readPreference=#{mode}" } context 'primary' do let(:mode) { 'primary' } let(:read) { Mongo::Options::Redacted.new(:mode => :primary) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'primaryPreferred' do let(:mode) { 'primaryPreferred' } let(:read) { Mongo::Options::Redacted.new(:mode => :primary_preferred) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'secondary' do let(:mode) { 'secondary' } let(:read) { Mongo::Options::Redacted.new(:mode => :secondary) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'secondaryPreferred' do let(:mode) { 'secondaryPreferred' } let(:read) { Mongo::Options::Redacted.new(:mode => :secondary_preferred) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'nearest' do let(:mode) { 'nearest' } let(:read) { Mongo::Options::Redacted.new(:mode => :nearest) } it 'sets the read preference' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end end context 'read preference tags provided' do context 'single read preference tag set' do let(:options) do 'readPreferenceTags=dc:ny,rack:1' end let(:read) do Mongo::Options::Redacted.new(:tag_sets => [{ 'dc' => 'ny', 'rack' => '1' }]) end it 'sets the read preference tag set' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end context 'multiple read preference tag sets' do let(:options) do 'readPreferenceTags=dc:ny&readPreferenceTags=dc:bos' end let(:read) do Mongo::Options::Redacted.new(:tag_sets => [{ 'dc' => 'ny' }, { 'dc' => 'bos' }]) end it 
'sets the read preference tag sets' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end end end context 'read preference max staleness option provided' do let(:options) do 'readPreference=Secondary&maxStalenessSeconds=120' end let(:read) do Mongo::Options::Redacted.new(mode: :secondary, :max_staleness => 120) end it 'sets the read preference max staleness in seconds' do expect(uri.uri_options[:read]).to eq(read) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:read]).to eq(read) end context 'when the read preference and max staleness combination is invalid' do context 'when max staleness is combined with read preference mode primary' do let(:options) do 'readPreference=primary&maxStalenessSeconds=120' end it 'raises an exception when read preference is accessed on the client' do expect { Mongo::Client.new(string).server_selector }.to raise_exception(Mongo::Error::InvalidServerPreference) end end context 'when the max staleness value is too small' do let(:options) do 'readPreference=secondary&maxStalenessSeconds=89' end it 'does not raise an exception until the read preference is used' do expect(Mongo::Client.new(string).read_preference).to eq(BSON::Document.new(mode: :secondary, max_staleness: 89)) end end end end context 'replica set option provided' do let(:rs_name) { TEST_SET } let(:options) { "replicaSet=#{rs_name}" } it 'sets the replica set option' do expect(uri.uri_options[:replica_set]).to eq(rs_name) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:replica_set]).to eq(rs_name) end end context 'auth mechanism provided' do let(:options) { "authMechanism=#{mechanism}" } context 'plain' do let(:mechanism) { 'PLAIN' } let(:expected) { :plain } it 'sets the auth mechanism to :plain' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 'mongodb-cr' do let(:mechanism) { 'MONGODB-CR' } let(:expected) { :mongodb_cr } it 'sets the auth mechanism to :mongodb_cr' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 'gssapi' do let(:mechanism) { 'GSSAPI' } let(:expected) { :gssapi } it 'sets the auth mechanism to :gssapi' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 'scram-sha-1' do let(:mechanism) { 'SCRAM-SHA-1' } let(:expected) { :scram } it 'sets the auth mechanism to :scram' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end end context 
'mongodb-x509' do let(:mechanism) { 'MONGODB-X509' } let(:expected) { :mongodb_x509 } it 'sets the auth mechanism to :mongodb_x509' do expect(uri.uri_options[:auth_mech]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech]).to eq(expected) end it 'is case-insensitive' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) end context 'when a username is not provided' do it 'recognizes the mechanism with no username' do expect(Mongo::Client.new(string.downcase).options[:auth_mech]).to eq(expected) expect(Mongo::Client.new(string.downcase).options[:user]).to be_nil end end end end context 'auth source provided' do let(:options) { "authSource=#{source}" } context 'regular db' do let(:source) { 'foo' } it 'sets the auth source to the database' do expect(uri.uri_options[:auth_source]).to eq(source) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_source]).to eq(source) end end context '$external' do let(:source) { '$external' } let(:expected) { :external } it 'sets the auth source to :external' do expect(uri.uri_options[:auth_source]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_source]).to eq(expected) end end end context 'auth mechanism properties provided' do context 'service_name' do let(:options) do "authMechanismProperties=SERVICE_NAME:#{service_name}" end let(:service_name) { 'foo' } let(:expected) { Mongo::Options::Redacted.new({ service_name: service_name }) } it 'sets the auth mechanism properties' do expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end context 'canonicalize_host_name' do let(:options) do "authMechanismProperties=CANONICALIZE_HOST_NAME:#{canonicalize_host_name}" end let(:canonicalize_host_name) { 'true' } let(:expected) { Mongo::Options::Redacted.new({ canonicalize_host_name: true }) } it 'sets the auth mechanism properties' do expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end context 'service_realm' do let(:options) do "authMechanismProperties=SERVICE_REALM:#{service_realm}" end let(:service_realm) { 'dumdum' } let(:expected) { Mongo::Options::Redacted.new({ service_realm: service_realm }) } it 'sets the auth mechanism properties' do expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end context 'multiple properties' do let(:options) do "authMechanismProperties=SERVICE_REALM:#{service_realm}," + "CANONICALIZE_HOST_NAME:#{canonicalize_host_name}," + "SERVICE_NAME:#{service_name}" end let(:service_name) { 'foo' } let(:canonicalize_host_name) { 'true' } let(:service_realm) { 'dumdum' } let(:expected) do Mongo::Options::Redacted.new({ service_name: service_name, canonicalize_host_name: true, service_realm: service_realm }) end it 'sets the auth mechanism properties' do expect(uri.uri_options[:auth_mech_properties]).to eq(expected) end it 'sets the options on a client created with the uri' do 
expect(Mongo::Client.new(string).options[:auth_mech_properties]).to eq(expected) end end end context 'connectTimeoutMS' do let(:options) { "connectTimeoutMS=4567" } it 'sets the connect timeout' do expect(uri.uri_options[:connect_timeout]).to eq(4.567) end end context 'socketTimeoutMS' do let(:options) { "socketTimeoutMS=8910" } it 'sets the socket timeout' do expect(uri.uri_options[:socket_timeout]).to eq(8.910) end end context 'when providing serverSelectionTimeoutMS' do let(:options) { "serverSelectionTimeoutMS=3561" } it 'sets the server selection timeout' do expect(uri.uri_options[:server_selection_timeout]).to eq(3.561) end end context 'when providing localThresholdMS' do let(:options) { "localThresholdMS=3561" } it 'sets the local threshold' do expect(uri.uri_options[:local_threshold]).to eq(3.561) end end context 'when providing maxPoolSize' do let(:max_pool_size) { 10 } let(:options) { "maxPoolSize=#{max_pool_size}" } it 'sets the max pool size option' do expect(uri.uri_options[:max_pool_size]).to eq(max_pool_size) end end context 'when providing minPoolSize' do let(:min_pool_size) { 5 } let(:options) { "minPoolSize=#{min_pool_size}" } it 'sets the min pool size option' do expect(uri.uri_options[:min_pool_size]).to eq(min_pool_size) end end context 'when providing waitQueueTimeoutMS' do let(:wait_queue_timeout) { 500 } let(:options) { "waitQueueTimeoutMS=#{wait_queue_timeout}" } it 'sets the wait queue timeout option' do expect(uri.uri_options[:wait_queue_timeout]).to eq(0.5) end end context 'ssl' do let(:options) { "ssl=#{ssl}" } context 'true' do let(:ssl) { true } it 'sets the ssl option to true' do expect(uri.uri_options[:ssl]).to be true end end context 'false' do let(:ssl) { false } it 'sets the ssl option to false' do expect(uri.uri_options[:ssl]).to be false end end end context 'grouped and non-grouped options provided' do let(:options) { 'w=1&ssl=true' } it 'does not overshadow top level options' do expect(uri.uri_options).not_to be_empty end end context 'when an invalid option is provided' do let(:options) { 'invalidOption=10' } let(:uri_options) do uri.uri_options end it 'does not raise an exception' do expect(uri_options).to be_empty end context 'when an invalid option is combined with valid options' do let(:options) { 'invalidOption=10&waitQueueTimeoutMS=500&ssl=true' } it 'does not raise an exception' do expect(uri_options).not_to be_empty end it 'sets the valid options' do expect(uri_options[:wait_queue_timeout]).to eq(0.5) expect(uri_options[:ssl]).to be true end end end context 'when an app name option is provided' do let(:options) { "appname=reports" } it 'sets the app name on the client' do expect(Mongo::Client.new(string).options[:app_name]).to eq(:reports) end end context 'when a supported compressors option is provided' do let(:options) { "compressors=zlib" } it 'sets the compressors as an array on the client' do expect(Mongo::Client.new(string).options[:compressors]).to eq(['zlib']) end end context 'when a non-supported compressors option is provided' do let(:options) { "compressors=snoopy" } let(:client) do Mongo::Client.new(string) end it 'sets no compressors on the client and warns' do expect(Mongo::Logger.logger).to receive(:warn) expect(client.options[:compressors]).to be_nil end end context 'when a zlibCompressionLevel option is provided' do let(:options) { "zlibCompressionLevel=6" } it 'sets the zlib compression level on the client' do expect(Mongo::Client.new(string).options[:zlib_compression_level]).to eq(6) end end end end end 
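# A minimal usage sketch of what the SRV specs above exercise. The hostname below is
# hypothetical, and a resolvable SRV/TXT record plus a reachable deployment are assumed,
# so the example is left commented out rather than executed here:
#
#   client = Mongo::Client.new('mongodb+srv://cluster0.example.com/my_db')
#   client.options[:ssl]     # => true; TLS is enabled by default for mongodb+srv URIs
#   client.cluster.addresses # hosts come from the SRV lookup, not from the URI itself
#   client.options           # may also include options read from the TXT record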
mongo-2.5.1/spec/mongo/retryable_spec.rb0000644000004100000410000001446413257253113020302 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Retryable do let(:klass) do Class.new do include Mongo::Retryable attr_reader :cluster attr_reader :operation def initialize(operation, cluster) @operation = operation @cluster = cluster end def max_read_retries cluster.max_read_retries end def read_retry_interval cluster.read_retry_interval end def read read_with_retry do operation.execute end end def write write_with_retry(nil, nil) do operation.execute end end end end let(:operation) do double('operation') end let(:cluster) do double('cluster', next_primary: server_selector) end let(:server_selector) do double('server_selector', select_server: double('server')) end let(:retryable) do klass.new(operation, cluster) end describe '#read_with_retry' do context 'when no exception occurs' do before do expect(operation).to receive(:execute).and_return(true) end it 'executes the operation once' do expect(retryable.read).to be true end end context 'when a socket error occurs' do before do expect(operation).to receive(:execute).and_raise(Mongo::Error::SocketError).ordered expect(cluster).to receive(:max_read_retries).and_return(1).ordered expect(cluster).to receive(:scan!).and_return(true).ordered expect(operation).to receive(:execute).and_return(true).ordered end it 'executes the operation twice' do expect(retryable.read).to be true end end context 'when a socket timeout error occurs' do before do expect(operation).to receive(:execute).and_raise(Mongo::Error::SocketTimeoutError).ordered expect(cluster).to receive(:max_read_retries).and_return(1).ordered expect(cluster).to receive(:scan!).and_return(true).ordered expect(operation).to receive(:execute).and_return(true).ordered end it 'executes the operation twice' do expect(retryable.read).to be true end end context 'when an operation failure occurs' do context 'when the cluster is not a mongos' do before do expect(operation).to receive(:execute).and_raise(Mongo::Error::OperationFailure).ordered expect(cluster).to receive(:sharded?).and_return(false) end it 'raises an exception' do expect { retryable.read }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the cluster is a mongos' do context 'when the operation failure is not retryable' do let(:error) do Mongo::Error::OperationFailure.new('not authorized') end before do expect(operation).to receive(:execute).and_raise(error).ordered expect(cluster).to receive(:sharded?).and_return(true) end it 'raises the exception' do expect { retryable.read }.to raise_error(Mongo::Error::OperationFailure) end end context 'when the operation failure is retryable' do let(:error) do Mongo::Error::OperationFailure.new('no master') end context 'when the retry succeeds' do before do expect(operation).to receive(:execute).and_raise(error).ordered expect(cluster).to receive(:sharded?).and_return(true) expect(cluster).to receive(:max_read_retries).and_return(1).ordered expect(cluster).to receive(:read_retry_interval).and_return(0.1).ordered expect(operation).to receive(:execute).and_return(true).ordered end it 'returns the result' do expect(retryable.read).to be true end end context 'when the retry fails once and then succeeds' do before do expect(operation).to receive(:execute).and_raise(error).ordered expect(cluster).to receive(:sharded?).and_return(true) expect(cluster).to receive(:max_read_retries).and_return(2).ordered expect(cluster).to receive(:read_retry_interval).and_return(0.1).ordered 
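# Second attempt: execute raises once more, so the retry logic consults
# max_read_retries and sleeps for read_retry_interval again before the third
# and final attempt (below) succeeds.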
expect(operation).to receive(:execute).and_raise(error).ordered expect(cluster).to receive(:sharded?).and_return(true) expect(cluster).to receive(:max_read_retries).and_return(2).ordered expect(cluster).to receive(:read_retry_interval).and_return(0.1).ordered expect(operation).to receive(:execute).and_return(true).ordered end it 'returns the result' do expect(retryable.read).to be true end end end end end end describe '#write_with_retry' do context 'when no exception occurs' do before do expect(operation).to receive(:execute).and_return(true) end it 'executes the operation once' do expect(retryable.write).to be true end end context 'when a not master error occurs' do before do expect(operation).to receive(:execute).and_raise(Mongo::Error::OperationFailure.new('not master')).ordered expect(cluster).to receive(:scan!).and_return(true).ordered expect(operation).to receive(:execute).and_return(true).ordered end it 'executes the operation twice' do expect(retryable.write).to be true end end context 'when a not primary error occurs' do before do expect(operation).to receive(:execute).and_raise(Mongo::Error::OperationFailure.new('Not primary')).ordered expect(cluster).to receive(:scan!).and_return(true).ordered expect(operation).to receive(:execute).and_return(true).ordered end it 'executes the operation twice' do expect(retryable.write).to be true end end context 'when a normal operation failure occurs' do before do expect(operation).to receive(:execute).and_raise(Mongo::Error::OperationFailure).ordered end it 'raises an exception' do expect { retryable.write }.to raise_error(Mongo::Error::OperationFailure) end end end end mongo-2.5.1/spec/mongo/address/0000755000004100000410000000000013257253113016366 5ustar www-datawww-datamongo-2.5.1/spec/mongo/address/ipv6_spec.rb0000644000004100000410000000344513257253113020617 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Address::IPv6 do let(:resolver) do described_class.new(*described_class.parse(address)) end describe 'self.parse' do context 'when a port is provided' do it 'returns the host and port' do expect(described_class.parse('[::1]:27017')).to eq(['::1', 27017]) end end context 'when no port is provided' do it 'returns the host and port' do expect(described_class.parse('[::1]')).to eq(['::1', 27017]) end end end describe '#initialize' do context 'when a port is provided' do let(:address) do '[::1]:27017' end it 'sets the port' do expect(resolver.port).to eq(27017) end it 'sets the host' do expect(resolver.host).to eq('::1') end end context 'when no port is provided' do let(:address) do '[::1]' end it 'sets the port to 27017' do expect(resolver.port).to eq(27017) end it 'sets the host' do expect(resolver.host).to eq('::1') end end end describe '#socket' do let(:address) do '[::1]' end context 'when ssl options are provided' do let(:socket) do resolver.socket(5, :ssl => true) end it 'returns an ssl socket' do expect(socket).to be_a(Mongo::Socket::SSL) end it 'sets the family as ipv6' do expect(socket.family).to eq(Socket::PF_INET6) end end context 'when ssl options are not provided' do let(:socket) do resolver.socket(5) end it 'returns a tcp socket' do expect(socket).to be_a(Mongo::Socket::TCP) end it 'sets the family a ipv6' do expect(socket.family).to eq(Socket::PF_INET6) end end end end mongo-2.5.1/spec/mongo/address/ipv4_spec.rb0000644000004100000410000000351713257253113020615 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Address::IPv4 do let(:resolver) do described_class.new(*described_class.parse(address)) end 
describe 'self.parse' do context 'when a port is provided' do it 'returns the host and port' do expect(described_class.parse('127.0.0.1:27017')).to eq(['127.0.0.1', 27017]) end end context 'when no port is provided' do it 'returns the host and port' do expect(described_class.parse('127.0.0.1')).to eq(['127.0.0.1', 27017]) end end end describe '#initialize' do context 'when a port is provided' do let(:address) do '127.0.0.1:27017' end it 'sets the port' do expect(resolver.port).to eq(27017) end it 'sets the host' do expect(resolver.host).to eq('127.0.0.1') end end context 'when no port is provided' do let(:address) do '127.0.0.1' end it 'sets the port to 27017' do expect(resolver.port).to eq(27017) end it 'sets the host' do expect(resolver.host).to eq('127.0.0.1') end end end describe '#socket' do let(:address) do '127.0.0.1' end context 'when ssl options are provided' do let(:socket) do resolver.socket(5, :ssl => true) end it 'returns an ssl socket' do expect(socket).to be_a(Mongo::Socket::SSL) end it 'sets the family as ipv4' do expect(socket.family).to eq(Socket::PF_INET) end end context 'when ssl options are not provided' do let(:socket) do resolver.socket(5) end it 'returns a tcp socket' do expect(socket).to be_a(Mongo::Socket::TCP) end it 'sets the family a ipv4' do expect(socket.family).to eq(Socket::PF_INET) end end end end mongo-2.5.1/spec/mongo/address/unix_spec.rb0000644000004100000410000000135313257253113020712 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Address::Unix do let(:resolver) do described_class.new(*described_class.parse(address)) end describe 'self.parse' do it 'returns the host and no port' do expect(described_class.parse('/path/to/socket.sock')).to eq(['/path/to/socket.sock']) end end describe '#initialize' do let(:address) do '/path/to/socket.sock' end it 'sets the host' do expect(resolver.host).to eq('/path/to/socket.sock') end end describe '#socket' do let(:address) do '/tmp/mongodb-27017.sock' end let(:socket) do resolver.socket(5) end it 'returns a unix socket' do expect(socket).to be_a(Mongo::Socket::Unix) end end end mongo-2.5.1/spec/mongo/dbref_spec.rb0000644000004100000410000000614413257253113017367 0ustar www-datawww-datarequire 'spec_helper' require 'json' describe Mongo::DBRef do let(:object_id) do BSON::ObjectId.new end describe '#as_json' do context 'when the database is not provided' do let(:dbref) do described_class.new('users', object_id) end it 'returns the json document without database' do expect(dbref.as_json).to eq({ '$ref' => 'users', '$id' => object_id }) end end context 'when the database is provided' do let(:dbref) do described_class.new('users', object_id, 'database') end it 'returns the json document with database' do expect(dbref.as_json).to eq({ '$ref' => 'users', '$id' => object_id, '$db' => 'database' }) end end end describe '#initialize' do let(:dbref) do described_class.new('users', object_id) end it 'sets the collection' do expect(dbref.collection).to eq('users') end it 'sets the id' do expect(dbref.id).to eq(object_id) end context 'when a database is provided' do let(:dbref) do described_class.new('users', object_id, 'db') end it 'sets the database' do expect(dbref.database).to eq('db') end end end describe '#to_bson' do let(:dbref) do described_class.new('users', object_id, 'database') end it 'converts the underlying document to bson' do expect(dbref.to_bson.to_s).to eq(dbref.as_json.to_bson.to_s) end end describe '#to_json' do context 'when the database is not provided' do let(:dbref) do 
described_class.new('users', object_id) end it 'returns the json document without database' do expect(dbref.to_json).to eq("{\"$ref\":\"users\",\"$id\":#{object_id.to_json}}") end end context 'when the database is provided' do let(:dbref) do described_class.new('users', object_id, 'database') end it 'returns the json document with database' do expect(dbref.to_json).to eq("{\"$ref\":\"users\",\"$id\":#{object_id.to_json},\"$db\":\"database\"}") end end end describe '#from_bson' do let(:buffer) do dbref.to_bson end let(:decoded) do BSON::Document.from_bson(BSON::ByteBuffer.new(buffer.to_s)) end context 'when a database exists' do let(:dbref) do described_class.new('users', object_id, 'database') end it 'decodes the ref' do expect(decoded.collection).to eq('users') end it 'decodes the id' do expect(decoded.id).to eq(object_id) end it 'decodes the database' do expect(decoded.database).to eq('database') end end context 'when no database exists' do let(:dbref) do described_class.new('users', object_id) end it 'decodes the ref' do expect(decoded.collection).to eq('users') end it 'decodes the id' do expect(decoded.id).to eq(object_id) end it 'sets the database to nil' do expect(decoded.database).to be_nil end end end end mongo-2.5.1/spec/mongo/server_selection_rtt_spec.rb0000644000004100000410000000515313257253113022550 0ustar www-datawww-datarequire 'spec_helper' describe 'Server Selection moving average round trip time calculation' do include Mongo::ServerSelection::RTT SERVER_SELECTION_RTT_TESTS.each do |file| spec = Mongo::ServerSelection::RTT::Spec.new(file) context(spec.description) do before(:all) do module Mongo class Server # We monkey-patch the monitor here, so the last average rtt can be controlled. # We keep the API of Monitor#initialize but add in an extra option and set the last rtt. # # @since 2.0.0 class Monitor alias :original_initialize :initialize def initialize(address, listeners, options = {}) @description = Mongo::Server::Description.new(address, {}) @inspector = Mongo::Server::Description::Inspector.new(listeners) @options = options.freeze @connection = Connection.new(address, options) @last_round_trip_time = options[:avg_rtt_ms] @mutex = Mutex.new end # We monkey patch this method to use an instance variable instead of calculating time elapsed. # # @since 2.0.0 alias :original_average_round_trip_time :average_round_trip_time def average_round_trip_time(start) new_rtt = @new_rtt_ms RTT_WEIGHT_FACTOR * new_rtt + (1 - RTT_WEIGHT_FACTOR) * (@last_round_trip_time || new_rtt) end end end end end after(:all) do module Mongo class Server # Return the monitor implementation to its original for the other # tests in the suite. class Monitor alias :initialize :original_initialize remove_method(:original_initialize) alias :average_round_trip_time :original_average_round_trip_time remove_method(:original_average_round_trip_time) end end end end let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:monitor) do Mongo::Server::Monitor.new(address, Mongo::Event::Listeners.new, TEST_OPTIONS.merge(avg_rtt_ms: spec.avg_rtt_ms)) end before do monitor.instance_variable_set(:@new_rtt_ms, spec.new_rtt_ms) monitor.scan! 
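# The patched #average_round_trip_time above is an exponentially weighted moving
# average: RTT_WEIGHT_FACTOR * new_rtt + (1 - RTT_WEIGHT_FACTOR) * last_rtt.
# As a worked example, assuming a weight factor of 0.2, a previous average of
# 10ms and a new sample of 20ms give 0.2 * 20 + 0.8 * 10 = 12ms.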
end it 'correctly calculates the moving average round trip time' do expect(monitor.description.average_round_trip_time).to eq(spec.new_avg_rtt) end end end end mongo-2.5.1/spec/mongo/crud_spec.rb0000644000004100000410000000174113257253113017240 0ustar www-datawww-datarequire 'spec_helper' describe 'CRUD' do CRUD_TESTS.each do |file| spec = Mongo::CRUD::Spec.new(file) context(spec.description) do spec.tests.each do |test| around do |example| if spec.server_version_satisfied?(authorized_client) example.run end end context(test.description) do before(:each) do test.setup_test(authorized_collection) end after(:each) do authorized_collection.delete_many end let(:results) do test.run(authorized_collection) end it 'returns the correct result' do expect(results).to match_operation_result(test) end it 'has the correct data in the collection', if: test.outcome_collection_data do results expect(authorized_collection.find.to_a).to match_collection_data(test) end end end end end end mongo-2.5.1/spec/mongo/change_stream_examples_spec.rb0000644000004100000410000001414313257253113023001 0ustar www-datawww-datarequire 'spec_helper' describe 'change streams examples in Ruby', if: test_change_streams? do let!(:inventory) do client[:inventory] end let(:client) do authorized_client.with(max_pool_size: 5, wait_queue_timeout: 3) end before do inventory.drop end after do client.close inventory.drop end context 'example 1 - basic watching'do it 'returns a change after an insertion' do insert_thread = Thread.new do sleep 2 inventory.insert_one(x: 1) end stream_thread = Thread.new do # Start Changestream Example 1 cursor = inventory.watch.to_enum next_change = cursor.next # End Changestream Example 1 end insert_thread.value change = stream_thread.value expect(change['_id']).not_to be_nil expect(change['_id']['_data']).not_to be_nil expect(change['operationType']).to eq('insert') expect(change['fullDocument']).not_to be_nil expect(change['fullDocument']['_id']).not_to be_nil expect(change['fullDocument']['x']).to eq(1) expect(change['ns']).not_to be_nil expect(change['ns']['db']).to eq(TEST_DB) expect(change['ns']['coll']).to eq(inventory.name) expect(change['documentKey']).not_to be_nil expect(change['documentKey']['_id']).to eq(change['fullDocument']['_id']) end end context 'example 2 - full document update lookup specified' do it 'returns a change and the delta after an insertion' do inventory.insert_one(_id: 1, x: 2) update_thread = Thread.new do sleep 2 inventory.update_one({ _id: 1}, { '$set' => { x: 5 }}) end stream_thread = Thread.new do # Start Changestream Example 2 cursor = inventory.watch([], full_document: 'updateLookup').to_enum next_change = cursor.next # End Changestream Example 2 end update_thread.value change = stream_thread.value expect(change['_id']).not_to be_nil expect(change['_id']['_data']).not_to be_nil expect(change['operationType']).to eq('update') expect(change['fullDocument']).not_to be_nil expect(change['fullDocument']['_id']).to eq(1) expect(change['fullDocument']['x']).to eq(5) expect(change['ns']).not_to be_nil expect(change['ns']['db']).to eq(TEST_DB) expect(change['ns']['coll']).to eq(inventory.name) expect(change['documentKey']).not_to be_nil expect(change['documentKey']['_id']).to eq(1) expect(change['updateDescription']).not_to be_nil expect(change['updateDescription']['updatedFields']).not_to be_nil expect(change['updateDescription']['updatedFields']['x']).to eq(5) expect(change['updateDescription']['removedFields']).to eq([]) end end context 'example 3 - resuming from a 
previous change' do it 'returns the correct change when resuming' do stream = inventory.watch cursor = stream.to_enum inventory.insert_one(x: 1) next_change = cursor.next expect(next_change['_id']).not_to be_nil expect(next_change['_id']['_data']).not_to be_nil expect(next_change['operationType']).to eq('insert') expect(next_change['fullDocument']).not_to be_nil expect(next_change['fullDocument']['_id']).not_to be_nil expect(next_change['fullDocument']['x']).to eq(1) expect(next_change['ns']).not_to be_nil expect(next_change['ns']['db']).to eq(TEST_DB) expect(next_change['ns']['coll']).to eq(inventory.name) expect(next_change['documentKey']).not_to be_nil expect(next_change['documentKey']['_id']).to eq(next_change['fullDocument']['_id']) inventory.insert_one(x: 2) next_next_change = cursor.next stream.close expect(next_next_change['_id']).not_to be_nil expect(next_next_change['_id']['_data']).not_to be_nil expect(next_next_change['operationType']).to eq('insert') expect(next_next_change['fullDocument']).not_to be_nil expect(next_next_change['fullDocument']['_id']).not_to be_nil expect(next_next_change['fullDocument']['x']).to eq(2) expect(next_next_change['ns']).not_to be_nil expect(next_next_change['ns']['db']).to eq(TEST_DB) expect(next_next_change['ns']['coll']).to eq(inventory.name) expect(next_next_change['documentKey']).not_to be_nil expect(next_next_change['documentKey']['_id']).to eq(next_next_change['fullDocument']['_id']) # Start Changestream Example 3 resume_token = next_change['_id'] cursor = inventory.watch([], resume_after: resume_token).to_enum resumed_change = cursor.next # End Changestream Example 3 expect(resumed_change.length).to eq(next_next_change.length) resumed_change.each { |key| expect(resumed_change[key]).to eq(next_next_change[key]) } end end context 'example 4 - using a pipeline to filter changes' do it 'returns the filtered changes' do ops_thread = Thread.new do sleep 2 inventory.insert_one(username: 'wallace') inventory.insert_one(username: 'alice') inventory.delete_one(username: 'wallace') end stream_thread = Thread.new do # Start Changestream Example 4 pipeline = [ {'$match' => { '$or' => [{ 'fullDocument.username' => 'alice' }, { 'operationType' => 'delete' }] } }] cursor = inventory.watch(pipeline).to_enum cursor.next # End Changestream Example 4 end ops_thread.value change = stream_thread.value expect(change['_id']).not_to be_nil expect(change['_id']['_data']).not_to be_nil expect(change['operationType']).to eq('insert') expect(change['fullDocument']).not_to be_nil expect(change['fullDocument']['_id']).not_to be_nil expect(change['fullDocument']['username']).to eq('alice') expect(change['ns']).not_to be_nil expect(change['ns']['db']).to eq(TEST_DB) expect(change['ns']['coll']).to eq(inventory.name) expect(change['documentKey']).not_to be_nil expect(change['documentKey']['_id']).to eq(change['fullDocument']['_id']) end end end mongo-2.5.1/spec/mongo/monitoring/0000755000004100000410000000000013257253113017126 5ustar www-datawww-datamongo-2.5.1/spec/mongo/monitoring/command_log_subscriber_spec.rb0000644000004100000410000000312013257253113025163 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Monitoring::CommandLogSubscriber do describe '#started' do let(:filter) do (1...100).reduce({}) do |hash, i| hash[i] = i hash end end let(:command) do { find: 'users', filter: filter } end let(:event) do Mongo::Monitoring::Event::CommandStarted.new( 'find', 'users', Mongo::Address.new('127.0.0.1:27017'), 12345, 67890, command ) end before do 
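# The command log lines under test are only emitted at DEBUG level, hence the
# level toggling in these hooks. A hedged sketch of the behaviour exercised
# below, using names from this spec:
#
#   subscriber = described_class.new(truncate_logs: false)
#   subscriber.started(event) # logs the full, untruncated command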
Mongo::Logger.level = Logger::DEBUG end after do Mongo::Logger.level = Logger::INFO end context 'when truncating the logs' do context 'when no option is provided' do let(:subscriber) do described_class.new end it 'truncates the logs at 250 characters' do expect(subscriber).to receive(:truncate).with(command).and_call_original subscriber.started(event) end end context 'when true option is provided' do let(:subscriber) do described_class.new(truncate_logs: true) end it 'truncates the logs at 250 characters' do expect(subscriber).to receive(:truncate).with(command).and_call_original subscriber.started(event) end end end context 'when not truncating the logs' do let(:subscriber) do described_class.new(truncate_logs: false) end it 'does not truncate the logs' do expect(subscriber).to_not receive(:truncate) subscriber.started(event) end end end end mongo-2.5.1/spec/mongo/monitoring/event/0000755000004100000410000000000013257253113020247 5ustar www-datawww-datamongo-2.5.1/spec/mongo/monitoring/event/command_started_spec.rb0000644000004100000410000000102113257253113024744 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Monitoring::Event::CommandStarted do describe '#initialize' do let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:command) do BSON::Document.new(test: 'value') end context 'when the command should be redacted' do let(:event) do described_class.new('copydb', 'admin', address, 1, 2, command) end it 'sets the command to an empty document' do expect(event.command).to be_empty end end end end mongo-2.5.1/spec/mongo/monitoring/event/command_succeeded_spec.rb0000644000004100000410000000101613257253113025226 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Monitoring::Event::CommandSucceeded do describe '#initialize' do let(:address) do Mongo::Address.new('127.0.0.1:27017') end let(:reply) do BSON::Document.new(test: 'value') end context 'when the reply should be redacted' do let(:event) do described_class.new('copydb', 'admin', address, 1, 2, reply, 0.5) end it 'sets the reply to an empty document' do expect(event.reply).to be_empty end end end end mongo-2.5.1/spec/mongo/monitoring/event/secure_spec.rb0000644000004100000410000000404013257253113023072 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Monitoring::Event::Secure do let(:document) do BSON::Document.new(test: 'value') end let(:klass) do Class.new do include Mongo::Monitoring::Event::Secure end end describe '#redacted' do let(:secure) do klass.new end context 'when the command must be redacted' do context 'when the command name is a string' do let(:redacted) do secure.redacted('saslStart', document) end it 'returns an empty document' do expect(redacted).to be_empty end end context 'when the command name is a symbol' do let(:redacted) do secure.redacted(:saslStart, document) end it 'returns an empty document' do expect(redacted).to be_empty end end end context 'when the command is not in the redacted list' do let(:redacted) do secure.redacted(:find, document) end it 'returns the document' do expect(redacted).to eq(document) end end end describe '#compression_allowed?' 
do context 'when the selector represents a command for which compression is not allowed' do let(:secure) do klass.new end Mongo::Monitoring::Event::Secure::REDACTED_COMMANDS.each do |command| let(:selector) do { command => 1 } end context "when the command is #{command}" do it 'does not allow compression for the command' do expect(secure.compression_allowed?(selector.keys.first)).to be(false) end end end end context 'when the selector represents a command for which compression is allowed' do let(:selector) do { ping: 1 } end let(:secure) do klass.new end context 'when the command is :ping' do it 'does not allow compression for the command' do expect(secure.compression_allowed?(selector.keys.first)).to be(true) end end end end end mongo-2.5.1/spec/mongo/database_spec.rb0000644000004100000410000003443713257253113020057 0ustar www-datawww-datarequire 'spec_helper' describe Mongo::Database do describe '#==' do let(:database) do described_class.new(authorized_client, TEST_DB) end context 'when the names are the same' do let(:other) do described_class.new(authorized_client, TEST_DB) end it 'returns true' do expect(database).to eq(other) end end context 'when the names are not the same' do let(:other) do described_class.new(authorized_client, :other) end it 'returns false' do expect(database).to_not eq(other) end end context 'when the object is not a database' do it 'returns false' do expect(database).to_not eq('test') end end end describe '#[]' do let(:database) do described_class.new(authorized_client, TEST_DB) end context 'when providing a valid name' do let(:collection) do database[:users] end it 'returns a new collection' do expect(collection.name).to eq('users') end end context 'when providing an invalid name' do it 'raises an error' do expect do database[nil] end.to raise_error(Mongo::Error::InvalidCollectionName) end end context 'when the client has options' do let(:client) do Mongo::Client.new([default_address.host], TEST_OPTIONS.merge(read: { mode: :secondary })) end let(:database) do client.database end let(:collection) do database[:with_read_pref] end it 'applies the options to the collection' do expect(collection.server_selector).to eq(Mongo::ServerSelector.get(mode: :secondary)) expect(collection.read_preference).to eq(BSON::Document.new(mode: :secondary)) end end end describe '#collection_names' do let(:database) do described_class.new(authorized_client, TEST_DB) end before do database[:users].create end after do database[:users].drop end it 'returns the stripped names of the collections' do expect(database.collection_names).to include('users') end it 'does not include system collections' do expect(database.collection_names).to_not include('system.indexes') end context 'when provided a session' do let(:operation) do database.collection_names(session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when specifying a batch size' do it 'returns the stripped names of the collections' do expect(database.collection_names(batch_size: 1).to_a).to include('users') end end context 'when there are more collections than the initial batch size' do before do 2.times do |i| database["#{i}_dalmatians"].create end end after do 2.times do |i| database["#{i}_dalmatians"].drop end end it 'returns all collections' do expect(database.collection_names(batch_size: 1).select { |c| c =~ /dalmatians/}.size).to eq(2) end end end describe '#list_collections' do let(:database) do described_class.new(authorized_client, TEST_DB) end let(:result) do 
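# #list_collections yields one info document per collection; the block below
# keeps only the names. A rough sketch of a yielded document (fields other
# than 'name' vary by server version):
#
#   { 'name' => 'users', 'options' => {} }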
database.list_collections.map do |info| info['name'] end end before do database[:users].create end after do database[:users].drop end it 'returns a list of the collections info', if: list_command_enabled? do expect(result).to include('users') end it 'returns a list of the collections info', unless: list_command_enabled? do expect(result).to include("#{TEST_DB}.users") end end describe '#collections' do context 'when the database exists' do let(:database) do described_class.new(authorized_client, TEST_DB) end let(:collection) do Mongo::Collection.new(database, 'users') end before do database[:users].create end after do database[:users].drop end it 'returns collection objects for each name' do expect(database.collections).to include(collection) end end context 'when the database does not exist' do let(:database) do described_class.new(authorized_client, 'invalid_database') end it 'returns an empty list' do expect(database.collections).to be_empty end end context 'when the user is not authorized', if: auth_enabled? do let(:database) do described_class.new(unauthorized_client, TEST_DB) end it 'raises an exception' do expect { database.collections }.to raise_error(Mongo::Error::OperationFailure) end end end describe '#command' do let(:database) do described_class.new(authorized_client, TEST_DB) end it 'sends the query command to the cluster' do expect(database.command(:ismaster => 1).written_count).to eq(0) end it 'does not mutate the command selector' do expect(database.command({:ismaster => 1}.freeze).written_count).to eq(0) end context 'when provided a session', if: sessions_enabled? do let(:operation) do client.database.command({ :ismaster => 1 }, session: session) end let(:failed_operation) do client.database.command({ :invalid => 1 }, session: session) end let(:session) do client.start_session end let(:client) do subscribed_client end it_behaves_like 'an operation using a session' it_behaves_like 'a failed operation using a session' let(:full_command) do EventSubscriber.started_events.find { |cmd| cmd.command_name == :ismaster }.command end it 'does not add a afterClusterTime field' do # Ensure that the session has an operation time client.database.command({ ping: 1 }, session: session) operation expect(full_command['readConcern']).to be_nil end end context 'when a read concern is provided', if: find_command_enabled? do context 'when the read concern is valid' do it 'sends the read concern' do expect { database.command(:ismaster => 1, readConcern: { level: 'local' }) }.to_not raise_error end end context 'when the read concern is not valid' do it 'raises an exception', if: (find_command_enabled? && !sharded?) do expect { database.command(:ismaster => 1, readConcern: { level: 'yay' }) }.to raise_error(Mongo::Error::OperationFailure) end end end context 'when no read preference is provided', unless: sharded? do let!(:primary_server) do database.cluster.next_primary end before do expect(primary_server).to receive(:with_connection).at_least(:once).and_call_original end it 'uses read preference of primary' do expect(database.command(ping: 1)).to be_successful end end context 'when the client has a read preference set', unless: sharded? 
do let!(:primary_server) do database.cluster.next_primary end let(:read_preference) do { :mode => :secondary, :tag_sets => [{ 'non' => 'existent' }] } end let(:client) do authorized_client.with(read: read_preference) end let(:database) do described_class.new(client, TEST_DB, client.options) end before do expect(primary_server).to receive(:with_connection).at_least(:once).and_call_original end it 'does not use the client read preference 'do expect(database.command(ping: 1)).to be_successful end end context 'when there is a read preference argument provided', unless: sharded? do let(:read_preference) do { :mode => :secondary, :tag_sets => [{ 'non' => 'existent' }] } end let(:client) do authorized_client.with(server_selection_timeout: 0.2) end let(:database) do described_class.new(client, TEST_DB, client.options) end before do allow(database.cluster).to receive(:single?).and_return(false) end it 'uses the read preference argument' do expect { database.command({ ping: 1 }, read: read_preference) }.to raise_error(Mongo::Error::NoServerAvailable) end end context 'when the client has a server_selection_timeout set', unless: sharded? do let(:client) do authorized_client.with(server_selection_timeout: 0) end let(:database) do described_class.new(client, TEST_DB, client.options) end it 'uses the client server_selection_timeout' do expect { database.command(ping: 1) }.to raise_error(Mongo::Error::NoServerAvailable) end end context 'when a write concern is not defined on the client/database object' do context 'when a write concern is provided in the selector', if: standalone? do let(:cmd) do { insert: TEST_COLL, documents: [ { a: 1 } ], writeConcern: INVALID_WRITE_CONCERN } end it 'uses the write concern' do expect { database.command(cmd) }.to raise_exception(Mongo::Error::OperationFailure) end end end context 'when a write concern is defined on the client/database object' do let(:client_options) do { write: INVALID_WRITE_CONCERN } end let(:database) do described_class.new(authorized_client.with(client_options), TEST_DB) end context 'when a write concern is not in the command selector' do let(:cmd) do { insert: TEST_COLL, documents: [ { a: 1 } ] } end it 'does not apply a write concern' do expect(database.command(cmd).written_count).to eq(1) end end context 'when a write concern is provided in the command selector', if: standalone? do let(:cmd) do { insert: TEST_COLL, documents: [ { a: 1 } ], writeConcern: INVALID_WRITE_CONCERN } end it 'uses the write concern' do expect { database.command(cmd) }.to raise_exception(Mongo::Error::OperationFailure) end end end end describe '#drop' do let(:database) do described_class.new(authorized_client, TEST_DB) end it 'drops the database' do expect(database.drop).to be_successful end context 'when provided a session' do let(:operation) do database.drop(session: session) end let(:client) do authorized_client end it_behaves_like 'an operation using a session' end context 'when the client/database has a write concern' do let(:client_options) do { write: INVALID_WRITE_CONCERN, database: :safe_to_drop } end let(:client) do root_authorized_client.with(client_options) end let(:database_with_write_options) do client.database end context 'when the server supports write concern on the dropDatabase command', if: (collation_enabled? && standalone?) 
do it 'applies the write concern' do expect{ database_with_write_options.drop }.to raise_exception(Mongo::Error::OperationFailure) end end context 'when the server does not support write concern on the dropDatabase command', unless: collation_enabled? do it 'does not apply the write concern' do expect(database_with_write_options.drop).to be_successful end end end end describe '#initialize' do context 'when provided a valid name' do let(:database) do described_class.new(authorized_client, TEST_DB) end it 'sets the name as a string' do expect(database.name).to eq(TEST_DB) end it 'sets the client' do expect(database.client).to eq(authorized_client) end end context 'when the name is nil' do it 'raises an error' do expect do described_class.new(authorized_client, nil) end.to raise_error(Mongo::Error::InvalidDatabaseName) end end end describe '#inspect' do let(:database) do described_class.new(authorized_client, TEST_DB) end it 'includes the object id' do expect(database.inspect).to include(database.object_id.to_s) end it 'includes the name' do expect(database.inspect).to include(database.name) end end describe '#fs', unless: sharded? do let(:database) do described_class.new(authorized_client, TEST_DB) end shared_context 'a GridFS database' do it 'returns a Grid::FS for the db' do expect(fs).to be_a(Mongo::Grid::FSBucket) end context 'when operating on the fs' do let(:file) do Mongo::Grid::File.new('Hello!', :filename => 'test.txt') end after do fs.files_collection.delete_many fs.chunks_collection.delete_many end let(:from_db) do fs.insert_one(file) fs.find({ filename: 'test.txt' }, limit: 1).first end it 'returns the assembled file from the db' do expect(from_db['filename']).to eq(file.info.filename) end end end context 'when no options are provided' do let(:fs) do database.fs end it_behaves_like 'a GridFS database' end context 'when a custom prefix is provided' do context 'when the option is fs_name' do let(:fs) do database.fs(:fs_name => 'grid') end it 'sets the custom prefix' do expect(fs.prefix).to eq('grid') end it_behaves_like 'a GridFS database' end context 'when the option is bucket_name' do let(:fs) do database.fs(:bucket_name => 'grid') end it 'sets the custom prefix' do expect(fs.prefix).to eq('grid') end it_behaves_like 'a GridFS database' end end end end mongo-2.5.1/spec/spec_helper.rb0000644000004100000410000002023713257253113016444 0ustar www-datawww-dataTEST_SET = 'ruby-driver-rs' COVERAGE_MIN = 90 CURRENT_PATH = File.expand_path(File.dirname(__FILE__)) SERVER_DISCOVERY_TESTS = Dir.glob("#{CURRENT_PATH}/support/sdam/**/*.yml") SDAM_MONITORING_TESTS = Dir.glob("#{CURRENT_PATH}/support/sdam_monitoring/*.yml") SERVER_SELECTION_RTT_TESTS = Dir.glob("#{CURRENT_PATH}/support/server_selection/rtt/*.yml") SERVER_SELECTION_TESTS = Dir.glob("#{CURRENT_PATH}/support/server_selection/selection/**/*.yml") MAX_STALENESS_TESTS = Dir.glob("#{CURRENT_PATH}/support/max_staleness/**/*.yml") CRUD_TESTS = Dir.glob("#{CURRENT_PATH}/support/crud_tests/**/*.yml") RETRYABLE_WRITES_TESTS = Dir.glob("#{CURRENT_PATH}/support/retryable_writes_tests/**/*.yml") COMMAND_MONITORING_TESTS = Dir.glob("#{CURRENT_PATH}/support/command_monitoring/**/*.yml") CONNECTION_STRING_TESTS = Dir.glob("#{CURRENT_PATH}/support/connection_string_tests/*.yml") DNS_SEEDLIST_DISCOVERY_TESTS = Dir.glob("#{CURRENT_PATH}/support/dns_seedlist_discovery_tests/*.yml") GRIDFS_TESTS = Dir.glob("#{CURRENT_PATH}/support/gridfs_tests/*.yml") if ENV['DRIVERS_TOOLS'] CLIENT_CERT_PEM = ENV['DRIVER_TOOLS_CLIENT_CERT_PEM'] CLIENT_KEY_PEM = 
ENV['DRIVER_TOOLS_CLIENT_KEY_PEM'] CA_PEM = ENV['DRIVER_TOOLS_CA_PEM'] CLIENT_KEY_ENCRYPTED_PEM = ENV['DRIVER_TOOLS_CLIENT_KEY_ENCRYPTED_PEM'] else SSL_CERTS_DIR = "#{CURRENT_PATH}/support/certificates" CLIENT_PEM = "#{SSL_CERTS_DIR}/client.pem" CLIENT_PASSWORD_PEM = "#{SSL_CERTS_DIR}/password_protected.pem" CA_PEM = "#{SSL_CERTS_DIR}/ca.pem" CRL_PEM = "#{SSL_CERTS_DIR}/crl.pem" CLIENT_KEY_PEM = "#{SSL_CERTS_DIR}/client_key.pem" CLIENT_CERT_PEM = "#{SSL_CERTS_DIR}/client_cert.pem" CLIENT_KEY_ENCRYPTED_PEM = "#{SSL_CERTS_DIR}/client_key_encrypted.pem" CLIENT_KEY_PASSPHRASE = "passphrase" end require 'mongo' Mongo::Logger.logger = Logger.new($stdout) Mongo::Logger.logger.level = Logger::INFO Encoding.default_external = Encoding::UTF_8 require 'support/travis' require 'support/matchers' require 'support/event_subscriber' require 'support/authorization' require 'support/server_discovery_and_monitoring' require 'support/server_selection_rtt' require 'support/server_selection' require 'support/sdam_monitoring' require 'support/crud' require 'support/command_monitoring' require 'support/connection_string' require 'support/gridfs' RSpec.configure do |config| config.color = true config.fail_fast = true unless ENV['CI'] config.formatter = 'documentation' config.include(Authorization) config.before(:suite) do begin # Create the root user administrator as the first user to be added to the # database. This user will need to be authenticated in order to add any # more users to any other databases. ADMIN_UNAUTHORIZED_CLIENT.database.users.create(ROOT_USER) ADMIN_UNAUTHORIZED_CLIENT.close rescue Exception => e end begin # Adds the test user to the test database with permissions on all # databases that will be used in the test suite. ADMIN_AUTHORIZED_TEST_CLIENT.database.users.create(TEST_USER) rescue Exception => e end end end # Determine whether the test clients are connecting to a standalone. # # @since 2.0.0 def standalone? $mongo_client ||= initialize_scanned_client! $standalone ||= $mongo_client.cluster.servers.first.standalone? end # Determine whether the test clients are connecting to a replica set. # # @since 2.0.0 def replica_set? $mongo_client ||= initialize_scanned_client! $replica_set ||= $mongo_client.cluster.replica_set? end # Determine whether the test clients are connecting to a sharded cluster # or a single mongos. # # @since 2.0.0 def sharded? $mongo_client ||= initialize_scanned_client! $sharded ||= ($mongo_client.cluster.sharded? || single_mongos?) end # Determine whether the single address provided is a replica set member. # @note To run the specs relying on this to return true, # start a replica set and set the environment variable # MONGODB_ADDRESSES to the address of a single member. # # @since 2.0.0 def single_rs_member? $mongo_client ||= initialize_scanned_client! $single_rs_member ||= (single_seed? && $mongo_client.cluster.servers.first.replica_set_name) end # Determine whether the single address provided is a mongos. # @note To run the specs relying on this to return true, # start a sharded cluster and set the environment variable # MONGODB_ADDRESSES to the address of a single mongos. # # @since 2.0.0 def single_mongos? $mongo_client ||= initialize_scanned_client! $single_mongos ||= (single_seed? && $mongo_client.cluster.servers.first.mongos?) end # Determine whether a single address was provided. # # @since 2.0.0 def single_seed? 
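# True only when a single seed address was supplied to the suite, e.g.
# (illustrative): MONGODB_ADDRESSES=127.0.0.1:27017 gives ADDRESSES.size == 1.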
ADDRESSES.size == 1 end # For instances where behaviour is different on different versions, we need to # determine in the specs if we are 3.6 or higher. # # @since 2.5.0 def op_msg_enabled? $mongo_client ||= initialize_scanned_client! $op_msg_enabled ||= $mongo_client.cluster.servers.first.features.op_msg_enabled? end alias :change_stream_enabled? :op_msg_enabled? alias :sessions_enabled? :op_msg_enabled? # Whether sessions can be tested. Sessions are available on server versions 3.6 # and higher and when connected to a replica set or sharded cluster. # # @since 2.5.0 def test_sessions? sessions_enabled? && (replica_set? || sharded?) end # Whether change streams can be tested. Change streams are available on server versions 3.6 # and higher and when connected to a replica set. # # @since 2.5.0 def test_change_streams? !BSON::Environment.jruby? && change_stream_enabled? & replica_set? end # For instances where behaviour is different on different versions, we need to # determine in the specs if we are 3.6 or higher. # # @since 2.5.0 def array_filters_enabled? $mongo_client ||= initialize_scanned_client! $array_filters_enabled ||= $mongo_client.cluster.servers.first.features.array_filters_enabled? end # For instances where behaviour is different on different versions, we need to # determine in the specs if we are 3.4 or higher. # # @since 2.4.0 def collation_enabled? $mongo_client ||= initialize_scanned_client! $collation_enabled ||= $mongo_client.cluster.servers.first.features.collation_enabled? end # For instances where behaviour is different on different versions, we need to # determine in the specs if we are 3.2 or higher. # # @since 2.0.0 def find_command_enabled? $mongo_client ||= initialize_scanned_client! $find_command_enabled ||= $mongo_client.cluster.servers.first.features.find_command_enabled? end # For instances where behaviour is different on different versions, we need to # determine in the specs if we are 2.7 or higher. # # @since 2.0.0 def list_command_enabled? $mongo_client ||= initialize_scanned_client! $list_command_enabled ||= $mongo_client.cluster.servers.first.features.list_indexes_enabled? end # Is the test suite running locally (not on Travis). # # @since 2.1.0 def testing_ssl_locally? running_ssl? && !(ENV['CI']) end # Should tests relying on external connections be run. # # @since 2.5.1 def test_connecting_externally? !ENV['CI'] && !ENV['EXTERNAL_DISABLED'] end # Is the test suite running on SSL. # # @since 2.0.2 def running_ssl? SSL end # Is the test suite using compression. # # @since 2.5.0 def compression_enabled? COMPRESSORS[:compressors] end # Is the test suite testing compression. # Requires that the server supports compression and compression is used by the test client. # # @since 2.5.0 def testing_compression? compression_enabled? && op_msg_enabled? end alias :scram_sha_1_enabled? :list_command_enabled? # Try running a command on the admin database to see if the mongod was started with auth. # # @since 2.2.0 def auth_enabled? if auth = ENV['AUTH'] auth == 'auth' else $mongo_client ||= initialize_scanned_client! begin $mongo_client.use(:admin).command(getCmdLineOpts: 1).first["argv"].include?("--auth") rescue => e e.message =~ /(not authorized)|(unauthorized)/ end end end # Initializes a basic scanned client to do an ismaster check. # # @since 2.0.0 def initialize_scanned_client! 
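# The feature helpers above memoize $mongo_client from this scanned client and
# inspect the first server's features. A hedged usage sketch:
#
#   client = initialize_scanned_client!
#   client.cluster.servers.first.features.collation_enabled? #=> true on 3.4+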
Mongo::Client.new(ADDRESSES, TEST_OPTIONS.merge(database: TEST_DB)) end # require all shared examples Dir['./spec/support/shared/*.rb'].sort.each { |file| require file } mongo-2.5.1/spec/support/0000755000004100000410000000000013257253113015336 5ustar www-datawww-datamongo-2.5.1/spec/support/gridfs.rb0000644000004100000410000004233113257253113017144 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Matcher for determining whether the operation completed successfully. # # @since 2.1.0 RSpec::Matchers.define :completes_successfully do |test| match do |actual| actual == test.expected_result || test.expected_result.nil? end end # Matcher for determining whether the actual chunks collection matches # the expected chunks collection. # # @since 2.1.0 RSpec::Matchers.define :match_chunks_collection do |expected| match do |actual| return true if expected.nil? if expected.find.to_a.empty? actual.find.to_a.empty? else actual.find.all? do |doc| if matching_doc = expected.find(files_id: doc['files_id'], n: doc['n']).first matching_doc.all? do |k, v| doc[k] == v || k == '_id' end else false end end end end end # Matcher for determining whether the actual files collection matches # the expected files collection. # # @since 2.1.0 RSpec::Matchers.define :match_files_collection do |expected| match do |actual| return true if expected.nil? actual.find.all? do |doc| if matching_doc = expected.find(_id: doc['_id']).first matching_doc.all? do |k, v| doc[k] == v end else false end end end end # Matcher for determining whether the operation raised the correct error. # # @since 2.1.0 RSpec::Matchers.define :match_error do |error| match do |actual| Mongo::GridFS::Test::ERROR_MAPPING[error] == actual.class end end module Mongo module GridFS # Represents a GridFS specification test. # # @since 2.1.0 class Spec # @return [ String ] description The spec description. # # @since 2.1.0 attr_reader :description # Instantiate the new spec. # # @example Create the spec. # Spec.new(file) # # @param [ String ] file The name of the file. # # @since 2.1.0 def initialize(file) @spec = YAML.load(ERB.new(File.new(file).read).result) @description = File.basename(file) @data = @spec['data'] end # Get a list of Tests for each test definition. # # @example Get the list of Tests. # spec.tests # # @return [ Array ] The list of Tests. # # @since 2.1.0 def tests @tests ||= @spec['tests'].collect do |test| Test.new(@data, test) end end end # Contains shared helper functions for converting YAML test values to Ruby objects. # # @since 2.1.0 module Convertible # Convert an integer to the corresponding CRUD method suffix. # # @param [ Integer ] int The limit. # # @return [ String ] The CRUD method suffix. # # @since 2.1.0 def limit(int) int == 0 ? 'many' : 'one' end # Convert an id value to a BSON::ObjectId. # # @param [ Object ] v The value to convert. # @param [ Hash ] opts The options. # # @option opts [ BSON::ObjectId ] :id The id override. # # @return [ BSON::ObjectId ] The object id. 
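#
# @example Converting a YAML-style id (the hex string is illustrative).
#   convert__id('$oid' => '000000000000000000000001')
#   # => BSON::ObjectId('000000000000000000000001')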
# # @since 2.1.0 def convert__id(v, opts = {}) to_oid(v, opts[:id]) end # Convert a value to a date. # # @param [ Object ] v The value to convert. # @param [ Hash ] opts The options. # # @return [ Time ] The upload date time value. # # @since 2.1.0 def convert_uploadDate(v, opts = {}) v.is_a?(Time) ? v : v['$date'] ? Time.parse(v['$date']) : upload_date end # Convert an file id value to a BSON::ObjectId. # # @param [ Object ] v The value to convert. # @param [ Hash ] opts The options. # # @option opts [ BSON::ObjectId ] :id The id override. # # @return [ BSON::ObjectId ] The object id. # # @since 2.1.0 def convert_files_id(v, opts = {}) to_oid(v, opts[:files_id]) end # Convert a value to BSON::Binary data. # # @param [ Object ] v The value to convert. # @param [ Hash ] opts The options. # # @return [ BSON::Binary ] The converted data. # # @since 2.1.0 def convert_data(v, opts = {}) v.is_a?(BSON::Binary) ? v : BSON::Binary.new(to_hex(v['$hex'], opts), :generic) end # Transform documents to have the correct object types for serialization. # # @param [ Array ] docs The documents to transform. # @param [ Hash ] opts The options. # # @return [ Array ] The transformed documents. # # @since 2.1.0 def transform_docs(docs, opts = {}) docs.collect do |doc| doc.each do |k, v| doc[k] = send("convert_#{k}", v, opts) if respond_to?("convert_#{k}") end doc end end # Convert a string to a hex value. # # @param [ String ] string The value to convert. # @param [ Hash ] opts The options. # # @return [ String ] The hex value. # # @since 2.1.0 def to_hex(string, opts = {}) [ string ].pack('H*') end # Convert an object id represented in json to a BSON::ObjectId. # A new BSON::ObjectId is returned if the json document is empty. # # @param [ Object ] value The value to convert. # @param [ Object ] id The id override. # # @return [ BSON::ObjectId ] The object id. # # @since 2.1.0 def to_oid(value, id = nil) if id id elsif value.is_a?(BSON::ObjectId) value elsif value['$oid'] BSON::ObjectId.from_string(value['$oid']) else BSON::ObjectId.new end end # Convert options. # # @return [ Hash ] The options. # # @since 2.1.0 def options @act['arguments']['options'].reduce({}) do |opts, (k, v)| opts.merge!(chunk_size: v) if k == "chunkSizeBytes" opts.merge!(upload_date: upload_date) opts.merge!(content_type: v) if k == "contentType" opts.merge!(metadata: v) if k == "metadata" opts end end end # Represents a single GridFS test. # # @since 2.1.0 class Test include Convertible extend Forwardable def_delegators :@operation, :expected_files_collection, :expected_chunks_collection, :result, :expected_error, :expected_result, :error? # The test description. # # @return [ String ] The test description. # # @since 2.1.0 attr_reader :description # The upload date to use in the test. # # @return [ Time ] The upload date. # # @since 2.1.0 attr_reader :upload_date # Mapping of test error strings to driver classes. # # @since 2.1.0 ERROR_MAPPING = { 'FileNotFound' => Mongo::Error::FileNotFound, 'ChunkIsMissing' => Mongo::Error::MissingFileChunk, 'ChunkIsWrongSize' => Mongo::Error::UnexpectedChunkLength, 'ExtraChunk' => Mongo::Error::ExtraFileChunk, 'RevisionNotFound' => Mongo::Error::InvalidFileRevision } # Instantiate the new GridFS::Test. # # @example Create the test. # Test.new(data, test) # # @param [ Array ] data The documents the files and chunks # collections must have before the test runs. # @param [ Hash ] test The test specification. 
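#
# @example Rough shape of a test specification (abridged from the YAML fixtures).
#   { 'description' => 'Delete when length is 0',
#     'act'    => { 'operation' => 'delete',
#                   'arguments' => { 'id' => { '$oid' => '000000000000000000000001' } } },
#     'assert' => { 'result' => 'void', 'data' => [] } }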
# # @since 2.1.0 def initialize(data, test) @pre_data = data @description = test['description'] @upload_date = Time.now if test['assert']['error'] @operation = UnsuccessfulOp.new(self, test) else @operation = SuccessfulOp.new(self, test) end @result = nil end # Whether the expected and actual collections should be compared after the test runs. # # @return [ true, false ] Whether the actual and expected collections should be compared. # # @since 2.1.0 def assert_data? @operation.assert['data'] end # Run the test. # # @example Run the test # test.run(fs) # # @param [ Mongo::Grid::FSBucket ] fs The Grid::FSBucket to use in the test. # # @since 2.1.0 def run(fs) setup(fs) @operation.run(fs) end # Clear the files and chunks collection in the FSBucket and other collections used in the test. # # @example Clear the test collections # test.clear_collections(fs) # # @param [ Mongo::Grid::FSBucket ] fs The Grid::FSBucket whose collections should be cleared. # # @since 2.1.0 def clear_collections(fs) fs.files_collection.delete_many fs.chunks_collection.delete_many @operation.clear_collections(fs) end private def setup(fs) insert_pre_data(fs) @operation.arrange(fs) end def files_data @files_data ||= transform_docs(@pre_data['files']) end def chunks_data @chunks_data ||= transform_docs(@pre_data['chunks']) end def insert_pre_files_data(fs) fs.files_collection.insert_many(files_data) fs.database['expected.files'].insert_many(files_data) if assert_data? end def insert_pre_chunks_data(fs) fs.chunks_collection.insert_many(chunks_data) fs.database['expected.chunks'].insert_many(chunks_data) if assert_data? end def insert_pre_data(fs) insert_pre_files_data(fs) unless files_data.empty? insert_pre_chunks_data(fs) unless chunks_data.empty? end # Contains logic and helper methods shared between a successful and # non-successful GridFS test operation. # # @since 2.1.0 module Operable extend Forwardable def_delegators :@test, :upload_date # The test operation name. # # @return [ String ] The operation name. # # @since 2.1.0 attr_reader :op # The test assertion. # # @return [ Hash ] The test assertion definition. # # @since 2.1.0 attr_reader :assert # The operation result. # # @return [ Object ] The operation result. # # @since 2.1.0 attr_reader :result # The collection containing the expected files. # # @return [ Mongo::Collection ] The expected files collection. # # @since 2.1.0 attr_reader :expected_files_collection # The collection containing the expected chunks. # # @return [ Mongo::Collection ] The expected chunks collection. # # @since 2.1.0 attr_reader :expected_chunks_collection # Instantiate the new test operation. # # @example Create the test operation. # Test.new(data, test) # # @param [ Test ] test The test. # @param [ Hash ] spec The test specification. # # @since 2.1.0 def initialize(test, spec) @test = test @arrange = spec['arrange'] @act = spec['act'] @op = @act['operation'] @arguments = @act['arguments'] @assert = spec['assert'] end # Arrange the data before running the operation. # This sets up the correct scenario for the test. # # @example Arrange the data. # operation.arrange(fs) # # @param [ Grid::FSBucket ] fs The FSBucket used in the test. # # @since 2.1.0 def arrange(fs) if @arrange @arrange['data'].each do |data| send("#{data.keys.first}_exp_data", fs, data) end end end # Run the test operation. # # @example Execute the operation. # operation.run(fs) # # @param [ Grid::FSBucket ] fs The FSBucket used in the test. # # @result [ Object ] The operation result. 
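#
# @example Collections consulted for the expected state (names as assigned below).
#   fs.database['expected.files']  # expected files documents
#   fs.database['expected.chunks'] # expected chunks documents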
# # @since 2.1.0 def run(fs) @expected_files_collection = fs.database['expected.files'] @expected_chunks_collection = fs.database['expected.chunks'] act(fs) prepare_expected_collections(fs) result end private def prepare_expected_collections(fs) if @test.assert_data? @assert['data'].each do |data| op = "#{data.keys.first}_exp_data" send(op, fs, data) end end end def insert_exp_data(fs, data) coll = fs.database[data['insert']] if coll.name =~ /.files/ opts = { id: @result } else opts = { files_id: @result } end coll.insert_many(transform_docs(data['documents'], opts)) end def delete_exp_data(fs, data) coll = fs.database[data['delete']] data['deletes'].each do |del| id = del['q'].keys.first coll.find(id => to_oid(del['q'][id])).send("delete_#{limit(del['limit'])}") end end def update_exp_data(fs, data) coll = fs.database[data['update']] data['updates'].each do |update| sel = update['q'].merge('files_id' => to_oid(update['q']['files_id'])) data = BSON::Binary.new(to_hex(update['u']['$set']['data']['$hex']), :generic) u = update['u'].merge('$set' => { 'data' => data }) coll.find(sel).update_one(u) end end def upload(fs) io = StringIO.new(to_hex(@arguments['source']['$hex'])) fs.upload_from_stream(@arguments['filename'], io, options) end def download(fs) io = StringIO.new.set_encoding(BSON::BINARY) fs.download_to_stream(to_oid(@arguments['id']), io) io.string end def download_by_name(fs) io = StringIO.new.set_encoding(BSON::BINARY) if @arguments['options'] fs.download_to_stream_by_name(@arguments['filename'], io, revision: @arguments['options']['revision']) else fs.download_to_stream_by_name(@arguments['filename'], io) end io.string end def delete(fs) fs.delete(to_oid(@arguments['id'])) end end # A GridFS test operation that is expected to succeed. # # @since 2.1.0 class SuccessfulOp include Convertible include Test::Operable # The expected result of executing the operation. # # @example Get the expected result. # operation.expected_result # # @result [ Object ] The operation result. # # @since 2.1.0 def expected_result if @assert['result'] == '&result' @result elsif @assert['result'] != 'void' to_hex(@assert['result']['$hex']) end end # Execute the operation. # # @example Execute the operation. # operation.act(fs) # # @param [ Grid::FSBucket ] fs The FSBucket used in the test. # # @result [ Object ] The operation result. # # @since 2.1.0 def act(fs) @result = send(op, fs) end # Whether this operation is expected to raise an error. # # @return [ false ] The operation is expected to succeed. # # @since 2.1.0 def error? false end end class UnsuccessfulOp include Convertible include Test::Operable # Whether this operation is expected to raise an error. # # @return [ true ] The operation is expected to fail. # # @since 2.1.0 def error? true end # The expected error. # # @example Execute the operation. # operation.expected_error # # @return [ String ] The expected error name. # # @since 2.1.0 def expected_error @assert['error'] end # Execute the operation. # # @example Execute the operation. # operation.act(fs) # # @param [ Grid::FSBucket ] fs The FSBucket used in the test. # # @result [ Mongo::Error ] The error encountered. 
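#
# @example The raised error is captured rather than re-raised (a sketch).
#   operation.act(fs)
#   operation.result #=> e.g. a Mongo::Error::FileNotFound instance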
# # @since 2.1.0 def act(fs) begin send(op, fs) rescue => ex @result = ex end end end end end end mongo-2.5.1/spec/support/gridfs_tests/0000755000004100000410000000000013257253113020036 5ustar www-datawww-datamongo-2.5.1/spec/support/gridfs_tests/download_by_name.yml0000644000004100000410000001005513257253113024063 0ustar www-datawww-datadata: files: - _id: { "$oid" : "000000000000000000000001" } length: 1 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "47ed733b8d10be225eceba344d533586" filename: "abc" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000002" } length: 1 chunkSize: 4 uploadDate: { "$date" : "1970-01-02T00:00:00.000Z" } md5: "b15835f133ff2e27c7cb28117bfae8f4" filename: "abc" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000003" } length: 1 chunkSize: 4 uploadDate: { "$date" : "1970-01-03T00:00:00.000Z" } md5: "eccbc87e4b5ce2fe28308fd9f2a7baf3" filename: "abc" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000004" } length: 1 chunkSize: 4 uploadDate: { "$date" : "1970-01-04T00:00:00.000Z" } md5: "f623e75af30e62bbd73d6df5b50bb7b5" filename: "abc" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000005" } length: 1 chunkSize: 4 uploadDate: { "$date" : "1970-01-05T00:00:00.000Z" } md5: "4c614360da93c0a041b22e537de151eb" filename: "abc" contentType: "application/octet-stream" aliases: [] metadata: {} chunks: - { _id : { "$oid" : "000000000000000000000001" }, files_id : { "$oid" : "000000000000000000000001" }, n : 0, data : { $hex : "11" } } - { _id : { "$oid" : "000000000000000000000002" }, files_id : { "$oid" : "000000000000000000000002" }, n : 0, data : { $hex : "22" } } - { _id : { "$oid" : "000000000000000000000003" }, files_id : { "$oid" : "000000000000000000000003" }, n : 0, data : { $hex : "33" } } - { _id : { "$oid" : "000000000000000000000004" }, files_id : { "$oid" : "000000000000000000000004" }, n : 0, data : { $hex : "44" } } - { _id : { "$oid" : "000000000000000000000005" }, files_id : { "$oid" : "000000000000000000000005" }, n : 0, data : { $hex : "55" } } tests: - description: "Download_by_name when revision is 0" act: operation: download_by_name arguments: filename: "abc" options: { revision : 0 } assert: result: { $hex : "11" } - description: "Download_by_name when revision is 1" act: operation: download_by_name arguments: filename: "abc" options: { revision : 1 } assert: result: { $hex : "22" } - description: "Download_by_name when revision is -2" act: operation: download_by_name arguments: filename: "abc" options: { revision : -2 } assert: result: { $hex : "44" } - description: "Download_by_name when revision is -1" act: operation: download_by_name arguments: filename: "abc" options: { revision : -1 } assert: result: { $hex : "55" } - description: "Download_by_name when files entry does not exist" act: operation: download_by_name arguments: filename: "xyz" assert: error: "FileNotFound" - description: "Download_by_name when revision does not exist" act: operation: download_by_name arguments: filename: "abc" options: { revision : 999 } assert: error: "RevisionNotFound" mongo-2.5.1/spec/support/gridfs_tests/delete.yml0000644000004100000410000001456513257253113022036 0ustar www-datawww-datadata: files: - _id: { "$oid" : "000000000000000000000001" } length: 0 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: 
"d41d8cd98f00b204e9800998ecf8427e" filename: "length-0" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000002" } length: 0 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "d41d8cd98f00b204e9800998ecf8427e" filename: "length-0-with-empty-chunk" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000003" } length: 2 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "c700ed4fdb1d27055aa3faa2c2432283" filename: "length-2" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000004" } length: 8 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "dd254cdc958e53abaa67da9f797125f5" filename: "length-8" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000005" } length: 8 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "dd254cdc958e53abaa67da9f797125f5" filename: "length-8-with-empty-chunk" contentType: "application/octet-stream" aliases: [] metadata: {} chunks: - { _id : { "$oid" : "000000000000000000000001" }, files_id : { "$oid" : "000000000000000000000002" }, n : 0, data : { $hex : "" } } - { _id : { "$oid" : "000000000000000000000002" }, files_id : { "$oid" : "000000000000000000000003" }, n : 0, data : { $hex : "1122" } } - { _id : { "$oid" : "000000000000000000000003" }, files_id : { "$oid" : "000000000000000000000004" }, n : 0, data : { $hex : "11223344" } } - { _id : { "$oid" : "000000000000000000000004" }, files_id : { "$oid" : "000000000000000000000004" }, n : 1, data : { $hex : "55667788" } } - { _id : { "$oid" : "000000000000000000000005" }, files_id : { "$oid" : "000000000000000000000005" }, n : 0, data : { $hex : "11223344" } } - { _id : { "$oid" : "000000000000000000000006" }, files_id : { "$oid" : "000000000000000000000005" }, n : 1, data : { $hex : "55667788" } } - { _id : { "$oid" : "000000000000000000000007" }, files_id : { "$oid" : "000000000000000000000005" }, n : 2, data : { $hex : "" } } tests: - description: "Delete when length is 0" act: operation: delete arguments: id: { "$oid" : "000000000000000000000001" } assert: result: void data: - { delete : "expected.files", deletes : [ { q : { _id : { "$oid" : "000000000000000000000001" } }, limit : 1 } ] } - description: "Delete when length is 0 and there is one extra empty chunk" act: operation: delete arguments: id: { "$oid" : "000000000000000000000002" } assert: result: void data: - { delete : "expected.files", deletes : [ { q : { _id : { "$oid" : "000000000000000000000002" } }, limit : 1 } ] } - { delete : "expected.chunks", deletes : [ { q : { files_id : { "$oid" : "000000000000000000000002" } }, limit : 0 } ] } - description: "Delete when length is 8" act: operation: delete arguments: id: { "$oid" : "000000000000000000000004" } assert: result: void data: - { delete : "expected.files", deletes : [ { q : { _id : { "$oid" : "000000000000000000000004" } }, limit : 1 } ] } - { delete : "expected.chunks", deletes : [ { q : { files_id : { "$oid" : "000000000000000000000004" } }, limit : 0 } ] } - description: "Delete when length is 8 and there is one extra empty chunk" act: operation: delete arguments: id: { "$oid" : "000000000000000000000005" } assert: result: void data: - { delete : "expected.files", deletes : [ { q : { _id : { "$oid" : "000000000000000000000005" } }, limit : 1 } ] } - { delete : "expected.chunks", deletes : [ { q : { 
files_id : { "$oid" : "000000000000000000000005" } }, limit : 0 } ] } - description: "Delete when files entry does not exist" act: operation: delete arguments: id: { "$oid" : "000000000000000000000000" } assert: error: "FileNotFound" - description: "Delete when files entry does not exist and there are orphaned chunks" arrange: data: - { delete : "fs.files", deletes : [ { q : { _id : { "$oid" : "000000000000000000000005" } }, limit : 1 } ] } act: operation: delete arguments: id: { "$oid" : "000000000000000000000005" } assert: error: "FileNotFound" data: - { delete : "expected.files", deletes : [ { q : { _id : { "$oid" : "000000000000000000000005" } }, limit : 1 } ] } - { delete : "expected.chunks", deletes : [ { q : { files_id : { "$oid" : "000000000000000000000005" } }, limit : 0 } ] } mongo-2.5.1/spec/support/gridfs_tests/download.yml0000644000004100000410000002065613257253113022401 0ustar www-datawww-datadata: files: - _id: { "$oid" : "000000000000000000000001" } length: 0 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "d41d8cd98f00b204e9800998ecf8427e" filename: "length-0" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000002" } length: 0 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "d41d8cd98f00b204e9800998ecf8427e" filename: "length-0-with-empty-chunk" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000003" } length: 2 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "c700ed4fdb1d27055aa3faa2c2432283" filename: "length-2" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000004" } length: 8 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "dd254cdc958e53abaa67da9f797125f5" filename: "length-8" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000005" } length: 10 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "57d83cd477bfb1ccd975ab33d827a92b" filename: "length-10" contentType: "application/octet-stream" aliases: [] metadata: {} - _id: { "$oid" : "000000000000000000000006" } length: 12 chunkSize: 4 uploadDate: { "$date" : "1970-01-01T00:00:00.000Z" } md5: "6289ac1db331d1c7677a4b7e123178f9" filename: "length-12-with-empty-chunk" contentType: "application/octet-stream" aliases: [] metadata: {} chunks: - { _id : { "$oid" : "000000000000000000000001" }, files_id : { "$oid" : "000000000000000000000002" }, n : 0, data : { $hex : "" } } - { _id : { "$oid" : "000000000000000000000002" }, files_id : { "$oid" : "000000000000000000000003" }, n : 0, data : { $hex : "1122" } } - { _id : { "$oid" : "000000000000000000000003" }, files_id : { "$oid" : "000000000000000000000004" }, n : 0, data : { $hex : "11223344" } } - { _id : { "$oid" : "000000000000000000000004" }, files_id : { "$oid" : "000000000000000000000004" }, n : 1, data : { $hex : "55667788" } } - { _id : { "$oid" : "000000000000000000000005" }, files_id : { "$oid" : "000000000000000000000005" }, n : 0, data : { $hex : "11223344" } } - { _id : { "$oid" : "000000000000000000000006" }, files_id : { "$oid" : "000000000000000000000005" }, n : 1, data : { $hex : "55667788" } } - { _id : { "$oid" : "000000000000000000000007" }, files_id : { "$oid" : "000000000000000000000005" }, n : 2, data : { $hex : "99aa" } } - { _id : { "$oid" : "000000000000000000000008" }, files_id : { "$oid" : "000000000000000000000006" }, n : 0, 
data : { $hex : "11223344" } } - { _id : { "$oid" : "000000000000000000000009" }, files_id : { "$oid" : "000000000000000000000006" }, n : 1, data : { $hex : "55667788" } } - { _id : { "$oid" : "000000000000000000000010" }, files_id : { "$oid" : "000000000000000000000006" }, n : 2, data : { $hex : "99aabbcc" } } - { _id : { "$oid" : "000000000000000000000011" }, files_id : { "$oid" : "000000000000000000000006" }, n : 3, data : { $hex : "" } } tests: - description: "Download when length is zero" act: operation: download arguments: id: { "$oid" : "000000000000000000000001" } options: { } assert: result: { $hex : "" } - description: "Download when length is zero and there is one empty chunk" act: operation: download arguments: id: { "$oid" : "000000000000000000000002" } options: { } assert: result: { $hex : "" } - description: "Download when there is one chunk" act: operation: download arguments: id: { "$oid" : "000000000000000000000003" } options: { } assert: result: { $hex : "1122" } - description: "Download when there are two chunks" act: operation: download arguments: id: { "$oid" : "000000000000000000000004" } options: { } assert: result: { $hex : "1122334455667788" } - description: "Download when there are three chunks" act: operation: download arguments: id: { "$oid" : "000000000000000000000005" } options: { } assert: result: { $hex : "112233445566778899aa" } - description: "Download when there are three chunks and one extra empty chunk at the end" act: operation: download arguments: id: { "$oid" : "000000000000000000000006" } options: { } assert: result: { $hex : "112233445566778899aabbcc" } - description: "Download when files entry does not exist" act: operation: download arguments: id: { "$oid" : "000000000000000000000000" } options: { } assert: error: "FileNotFound" - description: "Download when an intermediate chunk is missing" arrange: data: - { delete : "fs.chunks", deletes : [ { q : { files_id : { "$oid" : "000000000000000000000005" }, n : 1 }, limit : 1 } ] } act: operation: download arguments: id: { "$oid" : "000000000000000000000005" } assert: error: "ChunkIsMissing" - description: "Download when final chunk is missing" arrange: data: - { delete : "fs.chunks", deletes : [ { q : { files_id : { "$oid" : "000000000000000000000005" }, n : 1 }, limit : 1 } ] } act: operation: download arguments: id: { "$oid" : "000000000000000000000005" } assert: error: "ChunkIsMissing" - description: "Download when there is an extra chunk" arrange: data: - { insert : "fs.chunks", documents : [ { _id : { "$oid" : "000000000000000000000012" }, files_id : { "$oid" : "000000000000000000000004" }, n : 2, data : { $hex : "99" } } ] } act: operation: download arguments: id: { "$oid" : "000000000000000000000004" } assert: error: "ExtraChunk" - description: "Download when an intermediate chunk is the wrong size" arrange: data: - { update : "fs.chunks", updates : [ { q : { files_id : { "$oid" : "000000000000000000000005" }, n : 1 }, u : { $set : { data : { $hex : "556677" } } } }, { q : { files_id : { "$oid" : "000000000000000000000005" }, n : 2 }, u : { $set : { data : { $hex : "8899aa" } } } } ] } act: operation: download arguments: id: { "$oid" : "000000000000000000000005" } assert: error: "ChunkIsWrongSize" - description: "Download when final chunk is the wrong size" arrange: data: - { update : "fs.chunks", updates : [ { q : { files_id : { "$oid" : "000000000000000000000005" }, n : 2 }, u : { $set : { data : { $hex : "99" } } } } ] } act: operation: download arguments: id: { "$oid" : 
"000000000000000000000005" } assert: error: "ChunkIsWrongSize" mongo-2.5.1/spec/support/gridfs_tests/upload.yml0000644000004100000410000001451013257253113022046 0ustar www-datawww-datadata: files: [] chunks: [] tests: - description: "Upload when length is 0" act: operation: upload arguments: filename: "filename" source: { $hex : "" } options: { chunkSizeBytes : 4 } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 0, chunkSize : 4, uploadDate : "*actual", md5 : "d41d8cd98f00b204e9800998ecf8427e", filename : "filename" } ] } - description: "Upload when length is 1" act: operation: upload arguments: filename: "filename" source: { $hex : "11" } options: { chunkSizeBytes : 4 } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 1, chunkSize : 4, uploadDate : "*actual", md5 : "47ed733b8d10be225eceba344d533586", filename : "filename" } ] } - { insert : "expected.chunks", documents : [ { _id : "*actual", files_id : "*result", n : 0, data : { $hex : "11" } } ] } - description: "Upload when length is 3" act: operation: upload arguments: filename: "filename" source: { $hex : "112233" } options: { chunkSizeBytes : 4 } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 3, chunkSize : 4, uploadDate : "*actual", md5 : "bafae3a174ab91fc70db7a6aa50f4f52", filename : "filename" } ] } - { insert : "expected.chunks", documents : [ { _id : "*actual", files_id : "*result", n : 0, data : { $hex : "112233" } } ] } - description: "Upload when length is 4" act: operation: upload arguments: filename: "filename" source: { $hex : "11223344" } options: { chunkSizeBytes : 4 } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 4, chunkSize : 4, uploadDate : "*actual", md5 : "7e7c77cff5705d1f7574a25ef6662117", filename : "filename" } ] } - { insert : "expected.chunks", documents : [ { _id : "*actual", files_id : "*result", n : 0, data : { $hex : "11223344" } } ] } - description: "Upload when length is 5" act: operation: upload arguments: filename: "filename" source: { $hex : "1122334455" } options: { chunkSizeBytes : 4 } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 5, chunkSize : 4, uploadDate : "*actual", md5 : "283d4fea5dded59cf837d3047328f5af", filename : "filename" } ] } - { insert : "expected.chunks", documents : [ { _id : "*actual", files_id : "*result", n : 0, data : { $hex : "11223344" } }, { _id : "*actual", files_id : "*result", n : 1, data : { $hex : "55" } } ] } - description: "Upload when length is 8" act: operation: upload arguments: filename: "filename" source: { $hex : "1122334455667788" } options: { chunkSizeBytes : 4 } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 8, chunkSize : 4, uploadDate : "*actual", md5 : "dd254cdc958e53abaa67da9f797125f5", filename : "filename" } ] } - { insert : "expected.chunks", documents : [ { _id : "*actual", files_id : "*result", n : 0, data : { $hex : "11223344" } }, { _id : "*actual", files_id : "*result", n : 1, data : { $hex : "55667788" } } ] } - description: "Upload when contentType is provided" act: operation: upload arguments: filename: "filename" source: { $hex : "11" } options: { chunkSizeBytes : 4, contentType : "image/jpeg" } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 1, 
chunkSize : 4, uploadDate : "*actual", md5 : "47ed733b8d10be225eceba344d533586", filename : "filename", contentType : "image/jpeg" } ] } - { insert : "expected.chunks", documents : [ { _id : "*actual", files_id : "*result", n : 0, data : { $hex : "11" } } ] } - description: "Upload when metadata is provided" act: operation: upload arguments: filename: "filename" source: { $hex : "11" } options: chunkSizeBytes: 4 metadata: { x : 1 } assert: result: "&result" data: - { insert : "expected.files", documents : [ { _id : "*result", length : 1, chunkSize : 4, uploadDate : "*actual", md5 : "47ed733b8d10be225eceba344d533586", filename : "filename", metadata : { x : 1 } } ] } - { insert : "expected.chunks", documents : [ { _id : "*actual", files_id : "*result", n : 0, data : { $hex : "11" } } ] } mongo-2.5.1/spec/support/matchers.rb0000644000004100000410000000135713257253113017477 0ustar www-datawww-dataRSpec::Matchers.define :be_int32 do |num| match do |actual| actual == [num].pack('l<') end end RSpec::Matchers.define :be_int64 do |num| match do |actual| actual == [num].pack('q<') end end RSpec::Matchers.define :be_int64_sequence do |array| match do |actual| actual == array.reduce(String.new) do |buffer, num| buffer << [num].pack('q<') end end end RSpec::Matchers.define :be_cstring do |string| match do |actual| actual == "#{string.force_encoding(BSON::BINARY)}\0" end end RSpec::Matchers.define :be_bson do |hash| match do |actual| actual == hash.to_bson.to_s end end RSpec::Matchers.define :be_bson_sequence do |array| match do |actual| actual == array.map(&:to_bson).join end end mongo-2.5.1/spec/support/crud/0000755000004100000410000000000013257253113016273 5ustar www-datawww-datamongo-2.5.1/spec/support/crud/read.rb0000644000004100000410000001055113257253113017535 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module CRUD module Operation # Defines common behaviour for running CRUD read operation tests # on a collection. # # @since 2.0.0 class Read # Map of method names to test operation argument names. # # @since 2.0.0 ARGUMENT_MAP = { :sort => 'sort', :skip => 'skip', :batch_size => 'batchSize', :limit => 'limit', :collation => 'collation' }.freeze # Map of read preference mode names to their equivalent Ruby-formatted symbols. # # @since 2.4.0 READ_PREFERENCE_MAP = { 'primary' => :primary, 'secondary' => :secondary, 'primaryPreferred' => :primary_preferred, 'secondaryPreferred' => :secondary_preferred, 'nearest' => :nearest }.freeze # The operation name. # # @return [ String ] name The operation name. # # @since 2.0.0 attr_reader :name # Instantiate the operation. # # @return [ Hash ] spec The operation spec. # # @since 2.0.0 def initialize(spec) @spec = spec @name = spec['name'] end # Execute the operation. # # @example Execute the operation. # operation.execute # # @param [ Collection ] collection The collection to execute the operation on. # # @return [ Result, Array ] The result of executing the operation. 
# # @since 2.0.0 def execute(collection) send(name.to_sym, collection) end # Whether the operation is expected to have results. # # @example Whether the operation is expected to have results. # operation.has_results? # # @return [ true, false ] If the operation is expected to have results. # # @since 2.0.0 def has_results? !(name == 'aggregate' && pipeline.find {|op| op.keys.include?('$out') }) end private def count(collection) options = ARGUMENT_MAP.reduce({}) do |opts, (key, value)| opts.merge!(key => arguments[value]) if arguments[value] opts end collection.count(filter, options) end def aggregate(collection) collection.aggregate(pipeline, options).to_a end def distinct(collection) collection.distinct(field_name, filter, options) end def find(collection) opts = modifiers ? options.merge(modifiers: BSON::Document.new(modifiers)) : options (read_preference ? collection.with(read: read_preference) : collection).find(filter, opts).to_a end def options ARGUMENT_MAP.reduce({}) do |opts, (key, value)| arguments[value] ? opts.merge!(key => arguments[value]) : opts end end def collation arguments['collation'] end def batch_size arguments['batchSize'] end def filter arguments['filter'] end def pipeline arguments['pipeline'] end def modifiers arguments['modifiers'] end def field_name arguments['fieldName'] end def arguments @spec['arguments'] end def read_preference if @spec['read_preference'] && @spec['read_preference']['mode'] { mode: READ_PREFERENCE_MAP[@spec['read_preference']['mode']] } end end end end end end mongo-2.5.1/spec/support/crud/write.rb0000644000004100000410000002006113257253113017751 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module CRUD module Operation # Defines common behaviour for running CRUD write operation tests on a # collection. # # @since 2.0.0 class Write # Map of CRUD operation names to method names. # # @since 2.0.0 OPERATIONS = { 'deleteMany' => :delete_many, 'deleteOne' => :delete_one, 'insertMany' => :insert_many, 'insertOne' => :insert_one, 'replaceOne' => :replace_one, 'updateMany' => :update_many, 'updateOne' => :update_one, 'findOneAndDelete' => :find_one_and_delete, 'findOneAndReplace' => :find_one_and_replace, 'findOneAndUpdate' => :find_one_and_update, 'bulkWrite' => :bulk_write }.freeze # Map of operation options to method names. # # @since 2.0.0 ARGUMENT_MAP = { :sort => 'sort', :projection => 'projection', :return_document => 'returnDocument', :upsert => 'upsert', :ordered => 'ordered', :write_concern => 'writeConcern', :collation => 'collation', :array_filters => 'arrayFilters' }.freeze # The operation name. # # @return [ String ] name The operation name. # # @since 2.0.0 attr_reader :name # Instantiate the operation. # # @return [ Hash ] spec The operation spec. # # @since 2.0.0 def initialize(spec) @spec = spec @name = spec['name'] end # Whether the operation is expected to have results. # # @example Whether the operation is expected to have results. # operation.has_results?
# # @return [ true ] If the operation is expected to have results. # # @since 2.0.0 def has_results? true end # Execute the operation. # # @example Execute the operation. # operation.execute # # @param [ Collection ] collection The collection to execute # the operation on. # # @return [ Result, Array ] The result of executing the operation. # # @since 2.0.0 def execute(collection) send(OPERATIONS[name], collection) end private def bulk_write(collection) result = collection.bulk_write(requests, options) return_doc = {} return_doc['deletedCount'] = result.deleted_count if result.deleted_count return_doc['insertedIds'] = result.inserted_ids if result.inserted_ids return_doc['upsertedId'] = result.upserted_id if upsert return_doc['upsertedCount'] = result.upserted_count if result.upserted_count return_doc['matchedCount'] = result.matched_count if result.matched_count return_doc['modifiedCount'] = result.modified_count if result.modified_count return_doc end def delete_many(collection) result = collection.delete_many(filter, options) { 'deletedCount' => result.deleted_count } end def delete_one(collection) result = collection.delete_one(filter, options) { 'deletedCount' => result.deleted_count } end def insert_many(collection) result = collection.insert_many(documents, options) { 'insertedIds' => result.inserted_ids } end def insert_one(collection) result = collection.insert_one(document) { 'insertedId' => result.inserted_id } end def update_return_doc(result) return_doc = {} return_doc['upsertedId'] = result.upserted_id if upsert return_doc['upsertedCount'] = result.upserted_count return_doc['matchedCount'] = result.matched_count return_doc['modifiedCount'] = result.modified_count if result.modified_count return_doc end def replace_one(collection) result = collection.replace_one(filter, replacement, options) update_return_doc(result) end def update_many(collection) result = collection.update_many(filter, update, options) update_return_doc(result) end def update_one(collection) result = collection.update_one(filter, update, options) update_return_doc(result) end def find_one_and_delete(collection) collection.find_one_and_delete(filter, options) end def find_one_and_replace(collection) collection.find_one_and_replace(filter, replacement, options) end def find_one_and_update(collection) collection.find_one_and_update(filter, update, options) end def options ARGUMENT_MAP.reduce({}) do |opts, (key, value)| arguments.key?(value) ? 
opts.merge!(key => send(key)) : opts end end def collation arguments['collation'] end def replacement arguments['replacement'] end def sort arguments['sort'] end def projection arguments['projection'] end def documents arguments['documents'] end def document arguments['document'] end def write_concern arguments['writeConcern'] end def ordered arguments['ordered'] end def filter arguments['filter'] end def array_filters arguments['arrayFilters'] end def requests arguments['requests'].map do |request| case request.keys.first when 'insertOne' then { insert_one: request['insertOne']['document'] } when 'updateOne' then update = request['updateOne'] { update_one: { filter: update['filter'], update: update['update'] } } when 'name' then bulk_request(request) end end end def bulk_request(request) op_name = OPERATIONS[request['name']] op = { op_name => {} } op[op_name].merge!(filter: request['arguments']['filter']) if request['arguments']['filter'] op[op_name].merge!(update: request['arguments']['update']) if request['arguments']['update'] op[op_name].merge!(upsert: request['arguments']['upsert']) if request['arguments']['upsert'] op[op_name].merge!(replacement: request['arguments']['replacement']) if request['arguments']['replacement'] op[op_name].merge!(array_filters: request['arguments']['arrayFilters']) if request['arguments']['arrayFilters'] op[op_name] = request['arguments']['document'] if request['arguments']['document'] op end def upsert arguments['upsert'] end def return_document case arguments['returnDocument'] when 'Before' :before when 'After' :after end end def update arguments['update'] end def arguments @spec['arguments'] end end end end end mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/0000755000004100000410000000000013257253113023327 5ustar www-datawww-datamongo-2.5.1/spec/support/dns_seedlist_discovery_tests/txt-record-with-overridden-ssl-option.yml0000644000004100000410000000037113257253113033363 0ustar www-datawww-datauri: "mongodb+srv://test5.test.build.10gen.cc/?ssl=false" seeds: - localhost.test.build.10gen.cc:27017 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 authSource: thisDB ssl: false mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/returned-parent-too-short.yml0000644000004100000410000000025213257253113031124 0ustar www-datawww-datauri: "mongodb+srv://test13.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because returned host name's parent (build.10gen.cc) misses "test." mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/longer-parent-in-return.yml0000644000004100000410000000050313257253113030546 0ustar www-datawww-datauri: "mongodb+srv://test18.test.build.10gen.cc/?replicaSet=repl0" seeds: - localhost.sub.test.build.10gen.cc:27017 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 ssl: true comment: Is correct, as returned host name shared the URI root "test.build.10gen.cc". mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/no-results.yml0000644000004100000410000000022613257253113026165 0ustar www-datawww-datauri: "mongodb+srv://test4.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because no SRV records are present for this URI. 
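The Mongo::CRUD::Operation::Read and Mongo::CRUD::Operation::Write helpers listed above each wrap a plain spec hash (an operation 'name' plus an 'arguments' document) and dispatch to the corresponding driver method when executed against a collection. The following is a minimal, illustrative sketch of driving the Write helper by hand, not part of the gem: it assumes spec/support/crud/write.rb has been loaded (the require_relative path is an assumption, relative to the gem root) and that a mongod is reachable; the client address, database, collection name, and seed documents are invented for the example.

require 'mongo'
require_relative 'spec/support/crud/write'   # assumed load path, relative to the gem root

# Illustrative deployment details only.
client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'crud_sandbox')
collection = client['example']
collection.insert_many([ { _id: 1, x: 11 }, { _id: 2, x: 22 } ])

# The hash mirrors the operation documents the Write helper consumes.
spec = {
  'name'      => 'updateOne',
  'arguments' => {
    'filter' => { '_id' => 1 },
    'update' => { '$inc' => { 'x' => 1 } }
  }
}

operation = Mongo::CRUD::Operation::Write.new(spec)
# Dispatches to Collection#update_one and returns a result document,
# e.g. { 'upsertedCount' => 0, 'matchedCount' => 1, 'modifiedCount' => 1 }.
puts operation.execute(collection).inspect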
mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/parent-part-mismatch2.yml0000644000004100000410000000027013257253113030173 0ustar www-datawww-datauri: "mongodb+srv://test15.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because returned host name's part "not-build" mismatches URI parent part "build". mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/txt-record-with-unallowed-option.yml0000644000004100000410000000021513257253113032412 0ustar www-datawww-datauri: "mongodb+srv://test7.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because "ssl" is not an allowed option. mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/two-txt-records.yml0000644000004100000410000000021013257253113027130 0ustar www-datawww-datauri: "mongodb+srv://test6.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because there are two TXT records. mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/uri-with-two-hosts.yml0000644000004100000410000000030513257253113027565 0ustar www-datawww-datauri: "mongodb+srv://test5.test.build.10gen.cc,test6.test.build.10gen.cc/?replicaSet=repl0" seeds: [] hosts: [] error: true comment: Should fail because the mongodb+srv URI includes two host names. mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/misformatted-option.yml0000644000004100000410000000026513257253113030061 0ustar www-datawww-datauri: "mongodb+srv://test8.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because the options in the TXT record are incorrectly formatted (misses value). mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/parent-part-mismatch4.yml0000644000004100000410000000025113257253113030174 0ustar www-datawww-datauri: "mongodb+srv://test17.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because returned host name's TLD "not-cc" mismatches URI TLD "cc". mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/one-result-default-port.yml0000644000004100000410000000035013257253113030551 0ustar www-datawww-datauri: "mongodb+srv://test3.test.build.10gen.cc/?replicaSet=repl0" seeds: - localhost.test.build.10gen.cc:27017 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 ssl: true mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/parent-part-mismatch5.yml0000644000004100000410000000027213257253113030200 0ustar www-datawww-datauri: "mongodb+srv://test19.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because one of the returned host names' domain name parts "evil" mismatches "test". mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/txt-record-not-allowed-option.yml0000644000004100000410000000025113257253113031674 0ustar www-datawww-datauri: "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0" seeds: [] hosts: [] error: true comment: Should fail because socketTimeoutMS is not an allowed option. 
mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/one-txt-record-multiple-strings.yml0000644000004100000410000000033013257253113032240 0ustar www-datawww-datauri: "mongodb+srv://test11.test.build.10gen.cc/" seeds: - localhost.test.build.10gen.cc:27017 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 ssl: true mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/returned-parent-wrong.yml0000644000004100000410000000024713257253113030326 0ustar www-datawww-datauri: "mongodb+srv://test12.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because returned host name is too short and mismatches a parent. mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/two-results-default-port.yml0000644000004100000410000000042213257253113030764 0ustar www-datawww-datauri: "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0" seeds: - localhost.test.build.10gen.cc:27017 - localhost.test.build.10gen.cc:27018 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 ssl: true mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/two-results-nonstandard-port.yml0000644000004100000410000000042213257253113031653 0ustar www-datawww-datauri: "mongodb+srv://test2.test.build.10gen.cc/?replicaSet=repl0" seeds: - localhost.test.build.10gen.cc:27018 - localhost.test.build.10gen.cc:27019 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 ssl: true mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/one-txt-record.yml0000644000004100000410000000035613257253113026730 0ustar www-datawww-datauri: "mongodb+srv://test5.test.build.10gen.cc/" seeds: - localhost.test.build.10gen.cc:27017 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 authSource: thisDB ssl: true mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/not-enough-parts.yml0000644000004100000410000000023213257253113027261 0ustar www-datawww-datauri: "mongodb+srv://10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because host in URI does not have {hostname}, {domainname} and {tld}. mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/uri-with-port.yml0000644000004100000410000000025013257253113026601 0ustar www-datawww-datauri: "mongodb+srv://test5.test.build.10gen.cc:8123/?replicaSet=repl0" seeds: [] hosts: [] error: true comment: Should fail because the mongodb+srv URI includes a port. mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/parent-part-mismatch1.yml0000644000004100000410000000026613257253113030177 0ustar www-datawww-datauri: "mongodb+srv://test14.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because returned host name's part "not-test" mismatches URI parent part "test". mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/txt-record-with-overridden-uri-option.yml0000644000004100000410000000040213257253113033354 0ustar www-datawww-datauri: "mongodb+srv://test5.test.build.10gen.cc/?authSource=otherDB" seeds: - localhost.test.build.10gen.cc:27017 hosts: - localhost:27017 - localhost:27018 - localhost:27019 options: replicaSet: repl0 authSource: otherDB ssl: true mongo-2.5.1/spec/support/dns_seedlist_discovery_tests/parent-part-mismatch3.yml0000644000004100000410000000027013257253113030174 0ustar www-datawww-datauri: "mongodb+srv://test16.test.build.10gen.cc/" seeds: [] hosts: [] error: true comment: Should fail because returned host name's part "not-10gen" mismatches URI parent part "10gen". 
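Each of the dns_seedlist_discovery_tests fixtures above pairs a mongodb+srv:// URI with either the seeds, hosts, and options expected after SRV/TXT resolution, or error: true plus a comment when resolution must fail. Below is a rough, illustrative sketch (not part of the gem) that simply loads one fixture and prints its expectations; actually resolving the seed list requires the special test.build.10gen.cc DNS zone used by the driver test suite, and the file path is relative to the gem root.

require 'yaml'

fixture = YAML.load_file('spec/support/dns_seedlist_discovery_tests/one-txt-record.yml')

puts "URI under test:   #{fixture['uri']}"
if fixture['error']
  # Negative cases carry an explanatory comment instead of expected seeds/hosts.
  puts "expected outcome: resolution fails (#{fixture['comment']})"
else
  puts "expected seeds:   #{fixture['seeds'].join(', ')}"
  puts "expected hosts:   #{fixture['hosts'].join(', ')}"
  puts "expected options: #{fixture['options'].inspect}"
end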
mongo-2.5.1/spec/support/connection_string.rb0000644000004100000410000001234713257253113021417 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. RSpec::Matchers.define :have_hosts do |test| match do |cl| def find_server(client, host) client.cluster.instance_variable_get(:@servers).detect do |s| s.address.host == host.host end end def match_host?(server, host) server.address.host == host.host end def match_port?(server, host) server.address.port == host.port || !host.port end def match_address_family?(server, host) address_family(server) == host.address_family end def address_family(server) server.address.socket(2) server.address.instance_variable_get(:@resolver).class end test.hosts.all? do |host| server = find_server(cl, host) match_host?(server, host) && match_port?(server, host) if server #&& #match_address_family?(server, host) if server end failure_message do |client| "With URI: #{test.uri_string}\n" + "Expected that test hosts: #{test.hosts} would match " + "client hosts: #{cl.cluster.instance_variable_get(:@servers)}" end end end RSpec::Matchers.define :match_auth do |test| def match_database?(client, auth) client.options[:database] == auth.database || !auth.database end def match_password?(client, auth) client.options[:password] == auth.password || client.options[:password].nil? && auth.password == '' end match do |client| auth = test.auth return true unless auth client.options[:user] == auth.username && match_password?(client, auth) && match_database?(client, auth) end failure_message do |client| "With URI: #{test.uri_string}\n" + "Expected that test auth: #{test.auth} would match client auth: #{client.options}" end end RSpec::Matchers.define :match_options do |test| match do |client| options = test.options return true unless options options.match?(client.options) end failure_message do |client| "With URI: #{test.uri_string}\n" + "Expected that test options: #{test.options.options} would match client options: #{client.options}" end end module Mongo module ConnectionString class Spec attr_reader :description # Instantiate the new spec. # # @example Create the spec. # Spec.new(file) # # @param [ String ] file The name of the file. # # @since 2.0.0 def initialize(file) file = File.new(file) @spec = YAML.load(ERB.new(file.read).result) file.close @description = File.basename(file) end def tests @tests ||= @spec['tests'].collect do |spec| Test.new(spec) end end end class Test attr_reader :description attr_reader :uri_string def initialize(spec) @spec = spec @description = @spec['description'] @uri_string = @spec['uri'] end def valid? @spec['valid'] end def warn? @spec['warning'] end def hosts @hosts ||= @spec['hosts'].collect do |host| Host.new(host) end end def options @options ||= Options.new(@spec['options']) if @spec['options'] end def client @client ||= Mongo::Client.new(@spec['uri']) end def uri @uri ||= Mongo::URI.get(@spec['uri']) end def auth @auth ||= Auth.new(@spec['auth']) if @spec['auth'] end def raise_error? 
@spec['error'] end end class Host MAPPING = { 'ipv4' => Mongo::Address::IPv4, 'ipv6' => Mongo::Address::IPv6, 'unix' => Mongo::Address::Unix } attr_reader :host attr_reader :port def initialize(spec) @spec = spec @host = @spec['host'] @port = @spec['port'] end def address_family MAPPING[@spec['type']] end end class Auth attr_reader :username attr_reader :password attr_reader :database def initialize(spec) @spec = spec @username = @spec['username'] @password = @spec['password'] @database = @spec['db'] end def to_s "username: #{username}, password: #{password}, database: #{database}" end end class Options MAPPINGS = { 'replicaset' => :replica_set, 'authmechanism' => :auth_mech } attr_reader :options def initialize(options) @options = options end def match?(opts) @options.all? do |k, v| opts[MAPPINGS[k.downcase]] == v || opts[MAPPINGS[k.downcase]] == Mongo::URI::AUTH_MECH_MAP[v] end end end end end mongo-2.5.1/spec/support/server_discovery_and_monitoring.rb0000644000004100000410000001471313257253113024355 0ustar www-datawww-data# Matcher for determining if the server is of the expected type according to # the test. # # @since 2.0.0 RSpec::Matchers.define :be_server_type do |expected| match do |actual| case expected when 'Standalone' then actual.standalone? when 'RSPrimary' then actual.primary? when 'RSSecondary' then actual.secondary? when 'RSArbiter' then actual.arbiter? when 'Mongos' then actual.mongos? when 'Unknown' then actual.unknown? when 'PossiblePrimary' then actual.unknown? when 'RSGhost' then actual.ghost? when 'RSOther' then actual.other? end end end # Matcher for determining if the cluster topology is the expected type. # # @since 2.0.0 RSpec::Matchers.define :be_topology do |expected| match do |actual| case expected when 'ReplicaSetWithPrimary' then actual.replica_set? when 'ReplicaSetNoPrimary' then actual.replica_set? when 'Sharded' then actual.sharded? when 'Single' then actual.single? when 'Unknown' then actual.unknown? end end end module Mongo module SDAM # Convenience helper to find a server by its URI. # # @since 2.0.0 def find_server(client, uri) client.cluster.instance_variable_get(:@servers).detect{ |s| s.address.to_s == uri } end # Helper to convert an extended JSON ObjectId electionId to BSON::ObjectId. # # @since 2.1.0 def self.convert_election_ids(docs) docs.each do |doc | doc['electionId'] = BSON::ObjectId.from_string(doc['electionId']['$oid']) if doc['electionId'] end end # Represents a specification. # # @since 2.0.0 class Spec # @return [ String ] description The spec description. attr_reader :description # @return [ Array ] phases The spec phases. attr_reader :phases # @return [ Mongo::URI ] uri The URI object. attr_reader :uri # @return [ String ] uri_string The passed uri string. attr_reader :uri_string # Instantiate the new spec. # # @example Create the spec. # Spec.new(file) # # @param [ String ] file The name of the file. # # @since 2.0.0 def initialize(file) file = File.new(file) @test = YAML.load(ERB.new(file.read).result) file.close @description = @test['description'] @uri_string = @test['uri'] @uri = URI.new(uri_string) @phases = @test['phases'].map{ |phase| Phase.new(phase, uri) } end end # Represents a phase in the spec. Phases are sequential. # # @since 2.0.0 class Phase # @return [ Outcome ] outcome The phase outcome. attr_reader :outcome # @return [ Array ] responses The responses for each server in # the phase. attr_reader :responses # Create the new phase. # # @example Create the new phase.
# Phase.new(phase, uri) # # @param [ Hash ] phase The phase hash. # @param [ Mongo::URI ] uri The URI. # # @since 2.0.0 def initialize(phase, uri) @phase = phase @responses = @phase['responses'].map{ |response| Response.new(SDAM::convert_election_ids(response), uri) } @outcome = Outcome.new(@phase['outcome']) end end # Represents a server response during a phase. # # @since 2.0.0 class Response # @return [ String ] address The server address. attr_reader :address # @return [ Hash ] ismaster The ismaster response. attr_reader :ismaster # Create the new response. # # @example Create the response. # Response.new(response, uri) # # @param [ Hash ] response The response value. # @param [ Mongo::URI ] uri The URI. # # @since 2.0.0 def initialize(response, uri) @uri = uri @address = response[0] @ismaster = response[1] end end # Get the outcome or expectations from the phase. # # @since 2.0.0 class Outcome # @return [ Array ] events The expected events. attr_reader :events # @return [ Hash ] servers The expectations for # server states. attr_reader :servers # @return [ String ] set_name The expected RS set name. attr_reader :set_name # @return [ String ] topology_type The expected cluster topology type. attr_reader :topology_type # @return [ Integer, nil ] logical_session_timeout The expected logical session timeout. attr_reader :logical_session_timeout # Create the new outcome. # # @example Create the new outcome. # Outcome.new(outcome) # # @param [ Hash ] outcome The outcome object. # # @since 2.0.0 def initialize(outcome) @servers = process_servers(outcome['servers']) if outcome['servers'] @set_name = outcome['setName'] @topology_type = outcome['topologyType'] @logical_session_timeout = outcome['logicalSessionTimeoutMinutes'] @events = process_events(outcome['events']) if outcome['events'] @compatible = outcome['compatible'] end # Whether the server responses indicate that their versions are supported by the driver. # # @example Do the server responses indicate that their versions are supported by the driver. # outcome.compatible? # # @return [ true, false ] Whether the server versions are compatible with the driver. # # @since 2.5.1 def compatible? @compatible.nil?
|| !!@compatible end private def process_events(events) events.map do |event| Event.new(event.keys.first, event.values.first) end end def process_servers(servers) servers.each do |s| SDAM::convert_election_ids([ s[1] ]) end end end class Event MAPPINGS = { 'server_closed_event' => Mongo::Monitoring::Event::ServerClosed, 'server_description_changed_event' => Mongo::Monitoring::Event::ServerDescriptionChanged, 'server_opening_event' => Mongo::Monitoring::Event::ServerOpening, 'topology_description_changed_event' => Mongo::Monitoring::Event::TopologyChanged, 'topology_opening_event' => Mongo::Monitoring::Event::TopologyOpening } attr_reader :name attr_reader :data def initialize(name, data) @name = name @data = data end def expected MAPPINGS.fetch(name) end end end end mongo-2.5.1/spec/support/retryable_writes_tests/0000755000004100000410000000000013257253113022146 5ustar www-datawww-datamongo-2.5.1/spec/support/retryable_writes_tests/findOneAndDelete.yml0000644000004100000410000000274013257253113026024 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } minServerVersion: '3.6' tests: # - # description: "FindOneAndDelete is committed on first attempt" # failPoint: # mode: { times: 1 } # operation: # name: "findOneAndDelete" # arguments: # filter: { x: { $gte: 11 }} # sort: { x: 1 } # outcome: # result: { _id: 1, x: 11 } # collection: # data: # - { _id: 2, x: 22 } # - # description: "FindOneAndDelete is not committed on first attempt" # failPoint: # mode: { times: 1 } # data: { failBeforeCommitExceptionCode: 1 } # operation: # name: "findOneAndDelete" # arguments: # filter: { x: { $gte: 11 }} # sort: { x: 1 } # outcome: # result: { _id: 1, x: 11 } # collection: # data: # - { _id: 2, x: 22 } - description: "FindOneAndDelete is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "findOneAndDelete" arguments: filter: { x: { $gte: 11 }} sort: { x: 1 } outcome: error: true collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 }mongo-2.5.1/spec/support/retryable_writes_tests/updateOne.yml0000644000004100000410000000704613257253113024624 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } minServerVersion: '3.6' tests: # - # description: "UpdateOne is committed on first attempt" # failPoint: # mode: { times: 1 } # operation: # name: "updateOne" # arguments: # filter: { _id: 1 } # update: { $inc: { x : 1 }} # outcome: # result: # matchedCount: 1 # modifiedCount: 1 # upsertedCount: 0 # collection: # data: # - { _id: 1, x: 12 } # - { _id: 2, x: 22 } # - # description: "UpdateOne is not committed on first attempt" # failPoint: # mode: { times: 1 } # data: { failBeforeCommitExceptionCode: 1 } # operation: # name: "updateOne" # arguments: # filter: { _id: 1 } # update: { $inc: { x : 1 }} # outcome: # result: # matchedCount: 1 # modifiedCount: 1 # upsertedCount: 0 # collection: # data: # - { _id: 1, x: 12 } # - { _id: 2, x: 22 } # - # description: "UpdateOne is never committed" # failPoint: # mode: { times: 2 } # data: { failBeforeCommitExceptionCode: 1 } # operation: # name: "updateOne" # arguments: # filter: { _id: 1 } # update: { $inc: { x : 1 }} # outcome: # error: true # collection: # data: # - { _id: 1, x: 11 } # - { _id: 2, x: 22 } - description: "UpdateOne with upsert is committed on first attempt" failPoint: mode: { times: 1 } operation: name: "updateOne" arguments: filter: { _id: 3, x: 33 } update: { $inc: { x : 1 }} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 1 upsertedId: 
3 collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 34 } - description: "UpdateOne with upsert is not committed on first attempt" failPoint: mode: { times: 1 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "updateOne" arguments: filter: { _id: 3, x: 33 } update: { $inc: { x : 1 }} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 1 upsertedId: 3 collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 34 } - description: "UpdateOne with upsert is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "updateOne" arguments: filter: { _id: 3, x: 33 } update: { $inc: { x : 1 }} upsert: true outcome: error: true collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } mongo-2.5.1/spec/support/retryable_writes_tests/insertOne.yml0000644000004100000410000000277013257253113024645 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } minServerVersion: '3.6' tests: - description: "InsertOne is committed on first attempt" failPoint: mode: { times: 1 } operation: name: "insertOne" arguments: document: { _id: 3, x: 33 } outcome: result: insertedId: 3 collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } - description: "InsertOne is not committed on first attempt" failPoint: mode: { times: 1 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "insertOne" arguments: document: { _id: 3, x: 33 } outcome: result: insertedId: 3 collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } - description: "InsertOne is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "insertOne" arguments: document: { _id: 3, x: 33 } outcome: error: true collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } mongo-2.5.1/spec/support/retryable_writes_tests/bulkWrite.yml0000644000004100000410000002413113257253113024642 0ustar www-datawww-datadata: - { _id: 1, x: 11 } minServerVersion: '3.6' tests: - description: "First command is retried" failPoint: mode: { times: 1 } operation: name: "bulkWrite" arguments: requests: - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "updateOne" arguments: filter: { _id: 2 } update: { $inc: { x : 1 }} - name: "deleteOne" arguments: filter: { _id: 1 } options: { ordered: true } outcome: result: deletedCount: 1 insertedIds: { 0: 2 } matchedCount: 1 modifiedCount: 1 upsertedCount: 0 upsertedIds: { } collection: data: - { _id: 2, x: 23 } - # Write operations in this ordered batch are intentionally sequenced so # that each write command consists of a single statement, which will # fail on the first attempt and succeed on the second, retry attempt. 
description: "All commands are retried" failPoint: mode: { times: 7 } operation: name: "bulkWrite" arguments: requests: - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "updateOne" arguments: filter: { _id: 2 } update: { $inc: { x : 1 }} - name: "insertOne" arguments: document: { _id: 3, x: 33 } - name: "updateOne" arguments: filter: { _id: 4, x: 44 } update: { $inc: { x : 1 }} upsert: true - name: "insertOne" arguments: document: { _id: 5, x: 55 } - name: "replaceOne" arguments: filter: { _id: 3 } replacement: { _id: 3, x: 333 } - name: "deleteOne" arguments: filter: { _id: 1 } options: { ordered: true } outcome: result: deletedCount: 1 insertedIds: { 0: 2, 2: 3, 4: 5 } matchedCount: 2 modifiedCount: 2 upsertedCount: 1 upsertedIds: { 3: 4 } collection: data: - { _id: 2, x: 23 } - { _id: 3, x: 333 } - { _id: 4, x: 45 } - { _id: 5, x: 55 } - description: "Both commands are retried after their first statement fails" failPoint: mode: { times: 2 } operation: name: "bulkWrite" arguments: requests: - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "updateOne" arguments: filter: { _id: 1 } update: { $inc: { x : 1 }} - name: "updateOne" arguments: filter: { _id: 2 } update: { $inc: { x : 1 }} options: { ordered: true } outcome: result: deletedCount: 0 insertedIds: { 0: 2 } matchedCount: 2 modifiedCount: 2 upsertedCount: 0 upsertedIds: { } collection: data: - { _id: 1, x: 12 } - { _id: 2, x: 23 } - description: "Second command is retried after its second statement fails" failPoint: mode: { skip: 2 } operation: name: "bulkWrite" arguments: requests: - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "updateOne" arguments: filter: { _id: 1 } update: { $inc: { x : 1 }} - name: "updateOne" arguments: filter: { _id: 2 } update: { $inc: { x : 1 }} options: { ordered: true } outcome: result: deletedCount: 0 insertedIds: { 0: 2 } matchedCount: 2 modifiedCount: 2 upsertedCount: 0 upsertedIds: { } collection: data: - { _id: 1, x: 12 } - { _id: 2, x: 23 } - description: "BulkWrite with unordered execution" failPoint: mode: { times: 1 } operation: name: "bulkWrite" arguments: requests: - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "insertOne" arguments: document: { _id: 3, x: 33 } options: { ordered: false } outcome: result: deletedCount: 0 insertedIds: { 0: 2, 1: 3 } matchedCount: 0 modifiedCount: 0 upsertedCount: 0 upsertedIds: { } collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } - description: "First insertOne is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "bulkWrite" arguments: requests: - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "updateOne" arguments: filter: { _id: 2 } update: { $inc: { x : 1 }} - name: "deleteOne" arguments: filter: { _id: 1 } options: { ordered: true } outcome: error: true result: deletedCount: 0 insertedIds: { } matchedCount: 0 modifiedCount: 0 upsertedCount: 0 upsertedIds: { } collection: data: - { _id: 1, x: 11 } - description: "Second updateOne is never committed" failPoint: mode: { skip: 1 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "bulkWrite" arguments: requests: - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "updateOne" arguments: filter: { _id: 2 } update: { $inc: { x : 1 }} - name: "deleteOne" arguments: filter: { _id: 1 } options: { ordered: true } outcome: error: true result: deletedCount: 0 insertedIds: { 0: 2 } matchedCount: 0 modifiedCount: 0 
upsertedCount: 0 upsertedIds: { } collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - description: "Third updateOne is never committed" failPoint: mode: { skip: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "bulkWrite" arguments: requests: - name: "updateOne" arguments: filter: { _id: 1 } update: { $inc: { x : 1 }} - name: "insertOne" arguments: document: { _id: 2, x: 22 } - name: "updateOne" arguments: filter: { _id: 2 } update: { $inc: { x : 1 }} options: { ordered: true } outcome: error: true result: deletedCount: 0 insertedIds: { 0: 2 } matchedCount: 1 modifiedCount: 1 upsertedCount: 0 upsertedIds: { } collection: data: - { _id: 1, x: 12 } - { _id: 2, x: 22 } mongo-2.5.1/spec/support/retryable_writes_tests/findOneAndReplace.yml0000644000004100000410000000326213257253113026175 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } minServerVersion: '3.6' tests: - description: "FindOneAndReplace is committed on first attempt" failPoint: mode: { times: 1 } operation: name: "findOneAndReplace" arguments: filter: { _id: 1 } replacement: { _id: 1, x: 111 } returnDocument: "Before" outcome: result: { _id: 1, x: 11 } collection: data: - { _id: 1, x: 111 } - { _id: 2, x: 22 } - description: "FindOneAndReplace is not committed on first attempt" failPoint: mode: { times: 1 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "findOneAndReplace" arguments: filter: { _id: 1 } replacement: { _id: 1, x: 111 } returnDocument: "Before" outcome: result: { _id: 1, x: 11 } collection: data: - { _id: 1, x: 111 } - { _id: 2, x: 22 } - description: "FindOneAndReplace is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "findOneAndReplace" arguments: filter: { _id: 1 } replacement: { _id: 1, x: 111 } returnDocument: "Before" outcome: error: true collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } mongo-2.5.1/spec/support/retryable_writes_tests/findOneAndUpdate.yml0000644000004100000410000000316213257253113026043 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } minServerVersion: '3.6' tests: - description: "FindOneAndUpdate is committed on first attempt" failPoint: mode: { times: 1 } operation: name: "findOneAndUpdate" arguments: filter: { _id: 1 } update: { $inc: { x : 1 }} returnDocument: "Before" outcome: result: { _id: 1, x: 11 } collection: data: - { _id: 1, x: 12 } - { _id: 2, x: 22 } - description: "FindOneAndUpdate is not committed on first attempt" failPoint: mode: { times: 1 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "findOneAndUpdate" arguments: filter: { _id: 1 } update: { $inc: { x : 1 }} returnDocument: "Before" outcome: result: { _id: 1, x: 11 } collection: data: - { _id: 1, x: 12 } - { _id: 2, x: 22 } - description: "FindOneAndUpdate is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "findOneAndUpdate" arguments: filter: { _id: 1 } update: { $inc: { x : 1 }} outcome: error: true collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } mongo-2.5.1/spec/support/retryable_writes_tests/deleteOne.yml0000644000004100000410000000250013257253113024572 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } minServerVersion: '3.6' tests: - description: "DeleteOne is committed on first attempt" failPoint: mode: { times: 1 } operation: name: "deleteOne" arguments: filter: { _id: 1 } outcome: result: deletedCount: 1 collection: data: - { _id: 2, x: 22 } - description: "DeleteOne is not 
committed on first attempt" failPoint: mode: { times: 1 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "deleteOne" arguments: filter: { _id: 1 } outcome: result: deletedCount: 1 collection: data: - { _id: 2, x: 22 } - description: "DeleteOne is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "deleteOne" arguments: filter: { _id: 1 } outcome: error: true collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 }mongo-2.5.1/spec/support/retryable_writes_tests/replaceOne.yml0000644000004100000410000000325513257253113024753 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } minServerVersion: '3.6' tests: - description: "ReplaceOne is committed on first attempt" failPoint: mode: { times: 1 } operation: name: "replaceOne" arguments: filter: { _id: 1 } replacement: { _id: 1, x: 111 } outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 collection: data: - { _id: 1, x: 111 } - { _id: 2, x: 22 } - description: "ReplaceOne is not committed on first attempt" failPoint: mode: { times: 1 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "replaceOne" arguments: filter: { _id: 1 } replacement: { _id: 1, x: 111 } outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 collection: data: - { _id: 1, x: 111 } - { _id: 2, x: 22 } - description: "ReplaceOne is never committed" failPoint: mode: { times: 2 } data: { failBeforeCommitExceptionCode: 1 } operation: name: "replaceOne" arguments: filter: { _id: 1 } replacement: { _id: 1, x: 111 } outcome: error: true collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } mongo-2.5.1/spec/support/retryable_writes_tests/insertMany.yml0000644000004100000410000000441013257253113025021 0ustar www-datawww-datadata: - { _id: 1, x: 11 } minServerVersion: '3.6' tests: - description: "InsertMany succeeds after one network error" failPoint: mode: { times: 1 } operation: name: "insertMany" arguments: documents: - { _id: 2, x: 22 } - { _id: 3, x: 33 } options: { ordered: true } outcome: result: insertedIds: - 2 - 3 collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } - description: "InsertMany with unordered execution" failPoint: mode: { times: 1 } operation: name: "insertMany" arguments: documents: - { _id: 2, x: 22 } - { _id: 3, x: 33 } options: { ordered: false } outcome: result: insertedIds: - 2 - 3 collection: data: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } - description: "InsertMany fails after multiple network errors" failPoint: # Normally, a mongod will insert the documents as a batch with a # single commit. If this fails, mongod may try to insert each # document one at a time depending on the failure. Therefore our # single insert command may trigger the failpoint twice on each # driver attempt. This test permanently enables the fail point to # ensure the retry attempt always fails. 
mode: "alwaysOn" data: { failBeforeCommitExceptionCode: 1 } operation: name: "insertMany" arguments: documents: - { _id: 2, x: 22 } - { _id: 3, x: 33 } - { _id: 4, x: 44 } options: { ordered: true } outcome: error: true collection: data: - { _id: 1, x: 11 } mongo-2.5.1/spec/support/server_selection/0000755000004100000410000000000013257253113020711 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/0000755000004100000410000000000013257253113022676 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/Unknown/0000755000004100000410000000000013257253113024335 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/Unknown/read/0000755000004100000410000000000013257253113025250 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/Unknown/read/SecondaryPreferred.yml0000644000004100000410000000030013257253113031552 0ustar www-datawww-data--- topology_description: type: Unknown servers: [] operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: nyc suitable_servers: [] in_latency_window: [] mongo-2.5.1/spec/support/server_selection/selection/Single/0000755000004100000410000000000013257253113024117 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/Single/read/0000755000004100000410000000000013257253113025032 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/Single/read/SecondaryPreferred.yml0000644000004100000410000000044313257253113031344 0ustar www-datawww-data--- topology_description: type: Single servers: - &1 address: a:27017 avg_rtt_ms: 5 type: Standalone tags: data_center: dc operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: nyc suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/server_selection/selection/Sharded/0000755000004100000410000000000013257253113024250 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/Sharded/read/0000755000004100000410000000000013257253113025163 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/Sharded/read/SecondaryPreferred.yml0000644000004100000410000000060613257253113031476 0ustar www-datawww-data--- topology_description: type: Sharded servers: - &1 address: g:27017 avg_rtt_ms: 5 type: Mongos tags: data_center: nyc - &2 address: h:27017 avg_rtt_ms: 35 type: Mongos tags: data_center: dc operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: nyc suitable_servers: - *1 - *2 in_latency_window: - *1 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/0000755000004100000410000000000013257253113026572 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/0000755000004100000410000000000013257253113027505 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/SecondaryPreferred.yml0000644000004100000410000000063613257253113034023 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - &2 address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: nyc suitable_servers: - *1 - *2 in_latency_window: - *1 ././@LongLink0000000000000000000000000000014600000000000011566 Lustar 
rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Nearest_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Nearest_non_matching.ym0000644000004100000410000000057313257253113034206 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: Nearest tag_sets: - data_center: sf suitable_servers: [] in_latency_window: [] ././@LongLink0000000000000000000000000000014600000000000011566 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.ym0000644000004100000410000000114313257253113034232 0ustar www-datawww-data# Catch bugs like CDRIVER-1447, ensure clients select a server that matches all # tags, even when the other server mismatches multiple tags. --- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: rack: one data_center: nyc - &2 address: c:27017 avg_rtt_ms: 5 type: RSSecondary tags: rack: two data_center: sf operation: read read_preference: mode: Secondary tag_sets: - data_center: nyc rack: one - other_tag: doesntexist suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary.yml0000644000004100000410000000062513257253113032162 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - &2 address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: Secondary tag_sets: - data_center: nyc suitable_servers: - *1 - *2 in_latency_window: - *1 ././@LongLink0000000000000000000000000000015700000000000011570 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_ma0000644000004100000410000000060413257253113034241 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: PrimaryPreferred tag_sets: - data_center: sf suitable_servers: [] in_latency_window: [] mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Nearest_multiple.yml0000644000004100000410000000063013257253113033543 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: b:27017 avg_rtt_ms: 10 type: RSSecondary tags: data_center: nyc - &2 address: c:27017 avg_rtt_ms: 20 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: Nearest tag_sets: - data_center: nyc suitable_servers: - *1 - *2 in_latency_window: - *1 - *2 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Nearest.yml0000644000004100000410000000062313257253113031632 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - &2 address: c:27017 
avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: Nearest tag_sets: - data_center: nyc suitable_servers: - *1 - *2 in_latency_window: - *1 ././@LongLink0000000000000000000000000000016100000000000011563 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_0000644000004100000410000000060613257253113034231 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: sf suitable_servers: [] in_latency_window: [] mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Primary.yml0000644000004100000410000000055613257253113031661 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: Primary tag_sets: - {} suitable_servers: [] in_latency_window: [] ././@LongLink0000000000000000000000000000014700000000000011567 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.y0000644000004100000410000000117113257253113034140 0ustar www-datawww-data# Ensure clients select a server that matches all tags, even when the other # server matches one tag and doesn't match the other. 
--- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: rack: one data_center: nyc - &2 address: c:27017 avg_rtt_ms: 5 type: RSSecondary tags: rack: two # mismatch data_center: nyc # match operation: read read_preference: mode: Secondary tag_sets: - data_center: nyc rack: one - other_tag: doesntexist suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/PrimaryPreferred.yml0000644000004100000410000000061613257253113033515 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - &2 address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: PrimaryPreferred tag_sets: - {} suitable_servers: - *1 - *2 in_latency_window: - *1 ././@LongLink0000000000000000000000000000015000000000000011561 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary_non_matching.0000644000004100000410000000057513257253113034170 0ustar www-datawww-data--- topology_description: type: ReplicaSetNoPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc operation: read read_preference: mode: Secondary tag_sets: - data_center: sf suitable_servers: [] in_latency_window: [] mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/0000755000004100000410000000000013257253113027131 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/0000755000004100000410000000000013257253113030044 5ustar www-datawww-data././@LongLink0000000000000000000000000000014600000000000011566 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/SecondaryPreferred.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/SecondaryPreferred.ym0000644000004100000410000000077513257253113034212 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - &2 address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: nyc suitable_servers: - *1 - *2 in_latency_window: - *1 ././@LongLink0000000000000000000000000000015000000000000011561 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Nearest_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Nearest_non_matching.0000644000004100000410000000073213257253113034174 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: Nearest tag_sets: - data_center: sf suitable_servers: [] in_latency_window: [] 
mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Secondary.yml0000644000004100000410000000076413257253113032525 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - &2 address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: Secondary tag_sets: - data_center: nyc suitable_servers: - *1 - *2 in_latency_window: - *1 ././@LongLink0000000000000000000000000000016100000000000011563 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_0000644000004100000410000000075613257253113034272 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - &1 address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: PrimaryPreferred tag_sets: - data_center: sf suitable_servers: - *1 in_latency_window: - *1 ././@LongLink0000000000000000000000000000015300000000000011564 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/SecondaryPreferred_ta0000644000004100000410000000077513257253113034252 0ustar www-datawww-data# Attempt to select the secondary, except its tag doesn't match. # Fall back to primary. --- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 avg_rtt_ms: 5 type: RSPrimary tags: data_center: nyc - &2 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: sf # No match. 
operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: nyc suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Nearest_multiple.yml0000644000004100000410000000100413257253113034076 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: b:27017 avg_rtt_ms: 10 type: RSSecondary tags: data_center: nyc - &3 address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - &2 address: a:27017 avg_rtt_ms: 20 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: Nearest tag_sets: - data_center: nyc suitable_servers: - *1 - *2 - *3 in_latency_window: - *1 - *2 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Nearest.yml0000644000004100000410000000077613257253113032202 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - &3 address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - &2 address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: Nearest tag_sets: - data_center: nyc suitable_servers: - *1 - *2 - *3 in_latency_window: - *1 ././@LongLink0000000000000000000000000000016300000000000011565 Lustar rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/SecondaryPreferred_no0000644000004100000410000000076013257253113034254 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - &1 address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: SecondaryPreferred tag_sets: - data_center: sf suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Primary.yml0000644000004100000410000000073013257253113032212 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - &1 address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: Primary tag_sets: - {} suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/PrimaryPreferred.yml0000644000004100000410000000074113257253113034053 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - &1 address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: PrimaryPreferred tag_sets: - {} suitable_servers: - *1 in_latency_window: - *1 ././@LongLink0000000000000000000000000000015200000000000011563 Lustar 
rootrootmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Secondary_non_matching.ymlmongo-2.5.1/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Secondary_non_matchin0000644000004100000410000000073413257253113034277 0ustar www-datawww-data--- topology_description: type: ReplicaSetWithPrimary servers: - address: b:27017 avg_rtt_ms: 5 type: RSSecondary tags: data_center: nyc - address: c:27017 avg_rtt_ms: 100 type: RSSecondary tags: data_center: nyc - address: a:27017 avg_rtt_ms: 26 type: RSPrimary tags: data_center: nyc operation: read read_preference: mode: Secondary tag_sets: - data_center: sf suitable_servers: [] in_latency_window: [] mongo-2.5.1/spec/support/server_selection/rtt/0000755000004100000410000000000013257253113021522 5ustar www-datawww-datamongo-2.5.1/spec/support/server_selection/rtt/value_test_2.yml0000644000004100000410000000006513257253113024642 0ustar www-datawww-data--- avg_rtt_ms: 3.1 new_rtt_ms: 36 new_avg_rtt: 9.68 mongo-2.5.1/spec/support/server_selection/rtt/value_test_3.yml0000644000004100000410000000007013257253113024637 0ustar www-datawww-data--- avg_rtt_ms: 9.12 new_rtt_ms: 9.12 new_avg_rtt: 9.12 mongo-2.5.1/spec/support/server_selection/rtt/first_value.yml0000644000004100000410000000006613257253113024572 0ustar www-datawww-data--- avg_rtt_ms: 'NULL' new_rtt_ms: 10 new_avg_rtt: 10 mongo-2.5.1/spec/support/server_selection/rtt/value_test_4.yml0000644000004100000410000000006613257253113024645 0ustar www-datawww-data--- avg_rtt_ms: 1 new_rtt_ms: 1000 new_avg_rtt: 200.8 mongo-2.5.1/spec/support/server_selection/rtt/value_test_1.yml0000644000004100000410000000006113257253113024635 0ustar www-datawww-data--- avg_rtt_ms: 0 new_rtt_ms: 5 new_avg_rtt: 1.0 mongo-2.5.1/spec/support/server_selection/rtt/value_test_5.yml0000644000004100000410000000006513257253113024645 0ustar www-datawww-data--- avg_rtt_ms: 0 new_rtt_ms: 0.25 new_avg_rtt: 0.05 mongo-2.5.1/spec/support/server_selection/rtt/first_value_zero.yml0000644000004100000410000000006413257253113025627 0ustar www-datawww-data--- avg_rtt_ms: 'NULL' new_rtt_ms: 0 new_avg_rtt: 0 mongo-2.5.1/spec/support/crud.rb0000644000004100000410000002143113257253113016621 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Matcher for determining if the results of the opeartion match the # test's expected results. # # @since 2.0.0 # Matcher for determining if the collection's data matches the # test's expected collection data. # # @since 2.0.0 RSpec::Matchers.define :match_collection_data do |test| match do test.compare_collection_data end end RSpec::Matchers.define :match_operation_result do |test| match do |actual| test.compare_operation_result(actual) end end require 'support/crud/read' require 'support/crud/write' module Mongo module CRUD # Represents a CRUD specification test. # # @since 2.0.0 class Spec # @return [ String ] description The spec description. # # @since 2.0.0 attr_reader :description # Instantiate the new spec. 
# # @example Create the spec. # Spec.new(file) # # @param [ String ] file The name of the file. # # @since 2.0.0 def initialize(file) file = File.new(file) @spec = YAML.load(ERB.new(file.read).result) file.close @description = File.basename(file) @data = @spec['data'] @crud_tests = @spec['tests'] @min_server_version = @spec['minServerVersion'] @max_server_version = @spec['maxServerVersion'] end # Whether the test can be run on a given server version. # # @example Can the test run on this server version? # spec.server_version_satisfied?(client) # # @param [ Mongo::Client ] client The client to check. # # @return [ true, false ] Whether the test can be run on the given # server version. # # @since 2.4.0 def server_version_satisfied?(client) lower_bound_satisfied?(client) && upper_bound_satisfied?(client) end # Get a list of CRUDTests for each test definition. # # @example Get the list of CRUDTests. # spec.tests # # @return [ Array ] The list of CRUDTests. # # @since 2.0.0 def tests @crud_tests.collect do |test| Mongo::CRUD::CRUDTest.new(@data, test) end end private def upper_bound_satisfied?(client) return true unless @max_server_version client.database.command(buildInfo: 1).first['version'] <= @max_server_version end def lower_bound_satisfied?(client) return true unless @min_server_version @min_server_version <= client.database.command(buildInfo: 1).first['version'] end end # Represents a single CRUD test. # # @since 2.0.0 class CRUDTest # The test description. # # @return [ String ] description The test description. # # @since 2.0.0 attr_reader :description FAIL_POINT_BASE_COMMAND = { configureFailPoint: "onPrimaryTransactionalWrite" } # Instantiate the new CRUDTest. # # @example Create the test. # CRUDTest.new(data, test) # # @param [ Array ] data The documents the collection # must have before the test runs. # @param [ Hash ] test The test specification. # # @since 2.0.0 def initialize(data, test) @data = data @fail_point_command = FAIL_POINT_BASE_COMMAND.merge(test['failPoint']) if test['failPoint'] @description = test['description'] @operation = Operation.get(test['operation']) @outcome = test['outcome'] end # Run the test. # # @example Run the test. # test.run(collection) # # @param [ Collection ] collection The collection the test # should be run on. # # @return [ Result, Array ] The result(s) of running the test. # # @since 2.0.0 def run(collection) @operation.execute(collection) end def setup_test(collection) clear_fail_point(collection) @collection = collection collection.delete_many collection.insert_many(@data) set_up_fail_point(collection) end def set_up_fail_point(collection) collection.client.use(:admin).command(@fail_point_command) if @fail_point_command end def clear_fail_point(collection) if @fail_point_command collection.client.use(:admin).command(FAIL_POINT_BASE_COMMAND.merge(mode: "off")) end end # The expected result of running the test. # # @example Get the expected result of running the test. # test.result # # @return [ Array ] The expected result of running the test. # # @since 2.0.0 def result @operation.has_results? ? @outcome['result'] : [] end # Compare the existing collection data and the expected collection data. # # @example Compare the existing and expected collection data. # test.compare_collection_data # # @return [ true, false ] The result of comparing the existing and expected # collection data. # # @since 2.0.0 def compare_collection_data if actual_collection_data.nil? outcome_collection_data.nil? elsif actual_collection_data.empty? 
outcome_collection_data.empty? else actual_collection_data.all? do |doc| outcome_collection_data.include?(doc) end end end # Compare the actual operation result to the expected operation result. # # @example Compare the existing and expected operation results. # test.compare_operation_result(actual_results) # # @params [ Object ] actual The actual test results. # # @return [ true, false ] The result of comparing the expected and actual operation result. # # @since 2.4.0 def compare_operation_result(actual) if actual.is_a?(Array) actual.empty? || @outcome['result'].each_with_index do |expected, i| compare_result(expected, actual[i]) end else compare_result(@outcome['result'], actual) end end # The expected data in the collection as an outcome after running this test. # # @example Get the outcome collection data # test.outcome_collection_data # # @return [ Array ] The list of documents expected to be in the collection # after running this test. # # @since 2.4.0 def outcome_collection_data @outcome['collection']['data'] if @outcome['collection'] end def error? !!@outcome['error'] end private def compare_result(expected, actual) case expected when nil actual.nil? when Hash results = actual.instance_variable_get(:@results) (results || actual).all? do |k, v| expected[k] == v || handle_upserted_id(k, expected[k], v) || handle_inserted_ids(k, expected[k], v) end when Integer expected == actual end end def handle_upserted_id(field, expected_id, actual_id) return true if expected_id.nil? if field == 'upsertedId' if expected_id.is_a?(Integer) actual_id.is_a?(BSON::ObjectId) || actual_id.nil? end end end def handle_inserted_ids(field, expected, actual) if field == 'insertedIds' expected.values == actual end end def actual_collection_data if @outcome['collection'] collection_name = @outcome['collection']['name'] || @collection.name @collection.database[collection_name].find.to_a end end end # Helper module for instantiating either a Read or Write test operation. # # @since 2.0.0 module Operation extend self # Get a new Operation. # # @example Get the operation. # Operation.get(spec) # # @param [ Hash ] spec The operation specification. # # @return [ Operation::Write, Operation::Read ] The Operation object. # # @since 2.0.0 def get(spec) if Write::OPERATIONS.keys.include?(spec['name']) Write.new(spec) else Read.new(spec) end end end end end mongo-2.5.1/spec/support/command_monitoring.rb0000644000004100000410000002324413257253113021553 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
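As context for the matchers and the TestSubscriber defined further down in this file, the sketch below shows one way a subscriber exposing the same started/succeeded/failed hooks can be attached to a client so that command events flow into it. The use of Client#subscribe with Mongo::Monitoring::COMMAND follows the driver's documented monitoring API, but the class name, address, and database here are illustrative, not part of the test suite.

require 'mongo'

# A simple logging subscriber with the same interface as TestSubscriber below.
class LoggingSubscriber
  def started(event)
    puts "STARTED: #{event.command_name} on #{event.database_name}"
  end

  def succeeded(event)
    puts "SUCCEEDED: #{event.command_name} (request_id=#{event.request_id})"
  end

  def failed(event)
    puts "FAILED: #{event.command_name}"
  end
end

client = Mongo::Client.new(['127.0.0.1:27017'], database: 'command-monitoring-demo')
client.subscribe(Mongo::Monitoring::COMMAND, LoggingSubscriber.new)
client['events_demo'].insert_one(a: 1)  # emits a command started/succeeded pair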
# RSpec::Matchers.define :match_command_name do |expectation| match do |event| expect(event.command_name.to_s).to eq(expectation.command_name.to_s) end end RSpec::Matchers.define :match_database_name do |expectation| match do |event| expect(event.database_name.to_s).to eq(expectation.database_name.to_s) end end RSpec::Matchers.define :generate_request_id do |expectation| match do |event| expect(event.request_id).to be > 0 end end RSpec::Matchers.define :generate_operation_id do |expectation| match do |event| expect(event.request_id).to be > 0 end end RSpec::Matchers.define :match_command do |expectation| include Mongo::CommandMonitoring::Matchable match do |event| data_matches?(event.command, expectation.event_data['command']) end end RSpec::Matchers.define :match_reply do |expectation| include Mongo::CommandMonitoring::Matchable match do |event| data_matches?(event.reply, expectation.event_data['reply']) end end RSpec::Matchers.define :match_command_started_event do |expectation| match do |event| expect(event).to match_command_name(expectation) expect(event).to match_database_name(expectation) expect(event).to generate_operation_id expect(event).to generate_request_id expect(event).to match_command(expectation) end end RSpec::Matchers.define :match_command_succeeded_event do |expectation| match do |event| expect(event).to match_command_name(expectation) expect(event).to generate_operation_id expect(event).to generate_request_id expect(event).to match_reply(expectation) end end RSpec::Matchers.define :match_command_failed_event do |expectation| match do |event| expect(event).to match_command_name(expectation) expect(event).to generate_operation_id expect(event).to generate_request_id end end module Mongo module CommandMonitoring # Matchers common behaviour. # # @since 2.1.0 module Matchable # Determine if the data matches. # # @example Does the data match? # matchable.data_matches?(actual, expected) # # @param [ Object ] actual The actual data. # @param [ Object ] expected The expected data. # # @return [ true, false ] If the data matches. # # @since 2.1.0 def data_matches?(actual, expected) case expected when ::Hash, BSON::Document then hash_matches?(actual, expected) when ::Array array_matches?(actual, expected) else value_matches?(actual, expected) end end # Determine if the hash matches. # # @example Does the hash match? # matchable.hash_matches?(actual, expected) # # @param [ Hash ] actual The actual hash. # @param [ Hash ] expected The expected hash. # # @return [ true, false ] If the hash matches. # # @since 2.1.0 def hash_matches?(actual, expected) if expected['writeConcern'] expected['writeConcern'] = Options::Mapper.transform_keys_to_symbols(expected['writeConcern']) end if expected.keys.first == '$numberLong' converted = expected.values.first.to_i (actual == converted) || actual >= 0 else expected.each do |key, value| return false unless data_matches?(actual[key], value) end end end # Determine if an array matches. # # @example Does the array match? # matchable.array_matches?(actual, expected) # # @param [ Array ] actual The actual array. # @param [ Array ] expected The expected array. # # @return [ true, false ] If the array matches. # # @since 2.1.0 def array_matches?(actual, expected) expected.each_with_index do |value, i| # @todo: Durran: fix for kill cursors replies if actual return false unless data_matches?(actual[i], value) end end end # Check if a value matches. # # @example Does a value match. 
# matchable.value_matches?(actual, expected) # # @param [ Object ] actual The actual value. # @param [ Object ] expected The expected object. # # @return [ true, false ] If the value matches. # # @since 2.1.0 def value_matches?(actual, expected) case expected when '42', 42 then actual > 0 when '' then !actual.nil? else actual == expected end end end # Represents a command monitoring spec in its entirety. # # @since 2.1.0 class Spec # Create the spec. # # @example Create the spec. # Spec.new('/path/to/test') # # @param [ String ] file The yaml test file. # # @since 2.1.0 def initialize(file) file = File.new(file) @spec = YAML.load(ERB.new(file.read).result) file.close @data = @spec['data'] @tests = @spec['tests'] end # Get all the tests in the spec. # # @example Get all the tests. # spec.tests # # @return [ Array ] The tests. def tests @tests.map do |test| Test.new(@data, test) end end end # Represents an individual command monitoring test. # # @since 2.1.0 class Test # @return [ String ] description The test description. attr_reader :description # @return [ Array ] The expectations. attr_reader :expectations # @return [ String ] The server version to ignore if greater. attr_reader :ignore_if_server_version_greater_than # @return [ String ] The server version to ignore if lower. attr_reader :ignore_if_server_version_less_than # Create the new test. # # @example Create the test. # Test.new(data, test) # # @param [ Array ] data The test data. # @param [ Hash ] The test itself. # # @since 2.1.0 def initialize(data, test) @data = data @description = test['description'] @ignore_if_server_version_greater_than = test['ignore_if_server_version_greater_than'] @ignore_if_server_version_less_than = test['ignore_if_server_version_less_than'] @operation = Mongo::CRUD::Operation.get(test['operation']) @expectations = test['expectations'].map{ |e| Expectation.new(e) } end # Run the test against the provided collection. # # @example Run the test. # test.run(collection) # # @param [ Mongo::Collection ] collection The collection. # # @since 2.1.0 def run(collection) collection.insert_many(@data) @operation.execute(collection) end end # Encapsulates expectation behaviour. # # @since 2.1.0 class Expectation # @return [ String ] event_type The type of expected event. attr_reader :event_type # @return [ Hash ] event_data The event data. attr_reader :event_data # Get the expected command name. # # @example Get the expected command name. # expectation.command_name # # @return [ String ] The command name. # # @since 2.1.0 def command_name @event_data['command_name'] end # Get the expected database name. # # @example Get the expected database name. # expectation.database_name # # @return [ String ] The database name. # # @since 2.1.0 def database_name @event_data['database_name'] end # Get a readable event name. # # @example Get the event name. # expectation.event_name # # @return [ String ] The event name. # # @since 2.1.0 def event_name event_type.gsub('_', ' ') end # Create the new expectation. # # @example Create the new expectation. # Expectation.new(expectation) # # @param [ Hash ] expectation The expectation. # # @since 2.1.0 def initialize(expectation) @event_type = expectation.keys.first @event_data = expectation[@event_type] end # Get the name of the matcher. # # @example Get the matcher name. # expectation.matcher # # @return [ String ] The matcher name. # # @since 2.1.0 def matcher "match_#{event_type}" end end # The test subscriber to track the events. 
# # @since 2.1.0 class TestSubscriber def started(event) command_started_event[event.command_name] = event end def succeeded(event) command_succeeded_event[event.command_name] = event end def failed(event) command_failed_event[event.command_name] = event end private def command_started_event @started_events ||= BSON::Document.new end def command_succeeded_event @succeeded_events ||= BSON::Document.new end def command_failed_event @failed_events ||= BSON::Document.new end end end end mongo-2.5.1/spec/support/crud_tests/0000755000004100000410000000000013257253113017515 5ustar www-datawww-datamongo-2.5.1/spec/support/crud_tests/write/0000755000004100000410000000000013257253113020647 5ustar www-datawww-datamongo-2.5.1/spec/support/crud_tests/write/updateMany-arrayFilters.yml0000644000004100000410000000337713257253113026160 0ustar www-datawww-datadata: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} minServerVersion: '3.5.6' tests: - description: "UpdateMany when no documents match arrayFilters" operation: name: "updateMany" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 4} outcome: result: matchedCount: 2 modifiedCount: 0 collection: data: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - description: "UpdateMany when one document matches arrayFilters" operation: name: "updateMany" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 3} outcome: result: matchedCount: 2 modifiedCount: 1 collection: data: - {_id: 1, y: [{b: 2}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - description: "UpdateMany when multiple documents match arrayFilters" operation: name: "updateMany" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 1} outcome: result: matchedCount: 2 modifiedCount: 2 collection: data: - {_id: 1, y: [{b: 3}, {b: 2}]} - {_id: 2, y: [{b: 0}, {b: 2}]} mongo-2.5.1/spec/support/crud_tests/write/findOneAndUpdate-collation.yml0000644000004100000410000000153413257253113026527 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} - {_id: 3, x: 'pINg'} minServerVersion: '3.4' tests: - description: "FindOneAndUpdate when many documents match with collation returning the document before modification" operation: name: findOneAndUpdate arguments: filter: x: 'PING' update: $set: {x: 'pong'} projection: {x: 1, _id: 0} sort: {_id: 1} collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: {x: 'ping'} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 'pong'} - {_id: 3, x: 'pINg'}mongo-2.5.1/spec/support/crud_tests/write/bulkWrite-arrayFilters.yml0000644000004100000410000000267613257253113026022 0ustar www-datawww-datadata: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} minServerVersion: '3.5.6' tests: - description: "BulkWrite with arrayFilters" operation: name: "bulkWrite" arguments: requests: - # UpdateOne when one document matches arrayFilters name: "updateOne" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 3} - # UpdateMany when multiple documents match arrayFilters name: "updateMany" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 1} options: { ordered: true } outcome: result: deletedCount: 0 insertedIds: {} matchedCount: 3 modifiedCount: 3 upsertedCount: 0 upsertedIds: {} collection: data: - {_id: 1, y: [{b: 2}, {b: 2}]} - {_id: 2, y: [{b: 0}, {b: 2}]} 
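The arrayFilters fixtures above (updateMany-arrayFilters.yml and bulkWrite-arrayFilters.yml) describe the operations abstractly. As a concrete, hand-written illustration, the snippet below shows roughly how the "UpdateMany when one document matches arrayFilters" case maps onto a driver call; the address, database, and collection names are made up, and the snake_case :array_filters option (which requires MongoDB 3.5.6+, per the fixture's minServerVersion) is an assumption based on the driver's usual option naming.

require 'mongo'

client = Mongo::Client.new(['127.0.0.1:27017'], database: 'crud-demo')
coll = client['array_filters_demo']
coll.delete_many
coll.insert_many([
  { _id: 1, y: [{ b: 3 }, { b: 1 }] },
  { _id: 2, y: [{ b: 0 }, { b: 1 }] }
])

# Only array elements matching the "i" filter identifier are updated;
# :array_filters is assumed here and needs MongoDB 3.5.6 or newer.
result = coll.update_many({}, { '$set' => { 'y.$[i].b' => 2 } },
                          array_filters: [{ 'i.b' => 3 }])
puts "matched=#{result.matched_count} modified=#{result.modified_count}"
# Expected per the fixture outcome: matched=2 modified=1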
mongo-2.5.1/spec/support/crud_tests/write/findOneAndReplace-upsert.yml0000644000004100000410000000623013257253113026214 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} minServerVersion: '2.6' tests: - description: "FindOneAndReplace when no documents match without id specified with upsert returning the document before modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {x: 44} projection: {x: 1, _id: 0} # Omit the sort option as it has no effect when no documents # match and would only cause an inconsistent return value on # pre-3.0 servers when combined with returnDocument "before" # (see: SERVER-17650). upsert: true outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} - description: "FindOneAndReplace when no documents match without id specified with upsert returning the document after modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {x: 44} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} upsert: true outcome: result: {x: 44} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} - description: "FindOneAndReplace when no documents match with id specified with upsert returning the document before modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {_id: 4, x: 44} projection: {x: 1, _id: 0} # Omit the sort option as it has no effect when no documents # match and would only cause an inconsistent return value on # pre-3.0 servers when combined with returnDocument "before" # (see: SERVER-17650). upsert: true outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} - description: "FindOneAndReplace when no documents match with id specified with upsert returning the document after modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {_id: 4, x: 44} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} upsert: true outcome: result: {x: 44} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} mongo-2.5.1/spec/support/crud_tests/write/updateOne-collation.yml0000644000004100000410000000133313257253113025300 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} minServerVersion: '3.4' tests: - description: "UpdateOne when one document matches with collation" operation: name: "updateOne" arguments: filter: {x: 'PING'} update: $set: {x: 'pong'} collation: { locale: 'en_US', strength: 2} # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 'pong'} mongo-2.5.1/spec/support/crud_tests/write/findOneAndDelete.yml0000644000004100000410000000257413257253113024532 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "FindOneAndDelete when many documents match" operation: name: findOneAndDelete arguments: filter: _id: {$gt: 1} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: {x: 22} collection: data: - {_id: 1, x: 11} - {_id: 3, x: 33} - description: "FindOneAndDelete when one document matches" operation: name: findOneAndDelete arguments: filter: {_id: 2} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: {x: 22} collection: data: - {_id: 1, x: 11} - {_id: 3, x: 33} - description: "FindOneAndDelete when no 
documents match" operation: name: findOneAndDelete arguments: filter: {_id: 4} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33}mongo-2.5.1/spec/support/crud_tests/write/deleteMany-collation.yml0000644000004100000410000000114013257253113025437 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} - {_id: 3, x: 'pINg'} minServerVersion: '3.4' tests: - description: "DeleteMany when many documents match with collation" operation: name: "deleteMany" arguments: filter: x: 'PING' collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: deletedCount: 2 collection: data: - {_id: 1, x: 11} mongo-2.5.1/spec/support/crud_tests/write/updateOne.yml0000644000004100000410000000436713257253113023330 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} minServerVersion: '2.6' tests: - description: "UpdateOne when many documents match" operation: name: "updateOne" arguments: filter: _id: {$gt: 1} update: $inc: {x: 1} outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 # Can't verify collection data because we don't have a way of # knowing which document gets updated. - description: "UpdateOne when one document matches" operation: name: "updateOne" arguments: filter: {_id: 1} update: $inc: {x: 1} outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 12} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateOne when no documents match" operation: name: "updateOne" arguments: filter: {_id: 4} update: $inc: {x: 1} outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateOne with upsert when no documents match" operation: name: "updateOne" arguments: filter: {_id: 4} update: $inc: {x: 1} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 1 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} mongo-2.5.1/spec/support/crud_tests/write/findOneAndReplace-collation.yml0000644000004100000410000000142713257253113026661 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} minServerVersion: '3.4' tests: - description: "FindOneAndReplace when one document matches with collation returning the document after modification" operation: name: findOneAndReplace arguments: filter: {x: 'PING'} replacement: {x: 'pong'} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: {x: 'pong'} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 'pong'} mongo-2.5.1/spec/support/crud_tests/write/insertOne.yml0000644000004100000410000000062713257253113023345 0ustar www-datawww-datadata: - {_id: 1, x: 11} tests: - description: "InsertOne with a non-existing document" operation: name: "insertOne" arguments: document: {_id: 2, x: 22} outcome: result: insertedId: 2 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22}mongo-2.5.1/spec/support/crud_tests/write/updateOne-arrayFilters.yml0000644000004100000410000000641713257253113025773 0ustar www-datawww-datadata: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - {_id: 3, y: [{b: 5, c: [{d: 2}, {d: 1}] }]} minServerVersion: '3.5.6' tests: - description: "UpdateOne when no document matches 
arrayFilters" operation: name: "updateOne" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 4} outcome: result: matchedCount: 1 modifiedCount: 0 collection: data: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - {_id: 3, y: [{b: 5, c: [{d: 2}, {d: 1}] }]} - description: "UpdateOne when one document matches arrayFilters" operation: name: "updateOne" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 3} outcome: result: matchedCount: 1 modifiedCount: 1 collection: data: - {_id: 1, y: [{b: 2}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - {_id: 3, y: [{b: 5, c: [{d: 2}, {d: 1}] }]} - description: "UpdateOne when multiple documents match arrayFilters" operation: name: "updateOne" arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 1} outcome: result: matchedCount: 1 modifiedCount: 1 collection: data: - {_id: 1, y: [{b: 3}, {b: 2}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - {_id: 3, y: [{b: 5, c: [{d: 2}, {d: 1}] }]} - description: "UpdateOne when no documents match multiple arrayFilters" operation: name: "updateOne" arguments: filter: {_id: 3} update: $set: {"y.$[i].c.$[j].d": 0} arrayFilters: - {i.b: 5} - {j.d: 3} outcome: result: matchedCount: 1 modifiedCount: 0 collection: data: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - {_id: 3, y: [{b: 5, c: [{d: 2}, {d: 1}] }]} - description: "UpdateOne when one document matches multiple arrayFilters" operation: name: "updateOne" arguments: filter: {_id: 3} update: $set: {"y.$[i].c.$[j].d": 0} arrayFilters: - {i.b: 5} - {j.d: 1} outcome: result: matchedCount: 1 modifiedCount: 1 collection: data: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - {_id: 3, y: [{b: 5, c: [{d: 2}, {d: 0}] }]} mongo-2.5.1/spec/support/crud_tests/write/findOneAndDelete-collation.yml0000644000004100000410000000127513257253113026511 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} - {_id: 3, x: 'pINg'} minServerVersion: '3.4' tests: - description: "FindOneAndDelete when one document matches with collation" operation: name: findOneAndDelete arguments: filter: {_id: 2, x: 'PING'} projection: {x: 1, _id: 0} sort: {x: 1} collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: {x: 'ping'} collection: data: - {_id: 1, x: 11} - {_id: 3, x: 'pINg'}mongo-2.5.1/spec/support/crud_tests/write/findOneAndReplace.yml0000644000004100000410000000663513257253113024705 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "FindOneAndReplace when many documents match returning the document before modification" operation: name: findOneAndReplace arguments: filter: _id: {$gt: 1} replacement: {x: 32} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: {x: 22} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 32} - {_id: 3, x: 33} - description: "FindOneAndReplace when many documents match returning the document after modification" operation: name: findOneAndReplace arguments: filter: _id: {$gt: 1} replacement: {x: 32} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} outcome: result: {x: 32} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 32} - {_id: 3, x: 33} - description: "FindOneAndReplace when one document matches returning the document before modification" operation: name: findOneAndReplace arguments: filter: {_id: 2} replacement: {x: 32} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: {x: 22} collection: 
data: - {_id: 1, x: 11} - {_id: 2, x: 32} - {_id: 3, x: 33} - description: "FindOneAndReplace when one document matches returning the document after modification" operation: name: findOneAndReplace arguments: filter: {_id: 2} replacement: {x: 32} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} outcome: result: {x: 32} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 32} - {_id: 3, x: 33} - description: "FindOneAndReplace when no documents match returning the document before modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {x: 44} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "FindOneAndReplace when no documents match returning the document after modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {x: 44} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} mongo-2.5.1/spec/support/crud_tests/write/updateOne-pre_2.6.yml0000644000004100000410000000452613257253113024476 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} # This file includes the same test cases as updateOne.yml with some omissions # for pre-2.6 servers. We cannot verify the update result's modifiedCount as it # is not available with legacy write operations and getLastError. maxServerVersion: '2.4.99' tests: - description: "UpdateOne when many documents match" operation: name: "updateOne" arguments: filter: _id: {$gt: 1} update: $inc: {x: 1} outcome: result: matchedCount: 1 upsertedCount: 0 # Can't verify collection data because we don't have a way of # knowing which document gets updated. 
- description: "UpdateOne when one document matches" operation: name: "updateOne" arguments: filter: {_id: 1} update: $inc: {x: 1} outcome: result: matchedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 12} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateOne when no documents match" operation: name: "updateOne" arguments: filter: {_id: 4} update: $inc: {x: 1} outcome: result: matchedCount: 0 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateOne with upsert when no documents match" operation: name: "updateOne" arguments: filter: {_id: 4} update: $inc: {x: 1} upsert: true outcome: result: matchedCount: 0 upsertedCount: 1 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} mongo-2.5.1/spec/support/crud_tests/write/replaceOne-collation.yml0000644000004100000410000000131613257253113025432 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} minServerVersion: '3.4' tests: - description: "ReplaceOne when one document matches with collation" operation: name: "replaceOne" arguments: filter: {x: 'PING'} replacement: {_id: 2, x: 'pong'} collation: {locale: 'en_US', strength: 2} # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 'pong'}mongo-2.5.1/spec/support/crud_tests/write/findOneAndUpdate.yml0000644000004100000410000001203213257253113024540 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "FindOneAndUpdate when many documents match returning the document before modification" operation: name: findOneAndUpdate arguments: filter: _id: {$gt: 1} update: $inc: {x: 1} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: {x: 22} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 23} - {_id: 3, x: 33} - description: "FindOneAndUpdate when many documents match returning the document after modification" operation: name: findOneAndUpdate arguments: filter: _id: {$gt: 1} update: $inc: {x: 1} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} outcome: result: {x: 23} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 23} - {_id: 3, x: 33} - description: "FindOneAndUpdate when one document matches returning the document before modification" operation: name: findOneAndUpdate arguments: filter: {_id: 2} update: $inc: {x: 1} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: {x: 22} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 23} - {_id: 3, x: 33} - description: "FindOneAndUpdate when one document matches returning the document after modification" operation: name: findOneAndUpdate arguments: filter: {_id: 2} update: $inc: {x: 1} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} outcome: result: {x: 23} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 23} - {_id: 3, x: 33} - description: "FindOneAndUpdate when no documents match returning the document before modification" operation: name: findOneAndUpdate arguments: filter: {_id: 4} update: $inc: {x: 1} projection: {x: 1, _id: 0} sort: {x: 1} outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "FindOneAndUpdate when no documents match with upsert returning the document before modification" operation: name: findOneAndUpdate arguments: filter: {_id: 4} update: $inc: {x: 1} projection: {x: 1, _id: 0} # Omit the sort option as it has no 
effect when no documents # match and would only cause an inconsistent return value on # pre-3.0 servers when combined with returnDocument "before" # (see: SERVER-17650). upsert: true outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} - description: "FindOneAndUpdate when no documents match returning the document after modification" operation: name: findOneAndUpdate arguments: filter: {_id: 4} update: $inc: {x: 1} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "FindOneAndUpdate when no documents match with upsert returning the document after modification" operation: name: findOneAndUpdate arguments: filter: {_id: 4} update: $inc: {x: 1} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} upsert: true outcome: result: {x: 1} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1}mongo-2.5.1/spec/support/crud_tests/write/replaceOne-upsert.yml0000644000004100000410000000257013257253113024773 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} minServerVersion: '2.6' # See SERVER-5289 for why the collection data is only checked for server versions >= 2.6 tests: - description: "ReplaceOne with upsert when no documents match without an id specified" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {x: 1} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} - description: "ReplaceOne with upsert when no documents match with an id specified" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {_id: 4, x: 1} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} mongo-2.5.1/spec/support/crud_tests/write/deleteMany.yml0000644000004100000410000000150113257253113023456 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "DeleteMany when many documents match" operation: name: "deleteMany" arguments: filter: _id: {$gt: 1} outcome: result: deletedCount: 2 collection: data: - {_id: 1, x: 11} - description: "DeleteMany when no document matches" operation: name: "deleteMany" arguments: filter: {_id: 4} outcome: result: deletedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} mongo-2.5.1/spec/support/crud_tests/write/replaceOne-pre_2.6.yml0000644000004100000410000000573413257253113024631 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} # This file includes the same test cases as replaceOne.yml with some omissions # for pre-2.6 servers. We cannot verify the update result's modifiedCount as it # is not available with legacy write operations and getLastError. Additionally, # we cannot verify the ID of an upserted document in some cases due to # SERVER-5289. maxServerVersion: '2.4.99' tests: - description: "ReplaceOne when many documents match" operation: name: "replaceOne" arguments: filter: _id: {$gt: 1} replacement: {x: 111} outcome: result: matchedCount: 1 upsertedCount: 0 # Can't verify collection data because we don't have a way of # knowing which document gets updated. 
- description: "ReplaceOne when one document matches" operation: name: "replaceOne" arguments: filter: {_id: 1} replacement: {_id: 1, x: 111} outcome: result: matchedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 111} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "ReplaceOne when no documents match" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {_id: 4, x: 1} outcome: result: matchedCount: 0 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "ReplaceOne with upsert when no documents match without an id specified" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {x: 1} upsert: true outcome: result: matchedCount: 0 upsertedCount: 1 # Can't verify upsertedId or collection data because server versions # before 2.6 do not take the _id from the filter document during an # upsert (see: SERVER-5289) - description: "ReplaceOne with upsert when no documents match with an id specified" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {_id: 4, x: 1} upsert: true outcome: result: matchedCount: 0 upsertedCount: 1 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} mongo-2.5.1/spec/support/crud_tests/write/deleteOne.yml0000644000004100000410000000231213257253113023274 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "DeleteOne when many documents match" operation: name: "deleteOne" arguments: filter: _id: {$gt: 1} outcome: result: deletedCount: 1 # can't verify collection because we don't have a way # of knowing which document gets deleted. - description: "DeleteOne when one document matches" operation: name: "deleteOne" arguments: filter: {_id: 2} outcome: result: deletedCount: 1 collection: data: - {_id: 1, x: 11} - {_id: 3, x: 33} - description: "DeleteOne when no documents match" operation: name: "deleteOne" arguments: filter: {_id: 4} outcome: result: deletedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} mongo-2.5.1/spec/support/crud_tests/write/replaceOne.yml0000644000004100000410000000552413257253113023455 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} minServerVersion: '2.6' tests: - description: "ReplaceOne when many documents match" operation: name: "replaceOne" arguments: filter: _id: {$gt: 1} replacement: {x: 111} outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 # Can't verify collection data because we don't have a way of # knowing which document gets updated. 
- description: "ReplaceOne when one document matches" operation: name: "replaceOne" arguments: filter: {_id: 1} replacement: {_id: 1, x: 111} outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 111} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "ReplaceOne when no documents match" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {_id: 4, x: 1} outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "ReplaceOne with upsert when no documents match without an id specified" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {x: 1} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 1 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} - description: "ReplaceOne with upsert when no documents match with an id specified" operation: name: "replaceOne" arguments: filter: {_id: 4} replacement: {_id: 4, x: 1} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 1 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} mongo-2.5.1/spec/support/crud_tests/write/updateMany.yml0000644000004100000410000000444213257253113023505 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} minServerVersion: '2.6' tests: - description: "UpdateMany when many documents match" operation: name: "updateMany" arguments: filter: _id: {$gt: 1} update: $inc: {x: 1} outcome: result: matchedCount: 2 modifiedCount: 2 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 23} - {_id: 3, x: 34} - description: "UpdateMany when one document matches" operation: name: "updateMany" arguments: filter: {_id: 1} update: $inc: {x: 1} outcome: result: matchedCount: 1 modifiedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 12} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateMany when no documents match" operation: name: "updateMany" arguments: filter: {_id: 4} update: $inc: {x: 1} outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateMany with upsert when no documents match" operation: name: "updateMany" arguments: filter: {_id: 4} update: $inc: {x: 1} upsert: true outcome: result: matchedCount: 0 modifiedCount: 0 upsertedCount: 1 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} mongo-2.5.1/spec/support/crud_tests/write/deleteOne-collation.yml0000644000004100000410000000116613257253113025264 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} - {_id: 3, x: 'pINg'} minServerVersion: '3.4' tests: - description: "DeleteOne when many documents matches with collation" operation: name: "deleteOne" arguments: filter: {x: 'PING'} collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: deletedCount: 1 collection: data: - {_id: 1, x: 11} - {_id: 3, x: 'pINg'} mongo-2.5.1/spec/support/crud_tests/write/updateMany-collation.yml0000644000004100000410000000146413257253113025470 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 'ping'} - {_id: 3, x: 'pINg'} minServerVersion: '3.4' tests: - description: "UpdateMany when many documents match with collation" operation: name: "updateMany" arguments: 
filter: x: 'ping' update: $set: {x: 'pong'} collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: matchedCount: 2 modifiedCount: 2 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 'pong'} - {_id: 3, x: 'pong'} mongo-2.5.1/spec/support/crud_tests/write/updateMany-pre_2.6.yml0000644000004100000410000000460213257253113024654 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} # This file includes the same test cases as updateMany.yml with some omissions # for pre-2.6 servers. We cannot verify the update result's modifiedCount as it # is not available with legacy write operations and getLastError. maxServerVersion: '2.4.99' tests: - description: "UpdateMany when many documents match" operation: name: "updateMany" arguments: filter: _id: {$gt: 1} update: $inc: {x: 1} outcome: result: matchedCount: 2 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 23} - {_id: 3, x: 34} - description: "UpdateMany when one document matches" operation: name: "updateMany" arguments: filter: {_id: 1} update: $inc: {x: 1} outcome: result: matchedCount: 1 upsertedCount: 0 collection: data: - {_id: 1, x: 12} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateMany when no documents match" operation: name: "updateMany" arguments: filter: {_id: 4} update: $inc: {x: 1} outcome: result: matchedCount: 0 upsertedCount: 0 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "UpdateMany with upsert when no documents match" operation: name: "updateMany" arguments: filter: {_id: 4} update: $inc: {x: 1} upsert: true outcome: result: matchedCount: 0 upsertedCount: 1 upsertedId: 4 collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 1} mongo-2.5.1/spec/support/crud_tests/write/insertMany.yml0000644000004100000410000000101213257253113023515 0ustar www-datawww-datadata: - {_id: 1, x: 11} tests: - description: "InsertMany with non-existing documents" operation: name: "insertMany" arguments: documents: - {_id: 2, x: 22} - {_id: 3, x: 33} outcome: result: insertedIds: { 0: 2, 1: 3 } collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33}mongo-2.5.1/spec/support/crud_tests/write/findOneAndUpdate-arrayFilters.yml0000644000004100000410000000360713257253113027215 0ustar www-datawww-datadata: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} minServerVersion: '3.5.6' tests: - description: "FindOneAndUpdate when no document matches arrayFilters" operation: name: findOneAndUpdate arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 4} outcome: result: _id: 1 y: - {b: 3} - {b: 1} collection: data: - {_id: 1, y: [{b: 3}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - description: "FindOneAndUpdate when one document matches arrayFilters" operation: name: findOneAndUpdate arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 3} outcome: result: _id: 1 y: - {b: 3} - {b: 1} collection: data: - {_id: 1, y: [{b: 2}, {b: 1}]} - {_id: 2, y: [{b: 0}, {b: 1}]} - description: "FindOneAndUpdate when multiple documents match arrayFilters" operation: name: findOneAndUpdate arguments: filter: {} update: $set: {"y.$[i].b": 2} arrayFilters: - {i.b: 1} outcome: result: _id: 1 y: - {b: 3} - {b: 1} collection: data: - {_id: 1, y: [{b: 3}, {b: 2}]} - {_id: 2, y: [{b: 0}, {b: 1}]} 
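These write fixtures are consumed by the Mongo::CRUD::Spec and CRUDTest helpers shown earlier in spec/support/crud.rb. As a rough sketch of that flow outside the RSpec suite, the snippet below loads one fixture and drives each test case against a throwaway collection; it assumes a local mongod, that the spec directory is on the load path, and that rspec-expectations is available (crud.rb defines RSpec matchers at load time). The client address and collection name are illustrative.

$LOAD_PATH.unshift('spec')          # so that require 'support/crud' resolves
require 'mongo'
require 'yaml'
require 'erb'
require 'rspec/expectations'        # crud.rb defines RSpec matchers when loaded
require 'support/crud'

spec   = Mongo::CRUD::Spec.new('spec/support/crud_tests/write/findOneAndUpdate-arrayFilters.yml')
client = Mongo::Client.new(['127.0.0.1:27017'], database: 'crud-spec-demo')
collection = client['crud_spec_demo']

if spec.server_version_satisfied?(client)   # arrayFilters needs MongoDB 3.5.6+
  spec.tests.each do |test|
    test.setup_test(collection)             # reset the data and any fail point
    result = test.run(collection)           # execute the operation under test
    puts "#{test.description}: " \
         "result ok=#{!!test.compare_operation_result(result)}, " \
         "collection ok=#{test.compare_collection_data}"
  end
end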
mongo-2.5.1/spec/support/crud_tests/write/findOneAndReplace-upsert_pre_2.6.yml0000644000004100000410000000651713257253113027457 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} # This file includes the same test cases as findOneAndReplace-upsert.yml with # some omissions for pre-2.6 servers. We cannot verify the ID of an upserted # document in some cases due to SERVER-5289. maxServerVersion: '2.4.99' tests: - description: "FindOneAndReplace when no documents match without id specified with upsert returning the document before modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {x: 44} projection: {x: 1, _id: 0} # Omit the sort option as it has no effect when no documents # match and would only cause an inconsistent return value on # pre-3.0 servers when combined with returnDocument "before" # (see: SERVER-17650). upsert: true outcome: result: null # Can't verify collection data because server versions before 2.6 do # not take the _id from the filter document during an upsert (see: # SERVER-5289). - description: "FindOneAndReplace when no documents match without id specified with upsert returning the document after modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {x: 44} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} upsert: true outcome: result: {x: 44} # Can't verify collection data because server versions before 2.6 do # not take the _id from the filter document during an upsert (see: # SERVER-5289). - description: "FindOneAndReplace when no documents match with id specified with upsert returning the document before modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {_id: 4, x: 44} projection: {x: 1, _id: 0} # Omit the sort option as it has no effect when no documents # match and would only cause an inconsistent return value on # pre-3.0 servers when combined with returnDocument "before" # (see: SERVER-17650). 
upsert: true outcome: result: null collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} - description: "FindOneAndReplace when no documents match with id specified with upsert returning the document after modification" operation: name: findOneAndReplace arguments: filter: {_id: 4} replacement: {_id: 4, x: 44} projection: {x: 1, _id: 0} returnDocument: After sort: {x: 1} upsert: true outcome: result: {x: 44} collection: data: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} mongo-2.5.1/spec/support/crud_tests/read/0000755000004100000410000000000013257253113020430 5ustar www-datawww-datamongo-2.5.1/spec/support/crud_tests/read/aggregate-collation.yml0000644000004100000410000000074713257253113025073 0ustar www-datawww-datadata: - {_id: 1, x: 'ping'} minServerVersion: '3.4' tests: - description: "Aggregate with collation" operation: name: aggregate arguments: pipeline: - $match: x: 'PING' collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: - {_id: 1, x: 'ping'} mongo-2.5.1/spec/support/crud_tests/read/count.yml0000644000004100000410000000131413257253113022302 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "Count without a filter" operation: name: count arguments: filter: { } outcome: result: 3 - description: "Count with a filter" operation: name: count arguments: filter: _id: {$gt: 1} outcome: result: 2 - description: "Count with skip and limit" operation: name: count arguments: filter: {} skip: 1 limit: 3 outcome: result: 2mongo-2.5.1/spec/support/crud_tests/read/find-collation.yml0000644000004100000410000000065313257253113024061 0ustar www-datawww-datadata: - {_id: 1, x: 'ping'} minServerVersion: '3.4' tests: - description: "Find with a collation" operation: name: "find" arguments: filter: {x: 'PING'} collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: - {_id: 1, x: 'ping'} mongo-2.5.1/spec/support/crud_tests/read/aggregate-out.yml0000644000004100000410000000246413257253113023714 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} minServerVersion: '2.6' tests: - description: "Aggregate with $out" operation: name: aggregate arguments: pipeline: - $sort: {x: 1} - $match: _id: {$gt: 1} - $out: "other_test_collection" batchSize: 2 outcome: result: - {_id: 2, x: 22} - {_id: 3, x: 33} collection: name: "other_test_collection" data: - {_id: 2, x: 22} - {_id: 3, x: 33} - description: "Aggregate with $out and batch size of 0" operation: name: aggregate arguments: pipeline: - $sort: {x: 1} - $match: _id: {$gt: 1} - $out: "other_test_collection" batchSize: 0 outcome: result: - {_id: 2, x: 22} - {_id: 3, x: 33} collection: name: "other_test_collection" data: - {_id: 2, x: 22} - {_id: 3, x: 33} mongo-2.5.1/spec/support/crud_tests/read/count-collation.yml0000644000004100000410000000061013257253113024262 0ustar www-datawww-datadata: - {_id: 1, x: 'PING'} minServerVersion: '3.4' tests: - description: "Count with collation" operation: name: count arguments: filter: { x: 'ping' } collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: 1 mongo-2.5.1/spec/support/crud_tests/read/distinct-collation.yml0000644000004100000410000000071113257253113024755 0ustar www-datawww-datadata: - {_id: 1, string: 'PING'} - {_id: 2, string: 
'ping'} minServerVersion: '3.4' tests: - description: "Distinct with a collation" operation: name: distinct arguments: fieldName: "string" collation: { locale: 'en_US', strength: 2 } # https://docs.mongodb.com/master/reference/collation/#collation-document outcome: result: - 'PING' mongo-2.5.1/spec/support/crud_tests/read/find.yml0000644000004100000410000000213013257253113022067 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} - {_id: 5, x: 55} tests: - description: "Find with filter" operation: name: "find" arguments: filter: {_id: 1} outcome: result: - {_id: 1, x: 11} - description: "Find with filter, sort, skip, and limit" operation: name: "find" arguments: filter: _id: {$gt: 2} sort: {_id: 1} skip: 2 limit: 2 outcome: result: - {_id: 5, x: 55} - description: "Find with limit, sort, and batchsize" operation: name: "find" arguments: filter: {} sort: {_id: 1} limit: 4 batchSize: 2 outcome: result: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} - {_id: 4, x: 44} mongo-2.5.1/spec/support/crud_tests/read/aggregate.yml0000644000004100000410000000073413257253113023105 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "Aggregate with multiple stages" operation: name: aggregate arguments: pipeline: - $sort: {x: 1} - $match: _id: {$gt: 1} batchSize: 2 outcome: result: - {_id: 2, x: 22} - {_id: 3, x: 33} mongo-2.5.1/spec/support/crud_tests/read/distinct.yml0000644000004100000410000000121713257253113022775 0ustar www-datawww-datadata: - {_id: 1, x: 11} - {_id: 2, x: 22} - {_id: 3, x: 33} tests: - description: "Distinct without a filter" operation: name: distinct arguments: fieldName: "x" filter: {} outcome: result: - 11 - 22 - 33 - description: "Distinct with a filter" operation: name: distinct arguments: fieldName: "x" filter: _id: {$gt: 1} outcome: result: - 22 - 33mongo-2.5.1/spec/support/sdam_monitoring.rb0000644000004100000410000001024713257253113021060 0ustar www-datawww-data# Copyright (C) 2014-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# RSpec::Matchers.define :match_topology_opening_event do |expectation| match do |event| event.topology != nil end end RSpec::Matchers.define :match_topology_description_changed_event do |expectation| include Mongo::SDAMMonitoring::Matchable match do |event| topologies_match?(event, expectation) end end RSpec::Matchers.define :match_server_opening_event do |expectation| match do |event| true end end RSpec::Matchers.define :match_server_description_changed_event do |expectation| include Mongo::SDAMMonitoring::Matchable match do |event| descriptions_match?(event, expectation) end end RSpec::Matchers.define :match_server_closed_event do |expectation| match do |event| event.address.to_s == expectation.data['address'] end end RSpec::Matchers.define :match_sdam_monitoring_event do |expectation| match do |event| expect(event).to send("match_#{expectation.name}", expectation) end end module Mongo module SDAMMonitoring module Matchable def descriptions_match?(event, expectation) description_matches?(event.previous_description, expectation.data['previousDescription']) && description_matches?(event.new_description, expectation.data['newDescription']) end def topologies_match?(event, expectation) topology_matches?(event.previous_topology, expectation.data['previousDescription']) && topology_matches?(event.new_topology, expectation.data['newDescription']) end def description_matches?(actual, expected) case expected['type'] when 'Standalone' then actual.standalone? when 'RSPrimary' then actual.primary? when 'RSSecondary' then actual.secondary? when 'RSArbiter' then actual.arbiter? when 'Mongos' then actual.mongos? when 'Unknown' then actual.unknown? when 'PossiblePrimary' then actual.unknown? when 'RSGhost' then actual.ghost? when 'RSOther' then actual.other? end end def topology_matches?(actual, expected) case expected['topologyType'] when 'ReplicaSetWithPrimary' then actual.replica_set? when 'ReplicaSetNoPrimary' then (actual.replica_set? || actual.unknown?) when 'Sharded' then actual.sharded? when 'Single' then actual.single? when 'Unknown' then actual.unknown? end end end # Test subscriber for SDAM monitoring. # # @since 2.4.0 class TestSubscriber # The mappings of event names to types. # # @since 2.4.0 MAPPINGS = { 'topology_opening_event' => Mongo::Monitoring::Event::TopologyOpening, 'topology_description_changed_event' => Mongo::Monitoring::Event::TopologyChanged, 'server_opening_event' => Mongo::Monitoring::Event::ServerOpening, 'server_description_changed_event' => Mongo::Monitoring::Event::ServerDescriptionChanged, 'server_closed_event' => Mongo::Monitoring::Event::ServerClosed }.freeze # Implement the succeeded event. # # @param [ Event ] event The event. # # @since 2.4.0 def succeeded(event) events.push(event) end # Get the first event fired for the name, and then delete it. # # @param [ String ] name The event name. # # @return [ Event ] The matching event. def first_event(name) matching = events.find do |event| event.class == MAPPINGS[name] end events.delete(matching) matching end private def events @events ||= [] end end end end mongo-2.5.1/spec/support/travis.rb0000644000004100000410000000053513257253113017176 0ustar www-datawww-data# start up on the travis ci environment. if (ENV['CI'] == 'travis') starting = true client = Mongo::Client.new(['127.0.0.1:27017']) while starting begin client.command(Mongo::Server::Monitor::Connection::ISMASTER) break rescue Mongo::Error::OperationFailure => e sleep(2) client.cluster.scan! 
end end end mongo-2.5.1/spec/support/shared/0000755000004100000410000000000013257253113016604 5ustar www-datawww-datamongo-2.5.1/spec/support/shared/protocol.rb0000644000004100000410000000136313257253113020775 0ustar www-datawww-datashared_examples 'message with a header' do describe 'header' do describe 'length' do let(:field) { bytes.to_s[0..3] } it 'serializes the length' do expect(field).to be_int32(bytes.length) end end describe 'request id' do let(:field) { bytes.to_s[4..7] } it 'serializes the request id' do expect(field).to be_int32(message.request_id) end end describe 'response to' do let(:field) { bytes.to_s[8..11] } it 'serializes the response to' do expect(field).to be_int32(0) end end describe 'op code' do let(:field) { bytes.to_s[12..15] } it 'serializes the op code' do expect(field).to be_int32(opcode) end end end end mongo-2.5.1/spec/support/shared/server_selector.rb0000644000004100000410000001034613257253113022343 0ustar www-datawww-datadef make_server(mode, options = {}) tags = options[:tags] || {} average_round_trip_time = options[:average_round_trip_time] || 0 ismaster = { 'setName' => 'mongodb_set', 'ismaster' => mode == :primary, 'secondary' => mode != :primary, 'tags' => tags, 'ok' => 1 } listeners = Mongo::Event::Listeners.new monitoring = Mongo::Monitoring.new address = options[:address] server = Mongo::Server.new(address, double('cluster'), monitoring, listeners, TEST_OPTIONS) description = Mongo::Server::Description.new(address, ismaster, average_round_trip_time) server.tap do |s| allow(s).to receive(:description).and_return(description) end end shared_context 'server selector' do let(:max_staleness) { nil } let(:tag_sets) { [] } let(:tag_set) do { 'test' => 'tag' } end let(:server_tags) do { 'test' => 'tag', 'other' => 'tag' } end let(:primary) { make_server(:primary) } let(:secondary) { make_server(:secondary) } let(:options) { { :mode => name, :tag_sets => tag_sets, max_staleness: max_staleness } } let(:selector) { described_class.new(options) } let(:monitoring) do Mongo::Monitoring.new(monitoring: false) end let(:topology) do double('topology') end before(:all) do module Mongo # We monkey-patch the server here, so the monitors do not run and no # real TCP connection is attempted. # # @since 2.1.0 class Server alias :original_initialize :initialize def initialize(address, cluster, monitoring, event_listeners, options = {}) @address = address @cluster = cluster @monitoring = monitoring @options = options.freeze @monitor = Monitor.new(address, event_listeners, options) end alias :original_disconnect! :disconnect! def disconnect!; true; end end end end after(:all) do # Return the server implementation to its original for the other # tests in the suite. module Mongo class Server alias :initialize :original_initialize remove_method(:original_initialize) alias :disconnect! :original_disconnect! remove_method(:original_disconnect!) end end end end shared_examples 'a server selector mode' do describe '#name' do it 'returns the name' do expect(selector.name).to eq(name) end end describe '#slave_ok?' 
do it 'returns whether the slave_ok bit should be set' do expect(selector.slave_ok?).to eq(slave_ok) end end describe '#==' do context 'when mode is the same' do let(:other) do described_class.new end context 'tag sets are the same' do it 'returns true' do expect(selector).to eq(other) end end end context 'mode is different' do let(:other) do described_class.new.tap do |sel| allow(sel).to receive(:name).and_return(:other_mode) end end it 'returns false' do expect(selector).not_to eq(other) end end end end shared_examples 'a server selector accepting tag sets' do describe '#tag_sets' do context 'tags not provided' do it 'returns an empty array' do expect(selector.tag_sets).to be_empty end end context 'tag sets provided' do let(:tag_sets) do [ tag_set ] end it 'returns the tag sets' do expect(selector.tag_sets).to eq(tag_sets) end end end describe '#==' do context 'when mode is the same' do let(:other) { described_class.new } context 'tag sets are different' do let(:tag_sets) { { 'other' => 'tag' } } it 'returns false' do expect(selector).not_to eq(other) end end end end end shared_examples 'a server selector with sensitive data in its options' do describe '#inspect' do context 'when there is sensitive data in the options' do let(:options) do Mongo::Options::Redacted.new(:mode => name, :password => 'sensitive_data') end it 'does not print out sensitive data' do expect(selector.inspect).not_to match(options[:password]) end end end end mongo-2.5.1/spec/support/shared/session.rb0000644000004100000410000004675113257253113020631 0ustar www-datawww-datashared_examples 'an operation using a session' do describe 'operation execution', if: test_sessions? do context 'when the session is created from the same client used for the operation' do let(:session) do client.start_session end let(:server_session) do session.instance_variable_get(:@server_session) end let!(:before_last_use) do server_session.last_use end let!(:before_operation_time) do (session.operation_time || 0) end let!(:operation_result) do operation end after do session.end_session end it 'updates the last use value' do expect(server_session.last_use).not_to eq(before_last_use) end it 'updates the operation time value' do expect(session.operation_time).not_to eq(before_operation_time) end it 'does not close the session when the operation completes' do expect(session.ended?).to be(false) end end context 'when a session from another client is provided' do let(:session) do authorized_client_with_retry_writes.start_session end let(:operation_result) do operation end it 'raises an exception' do expect { operation_result }.to raise_exception(Mongo::Error::InvalidSession) end end context 'when the session is ended before it is used' do let(:session) do client.start_session end before do session.end_session end let(:operation_result) do operation end it 'raises an exception' do expect { operation_result }.to raise_exception(Mongo::Error::InvalidSession) end end end end shared_examples 'a failed operation using a session' do context 'when the operation fails', if: test_sessions? 
do let!(:before_last_use) do session.instance_variable_get(:@server_session).last_use end let!(:before_operation_time) do (session.operation_time || 0) end let!(:operation_result) do sleep 0.2 begin; failed_operation; rescue => e; e; end end let(:session) do client.start_session end it 'raises an error' do expect([Mongo::Error::OperationFailure, Mongo::Error::BulkWriteError]).to include(operation_result.class) end it 'updates the last use value' do expect(session.instance_variable_get(:@server_session).last_use).not_to eq(before_last_use) end it 'updates the operation time value' do expect(session.operation_time).not_to eq(before_operation_time) end end end shared_examples 'a causally consistent client session with an unacknowledged write' do context 'when an unacknowledged write is executed in the context of a causally consistent session', if: sessions_enabled? do let(:session) do client.start_session(causal_consistency: true) end it 'does not update the operation time of the session' do operation expect(session.operation_time).to be_nil end end end shared_examples 'an operation supporting causally consistent reads' do let(:client) do subscribed_client end context 'when connected to a standalone', if: sessions_enabled? && standalone? do context 'when the collection specifies a read concern' do let(:collection) do client[TEST_COLL, read_concern: { level: 'majority' }] end context 'when the session has causal_consistency set to true' do let(:session) do client.start_session(causal_consistency: true) end it 'does not add the afterClusterTime to the read concern in the command' do expect(command['readConcern']['afterClusterTime']).to be_nil end end context 'when the session has causal_consistency set to false' do let(:session) do client.start_session(causal_consistency: false) end it 'does not add the afterClusterTime to the read concern in the command' do expect(command['readConcern']['afterClusterTime']).to be_nil end end context 'when the session has causal_consistency not set' do let(:session) do client.start_session end it 'does not add the afterClusterTime to the read concern in the command' do expect(command['readConcern']['afterClusterTime']).to be_nil end end end context 'when the collection does not specify a read concern' do let(:collection) do client[TEST_COLL] end context 'when the session has causal_consistency set to true' do let(:session) do client.start_session(causal_consistency: true) end it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end context 'when the session has causal_consistency set to false' do let(:session) do client.start_session(causal_consistency: false) end it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end context 'when the session has causal_consistency not set' do let(:session) do client.start_session end it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end end end context 'when connected to replica set or sharded cluster', if: test_sessions? 
do context 'when the collection specifies a read concern' do let(:collection) do client[TEST_COLL, read_concern: { level: 'majority' }] end context 'when the session has causal_consistency set to true' do let(:session) do client.start_session(causal_consistency: true) end context 'when the session has an operation time' do before do client.database.command({ ping: 1 }, session: session) end let!(:operation_time) do session.operation_time end let(:expected_read_concern) do BSON::Document.new(level: 'majority', afterClusterTime: operation_time) end it 'merges the afterClusterTime with the read concern in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the session does not have an operation time' do let(:expected_read_concern) do BSON::Document.new(level: 'majority') end it 'leaves the read concern document unchanged' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the operation time is advanced' do before do session.advance_operation_time(operation_time) end let(:operation_time) do BSON::Timestamp.new(0, 1) end let(:expected_read_concern) do BSON::Document.new(level: 'majority', afterClusterTime: operation_time) end it 'merges the afterClusterTime with the new operation time and read concern in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end end context 'when the session has causal_consistency set to false' do let(:session) do client.start_session(causal_consistency: false) end context 'when the session does not have an operation time' do let(:expected_read_concern) do BSON::Document.new(level: 'majority') end it 'leaves the read concern document unchanged' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the session has an operation time' do before do client.database.command({ ping: 1 }, session: session) end let(:expected_read_concern) do BSON::Document.new(level: 'majority') end it 'leaves the read concern document unchanged' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the operation time is advanced' do before do session.advance_operation_time(operation_time) end let(:operation_time) do BSON::Timestamp.new(0, 1) end let(:expected_read_concern) do BSON::Document.new(level: 'majority') end it 'leaves the read concern document unchanged' do expect(command['readConcern']).to eq(expected_read_concern) end end end context 'when the session has causal_consistency not set' do let(:session) do client.start_session end context 'when the session does not have an operation time' do let(:expected_read_concern) do BSON::Document.new(level: 'majority') end it 'leaves the read concern document unchanged' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the session has an operation time' do before do client.database.command({ ping: 1 }, session: session) end let!(:operation_time) do session.operation_time end let(:expected_read_concern) do BSON::Document.new(level: 'majority', afterClusterTime: operation_time) end it 'merges the afterClusterTime with the new operation time and read concern in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the operation time is advanced' do before do session.advance_operation_time(operation_time) end let(:operation_time) do BSON::Timestamp.new(0, 1) end let(:expected_read_concern) do BSON::Document.new(level: 'majority', afterClusterTime: operation_time) end it 'merges the afterClusterTime 
with the new operation time and read concern in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end end end context 'when the collection does not specify a read concern' do let(:collection) do client[TEST_COLL] end context 'when the session has causal_consistency set to true' do let(:session) do client.start_session(causal_consistency: true) end context 'when the session does not have an operation time' do it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end context 'when the session has an operation time' do before do client.database.command({ ping: 1 }, session: session) end let!(:operation_time) do session.operation_time end let(:expected_read_concern) do BSON::Document.new(afterClusterTime: operation_time) end it 'merges the afterClusterTime with the read concern in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the operation time is advanced' do before do session.advance_operation_time(operation_time) end let(:operation_time) do BSON::Timestamp.new(0, 1) end let(:expected_read_concern) do BSON::Document.new(afterClusterTime: operation_time) end it 'merges the afterClusterTime with the new operation time in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end end context 'when the session has causal_consistency set to false' do let(:session) do client.start_session(causal_consistency: false) end context 'when the session does not have an operation time' do it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end context 'when the session has an operation time' do before do client.database.command({ ping: 1 }, session: session) end it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end context 'when the operation time is advanced' do before do session.advance_operation_time(operation_time) end let(:operation_time) do BSON::Timestamp.new(0, 1) end let(:expected_read_concern) do BSON::Document.new(afterClusterTime: operation_time) end it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end end context 'when the session has causal_consistency not set' do let(:session) do client.start_session end context 'when the session does not have an operation time' do it 'does not include the read concern in the command' do expect(command['readConcern']).to be_nil end end context 'when the session has an operation time' do before do client.database.command({ ping: 1 }, session: session) end let!(:operation_time) do session.operation_time end let(:expected_read_concern) do BSON::Document.new(afterClusterTime: operation_time) end it 'merges the afterClusterTime with the read concern in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end context 'when the operation time is advanced' do before do session.advance_operation_time(operation_time) end let(:operation_time) do BSON::Timestamp.new(0, 1) end let(:expected_read_concern) do BSON::Document.new(afterClusterTime: operation_time) end it 'merges the afterClusterTime with the new operation time in the command' do expect(command['readConcern']).to eq(expected_read_concern) end end end end end end shared_examples 'an operation updating cluster time' do let(:cluster) do client.cluster end let(:session) do client.start_session end let(:client) do subscribed_client end context 'when the command is run once' do context 
'when the server is version 3.6' do context 'when the cluster is sharded or a replica set', if: test_sessions? do let!(:reply_cluster_time) do operation_with_session EventSubscriber.succeeded_events[-1].reply['$clusterTime'] end it 'updates the cluster time of the cluster' do expect(cluster.cluster_time).to eq(reply_cluster_time) end it 'updates the cluster time of the session' do expect(session.cluster_time).to eq(reply_cluster_time) end end context 'when the server is a standalone', if: (standalone? && sessions_enabled?) do let(:before_cluster_time) do client.cluster.cluster_time end let!(:reply_cluster_time) do operation_with_session EventSubscriber.succeeded_events[-1].reply['$clusterTime'] end it 'does not update the cluster time of the cluster' do expect(before_cluster_time).to eq(before_cluster_time) end it 'does not update the cluster time of the session' do expect(session.cluster_time).to be_nil end end end context 'when the server is less than version 3.6', if: !sessions_enabled? do let(:before_cluster_time) do client.cluster.cluster_time end let!(:reply_cluster_time) do operation EventSubscriber.succeeded_events[-1].reply['$clusterTime'] end it 'does not update the cluster time of the cluster' do expect(before_cluster_time).to eq(before_cluster_time) end end end context 'when the command is run twice' do let!(:reply_cluster_time) do operation_with_session EventSubscriber.succeeded_events[-1].reply['$clusterTime'] end context 'when the cluster is sharded or a replica set', if: test_sessions? do context 'when the session cluster time is advanced' do before do session.advance_cluster_time(advanced_cluster_time) end let(:second_command_cluster_time) do second_operation EventSubscriber.started_events[-1].command['$clusterTime'] end context 'when the advanced cluster time is greater than the existing cluster time' do let(:advanced_cluster_time) do new_timestamp = BSON::Timestamp.new(reply_cluster_time[Mongo::Cluster::CLUSTER_TIME].seconds, reply_cluster_time[Mongo::Cluster::CLUSTER_TIME].increment + 1) new_cluster_time = reply_cluster_time.dup new_cluster_time.merge(Mongo::Cluster::CLUSTER_TIME => new_timestamp) end it 'includes the advanced cluster time in the second command' do expect(second_command_cluster_time).to eq(advanced_cluster_time) end end context 'when the advanced cluster time is not greater than the existing cluster time' do let(:advanced_cluster_time) do new_timestamp = BSON::Timestamp.new(reply_cluster_time[Mongo::Cluster::CLUSTER_TIME].seconds, reply_cluster_time[Mongo::Cluster::CLUSTER_TIME].increment - 1) new_cluster_time = reply_cluster_time.dup new_cluster_time.merge(Mongo::Cluster::CLUSTER_TIME => new_timestamp) end it 'does not advance the cluster time' do expect(second_command_cluster_time).to eq(reply_cluster_time) end end end context 'when the session cluster time is not advanced' do let(:second_command_cluster_time) do second_operation EventSubscriber.started_events[-1].command['$clusterTime'] end it 'includes the received cluster time in the second command' do expect(second_command_cluster_time).to eq(reply_cluster_time) end end end context 'when the server is a standalone', if: (standalone? && sessions_enabled?) 
do let(:before_cluster_time) do client.cluster.cluster_time end let(:second_command_cluster_time) do second_operation EventSubscriber.started_events[-1].command['$clusterTime'] end it 'does not update the cluster time of the cluster' do second_command_cluster_time expect(before_cluster_time).to eq(before_cluster_time) end end end context 'when the server is less than version 3.6', if: !sessions_enabled? do let(:before_cluster_time) do client.cluster.cluster_time end it 'does not update the cluster time of the cluster' do operation expect(before_cluster_time).to eq(before_cluster_time) end end end mongo-2.5.1/spec/support/certificates/0000755000004100000410000000000013257253113020003 5ustar www-datawww-datamongo-2.5.1/spec/support/certificates/client_key_encrypted.pem0000644000004100000410000000332713257253113024716 0ustar www-datawww-data-----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED DEK-Info: DES-EDE3-CBC,10EC04AAF2B5AC93 o8wl+xljXAiT+wCc0trCSaQ+0vktQXCXDQj2x/pEMwS5+GxhPaQmpQjtc2gsjher PGu5rSG0eqIGROv530ZbtdWdMeRRbiDfcVrbjr2hg9MdasV4/7llVCNnZx2NuuuU 0ToH6a1JtnnPLqocdJfwJu/w+AMpJBCR9im+L4QeVJ0My4nS9VzL214ioxfmKtiS /DPRm9NQxIRbOFzPLT4Wp2kaGyq6z4MyW7I16MEIjxNfg7MoojkOosp2ru7QEO5N CjlsInC3kJjSimRh479Ba5rnFLY6ekwRkFzbzPNnPz1Mbpv86Btqx8dNERfOGRMd JLjLSWbeO6nZmRIBuItChVfpbiVV+3eQvQHIsTFN/pD0hbDmI0FQ8TiEwxe4dNGu LGgQwI9BIVT9ZDTahe0182Cb7WS1KA7VrixRV3zm2NSgrP8D9yH6/BlogdRtjCc4 ktGjZJtqojEAdeYM7qofGhZjq50Pwy2EAyV0UXGwg3MblrobanAq6mezmPRDPFzb iEHLK9N5xqddcl8dIhd5vk84MI7Ih7kc7E3MdbRAyWrSPRk+eFqy6Bfuz6JSrHMO 6Beyfz6dzU5XUCo/feh6qmDEA5COuP1JHE8UhSC8s/wRk94qZdLz2jIglPSJ2Zm7 VrP1vHIU8rghO4JWE2K3HPAASBacYhfoW6Po+vmxeRttAao9xhFBhM+OjUszh+1w YSkv6TXpHZTfE0ai2Sm1yYuB5FuCco1ehPGFLf+a9dSv5xT+3qAqM9pkbYDPDSyS RAfSiy/cIzjIfk/a/2A7F56b/clZV3V/DRfgCMdcCHSrjJ/DXmg7s5pCqNh3Y2RM Izyb7XAWXQh+VrvrLSFolY6CFTAYq2bnqkMIel1t/MusF8P2Cpf+1YFQOz/KhtqS S51JZw4wA8Hq/LOOsmB1BzPl63Yu8rj+hCMN8yEodY1yOwrLX1c6/c+4A6U8JioL dya58byVF5Fov+moO5PgkD5QsuHDZpkdQeWjZXtK9ZoG6HG5godFip6hxIrV8LTl Fy9TEURPPYfM9Kt3zucL1KOljBu268tBfezqwPLpFVYuIDSUdTHdCBIRJvJwBfrU 9gZXFik0ZgP2WSlGJbrUTGfCv7EcdPfDS3h6dPfh8Z33Kr9g+yKp2Gxm6ynQi2EU T86tMUFaeIZ5VD14ruihlQyazPePYa95FUoVWRnnoxXrCEGo3PU2sL5vXOjr1zr3 dnZheTNH7UW+O4nLwpZ8WI+IYqIOh6s5Aj9ZQpuPfhgFOOqqwrEbCh88W6q5Qz82 J2U9ok/RWTgc433tW4xKoN10p6IdF/MxMrPSV+LdVnYf1HBl5I1A/iZRHxdXkJpZ KcQ6VAQylmPGHHacxWodn2R9aFxy/LoSDSzivhxE+CDF6Ggr+DwyWPa6dHLP9S0n H9PfOPQLIu2uigpQc4GJoAZOljNaYDee/IekJ+vR6vzg09Uc1YDIVWfGmNJpYuVr rz383c+sGC3RX2cPyxuzENLo1tGYvjH0Rfvzjfkvali2Tfk3kjha0GhTw7CH7U3L 5rb5GQj16tDX/Oqa2DluESH3tzDJ7Rgdwda0Dk3iceVEsSHArt6TOz/qa+Ycm/EC -----END RSA PRIVATE KEY----- mongo-2.5.1/spec/support/certificates/client_key.pem0000644000004100000410000000325013257253113022634 0ustar www-datawww-data-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6FkLUiz1eimee p8DNSpyc/ZW5g7/0zwOMLtupwTVYgPbi6YcohOPQm2hgUQ5ChNhv6DTMGJd5043Y LyMRJW9peji7jLIp6ZG+eYzMG1aYmNODKsX5nIYMLCQOXEY7qZVEbMXgfJ0Drg0j mUmkSN0ONaLltIuGvcDIztWsxDbznl8XACONU6FDG6NhljaATTVQtYtpMTm0Y4uW WVzR6pLr6/obNWREs/bzpp1JOlnl4cLLmL4psyLdM5fXUE/bwlhkGLWMPGstIfa9 jeXS2o15/qeAdagVue55fwExHeXnFXZTZfb+8JN9ID3M/5vKslAsGzpp1eZwz6y+ flwzxG6nAgMBAAECggEBALYw92urjAFVFxCiA8W7aEzYhtAkaztft4R3mD/C19z4 H0CZDeig+3+RuIactY5xDIu8WHz/EseHVlg0BmxSL5ugu4z8uq8IbNaFoVFw7r7m 2ieRKFY0ZpXiXcbllynw5iEhMjeRKhWhQmH5Qb2kTTINV5j4xKa+f9Lblx7Y2Uh4 tsaOtlMwb98D2/KYJdTv5Nj1nyuSqRVhECsd00Cb6JUBGQBx8Ja0wFy9gEygq6kU w3s1XNOSnYNEo4FaVZwp5KZyCyBENcKpNUq4nXt/7ncEfVYdJck0Li3wN4Jr2J9S eHqRzh8QkHxc1Ro8ktcXaUSs9kFuwvVvb4rcGUpOMWkCgYEA9xxp8yDtFVgzMtc/ 
vS8xgM1Wj4SrgKKYhE2wS05BJh/41oFMzfH1FpZ1GCM983r4QgYWoT71XsBgiOMC yN2p2IbV4V44bMGKJqaVMkB91CVCUWI6piaCQb/1CJTwaXE7zPim6dlUSxxBBnRn LP50NTscRLFcCZELD3Yl7jR8XFUCgYEAwMfkNFmGtBKAwlHZ3Y3XOwPWg+jCll7s 9nhv8TU2IB9pcCRGqyOT7k1YymvYkDT2Je4JUPWEBs4cW7yD61LrQ8w8+DrE9dGo czzGPyjOAANSX0asG74UjkNIQThmyEOltVHIxYMaSqowjHRSPdA+R4Od9EdcDdfS q5SfSVFxmwsCgYBtl1thqUOcCL7EGHQ7KdfxgJ+YDMWmyfWMD4xVCYKZLurD7xop 59nDR7zslIygE/RQC7Uzk+FsQTNO4ibVAIGX9syaI5gwm3DyjURzwehMEq4ju8W4 9DEmicRZJvysNrzHvasA4RKiMQihnTQ43yyYgvuZd3MTBxF5rPNLfll89QKBgQC9 SsmiOZIR+OUjaTmS2bbQBNm7Fm8TNcxZyzKn1wb5jb57VbNqUfnskVgxEqpIFyjn X48YRqtH/1RLI5UpGXdXUBFB8Hr7oM1VsgQ7ejakPp7AXOWcLA2FDz3AhMAvvnTU 0KRihHPpgqk/EOy8M2Ej2XHcrcEO+q+quLmbRXRWtwKBgHacQiwci/2J+v0e9i52 re/2AJHKP5MwNHFe1e01iNc5EEN0G+/Ut8XW19DWf6bsxqie0ChC+xN8TUst8alT F+tXTsHHmt/lRcjTROjT5XVuoqjtU2Q0QeVeGLgvObso+fZy3ZNeQuSJjWukdMZ3 57rGT6p0OuM8qbrTzpv3JMrm -----END PRIVATE KEY----- mongo-2.5.1/spec/support/certificates/client.pem0000644000004100000410000001304213257253113021764 0ustar www-datawww-dataCertificate: Data: Version: 3 (0x2) Serial Number: 7 (0x7) Signature Algorithm: sha1WithRSAEncryption Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus Validity Not Before: Aug 23 14:55:32 2013 GMT Not After : Jan 7 14:55:32 2041 GMT Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=kerneluser, CN=client Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (2048 bit) Modulus: 00:ba:16:42:d4:8b:3d:5e:8a:67:9e:a7:c0:cd:4a: 9c:9c:fd:95:b9:83:bf:f4:cf:03:8c:2e:db:a9:c1: 35:58:80:f6:e2:e9:87:28:84:e3:d0:9b:68:60:51: 0e:42:84:d8:6f:e8:34:cc:18:97:79:d3:8d:d8:2f: 23:11:25:6f:69:7a:38:bb:8c:b2:29:e9:91:be:79: 8c:cc:1b:56:98:98:d3:83:2a:c5:f9:9c:86:0c:2c: 24:0e:5c:46:3b:a9:95:44:6c:c5:e0:7c:9d:03:ae: 0d:23:99:49:a4:48:dd:0e:35:a2:e5:b4:8b:86:bd: c0:c8:ce:d5:ac:c4:36:f3:9e:5f:17:00:23:8d:53: a1:43:1b:a3:61:96:36:80:4d:35:50:b5:8b:69:31: 39:b4:63:8b:96:59:5c:d1:ea:92:eb:eb:fa:1b:35: 64:44:b3:f6:f3:a6:9d:49:3a:59:e5:e1:c2:cb:98: be:29:b3:22:dd:33:97:d7:50:4f:db:c2:58:64:18: b5:8c:3c:6b:2d:21:f6:bd:8d:e5:d2:da:8d:79:fe: a7:80:75:a8:15:b9:ee:79:7f:01:31:1d:e5:e7:15: 76:53:65:f6:fe:f0:93:7d:20:3d:cc:ff:9b:ca:b2: 50:2c:1b:3a:69:d5:e6:70:cf:ac:be:7e:5c:33:c4: 6e:a7 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Comment: OpenSSL Generated Certificate X509v3 Subject Key Identifier: 4A:8B:EE:22:42:E6:F8:62:4C:86:38:8D:C5:78:95:98:C1:10:05:7C X509v3 Authority Key Identifier: keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16 Signature Algorithm: sha1WithRSAEncryption 13:13:a8:f0:de:78:c6:b1:e0:85:cc:27:e6:04:28:44:93:1d: f1:ff:5e:81:69:33:1f:f3:76:e0:49:ca:d9:ad:aa:db:f5:a5: f8:a6:50:bb:a1:a7:40:14:e4:2f:8d:b8:21:7f:35:04:60:db: af:f0:9e:dd:a1:ca:0b:7f:03:2e:2f:19:1e:32:6e:1e:2d:87: 68:e3:37:47:a8:5b:93:d1:88:41:73:da:88:21:59:27:d4:35: 1c:6a:27:b5:c0:c6:17:ba:f3:87:c8:e1:f4:8f:43:12:bc:fa: 8d:90:d5:86:83:df:51:a5:c9:e0:92:f0:66:d0:37:61:6f:85: 24:18 -----BEGIN CERTIFICATE----- MIIDdjCCAt+gAwIBAgIBBzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgyMzE0 NTUzMloXDTQxMDEwNzE0NTUzMlowbjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjET MBEGA1UECwwKa2VybmVsdXNlcjEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG 
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuhZC1Is9XopnnqfAzUqcnP2VuYO/9M8DjC7b qcE1WID24umHKITj0JtoYFEOQoTYb+g0zBiXedON2C8jESVvaXo4u4yyKemRvnmM zBtWmJjTgyrF+ZyGDCwkDlxGO6mVRGzF4HydA64NI5lJpEjdDjWi5bSLhr3AyM7V rMQ2855fFwAjjVOhQxujYZY2gE01ULWLaTE5tGOLlllc0eqS6+v6GzVkRLP286ad STpZ5eHCy5i+KbMi3TOX11BP28JYZBi1jDxrLSH2vY3l0tqNef6ngHWoFbnueX8B MR3l5xV2U2X2/vCTfSA9zP+byrJQLBs6adXmcM+svn5cM8RupwIDAQABo3sweTAJ BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0 aWZpY2F0ZTAdBgNVHQ4EFgQUSovuIkLm+GJMhjiNxXiVmMEQBXwwHwYDVR0jBBgw FoAUB0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAExOo8N54 xrHghcwn5gQoRJMd8f9egWkzH/N24EnK2a2q2/Wl+KZQu6GnQBTkL424IX81BGDb r/Ce3aHKC38DLi8ZHjJuHi2HaOM3R6hbk9GIQXPaiCFZJ9Q1HGontcDGF7rzh8jh 9I9DErz6jZDVhoPfUaXJ4JLwZtA3YW+FJBg= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6FkLUiz1eimee p8DNSpyc/ZW5g7/0zwOMLtupwTVYgPbi6YcohOPQm2hgUQ5ChNhv6DTMGJd5043Y LyMRJW9peji7jLIp6ZG+eYzMG1aYmNODKsX5nIYMLCQOXEY7qZVEbMXgfJ0Drg0j mUmkSN0ONaLltIuGvcDIztWsxDbznl8XACONU6FDG6NhljaATTVQtYtpMTm0Y4uW WVzR6pLr6/obNWREs/bzpp1JOlnl4cLLmL4psyLdM5fXUE/bwlhkGLWMPGstIfa9 jeXS2o15/qeAdagVue55fwExHeXnFXZTZfb+8JN9ID3M/5vKslAsGzpp1eZwz6y+ flwzxG6nAgMBAAECggEBALYw92urjAFVFxCiA8W7aEzYhtAkaztft4R3mD/C19z4 H0CZDeig+3+RuIactY5xDIu8WHz/EseHVlg0BmxSL5ugu4z8uq8IbNaFoVFw7r7m 2ieRKFY0ZpXiXcbllynw5iEhMjeRKhWhQmH5Qb2kTTINV5j4xKa+f9Lblx7Y2Uh4 tsaOtlMwb98D2/KYJdTv5Nj1nyuSqRVhECsd00Cb6JUBGQBx8Ja0wFy9gEygq6kU w3s1XNOSnYNEo4FaVZwp5KZyCyBENcKpNUq4nXt/7ncEfVYdJck0Li3wN4Jr2J9S eHqRzh8QkHxc1Ro8ktcXaUSs9kFuwvVvb4rcGUpOMWkCgYEA9xxp8yDtFVgzMtc/ vS8xgM1Wj4SrgKKYhE2wS05BJh/41oFMzfH1FpZ1GCM983r4QgYWoT71XsBgiOMC yN2p2IbV4V44bMGKJqaVMkB91CVCUWI6piaCQb/1CJTwaXE7zPim6dlUSxxBBnRn LP50NTscRLFcCZELD3Yl7jR8XFUCgYEAwMfkNFmGtBKAwlHZ3Y3XOwPWg+jCll7s 9nhv8TU2IB9pcCRGqyOT7k1YymvYkDT2Je4JUPWEBs4cW7yD61LrQ8w8+DrE9dGo czzGPyjOAANSX0asG74UjkNIQThmyEOltVHIxYMaSqowjHRSPdA+R4Od9EdcDdfS q5SfSVFxmwsCgYBtl1thqUOcCL7EGHQ7KdfxgJ+YDMWmyfWMD4xVCYKZLurD7xop 59nDR7zslIygE/RQC7Uzk+FsQTNO4ibVAIGX9syaI5gwm3DyjURzwehMEq4ju8W4 9DEmicRZJvysNrzHvasA4RKiMQihnTQ43yyYgvuZd3MTBxF5rPNLfll89QKBgQC9 SsmiOZIR+OUjaTmS2bbQBNm7Fm8TNcxZyzKn1wb5jb57VbNqUfnskVgxEqpIFyjn X48YRqtH/1RLI5UpGXdXUBFB8Hr7oM1VsgQ7ejakPp7AXOWcLA2FDz3AhMAvvnTU 0KRihHPpgqk/EOy8M2Ej2XHcrcEO+q+quLmbRXRWtwKBgHacQiwci/2J+v0e9i52 re/2AJHKP5MwNHFe1e01iNc5EEN0G+/Ut8XW19DWf6bsxqie0ChC+xN8TUst8alT F+tXTsHHmt/lRcjTROjT5XVuoqjtU2Q0QeVeGLgvObso+fZy3ZNeQuSJjWukdMZ3 57rGT6p0OuM8qbrTzpv3JMrm -----END PRIVATE KEY----- mongo-2.5.1/spec/support/certificates/password_protected.pem0000644000004100000410000000602313257253113024422 0ustar www-datawww-data-----BEGIN ENCRYPTED PRIVATE KEY----- MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIgWTIkEmBBfoCAggA MBQGCCqGSIb3DQMHBAjzL6xrCrEygwSCBMihG8kg3nTnTtWAbB+d1D+HJxriqm37 7rwjkfa+T5w5ZBRGpsTt3QB5ep0maX72H55ns6ukkeMoDBSadhDWrGWcLQ2IOGt3 E14KU6vMFe3gQkfF1fupp7F+3ma58/VNUKa4X5pzZ7OCf8inlLWejp8BRqbrPWqw Errgw1kNN3gWfQMr7JtIt1yI1xIMEB2Z976Jn0gaGnJAtzIW4thqjkDdb8b33S9f cb7N1Fq4cly22f9HdqNcLgVTi1zIlPXc/f/6mtsGTsJv/rMPthJ7c3Smvh3Fce2G w8e+ypfey+9QG3fk7RslaFRe8ShgqfdR8CAalp2UzwNbX91Agyuim3TA6s4jM8N9 cF6CXlqEaA4sKhiOJmw69DfTC7QRee/gi2A8bz17pX85nKrGiLYn+Od8CEhTFxVk lNgBLv4+RcYHVqxWlbJMdDliMN53E+hYbh0y+GDLjteEXbrxRo1aSgd/9PGiSl97 KY4F7b/OwRzRZh1F+cXY+uP5ZQMbx5EMMkhzuj3Hiy/AVlQrW2B1lXtcf11YFFJj xWq6YcpmEjL+xRq1PgoU7ahl6K0A3ScedQA5b1rLdPE8+bkRAfoN+0r8HVkIL7M+ PorrwuWnvUmovZ0yDvm153HVvRnKZKHcelklphuUWfXvcRNITG/Rx6ssj+MVjqjb Xy7t7wgIrk10TFWNEcunGjSSjPDkjYPazJ2dasI0rODzhlQzrnlWM+El9P5zSu2z 
1Bvet44nmAKi2WLMda5YKbJcLSNbpBFB+rTwDt/D+dfwsJeC0sjpzzatKGXNJLJQ 7x9BZfAbBn0QrIZYGMkaxWvcpJcaVUbCKiST4DK5ze584ptrlH+Bqw4u4xLcVrdk hu/8IBNybLrl4zahIz7bRRNmw5wo9zUVXPXEtuYak+MK+gmD3TzJ12OUKAlAj3Go Fj3NFQoxBJJjuXM3zZRvHp+/AAOUANBYIyV2WssF6C+SH4o+jKyxWC/GawPFvx/B gy55kdEt+ORdcOfV8L5Q2xI8Qpck6E3odmaHCvjz1bUVUWqhJcTuoewHRBfWiWgc UCXBS/YgendUQroBOPyYIwTtk4XY9fhhKGI4LhWcx4LfzntBnM9FGmDOwhu3HqEd HOs8p+HhB8LPjGRot63m7gkJ1T6AswSi9hTeZeSgXuSgL23zqwPGbGTwO3AmFs/M 8luXQ4My9bk74K3d9lFdJPaxeTpeeWNodnBItbioT5aImptU+pkKWLTVmXi4V+JE 1ootg+DSbz+bKp4A/LLOBO4Rsx5FCGAbBMnKc/n8lF86LjKq2PLRfgdPCaVfBrcd TnOkBZYU0HwJAc++4AZQJvA/KRB4UPUzMe2atjVxcrr6r6vL8G04+7TBFoynpzJ+ 4KZPCJz0Avb4wYKu/IHkdKL7UY8WEGz1mMDbAu4/xCriLg49D2f1eY3FTEjBotBI J9hE4ccmwqlxtl4qCVRezh0C+viJ6q2tCji2SPQviaVMNWiis9cZ52J+F9TC2p9R PdatJg0rjuVzfoPFE8Rq8V6+zf818b19vQ4F31J+VXTz7sF8it9IO0w/3MbtfBNE pKmMZ9h5RdSw1kXRWXbROR9XItS7gE1wkXAxw11z7jqNSNvhotkJXH/A5qGpTFBl Z8A= -----END ENCRYPTED PRIVATE KEY----- -----BEGIN CERTIFICATE----- MIIDczCCAtygAwIBAgIBCzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1 MTgxMFoXDTQxMDQyMjE1MTgxMFowazELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP MA0GA1UECwwGS2VybmVsMRAwDgYDVQQDDAdsYXphcnVzMIIBIjANBgkqhkiG9w0B AQEFAAOCAQ8AMIIBCgKCAQEA0+uq+UcogTSS+BLNTwwsBU7/HnNNhNgLKnk8pdUC UFOzAjXnXlXEravmbhWeIj5TsCElc5FPE66OvmiixFU6l27Z5P8gopjokxll7e1B ujeJOXgy5h+K76xdeQ90JmQX4OO0K5rLXvNH3ufuhGr2NObrBz6kbF5Wdr3urPl6 pFSLH02zPLqPHhhUvO8jcbUD3RrS/5ZGHqE++F+QRMuYeCXTjECA8iLDvQsiqvT6 qK1y04V/8K0BYJd/yE31H3cvRLUu7mRAkN87lY1Aj0i3dKM/l2RAa3tsy2/kSDH3 VeUaqjoPN8PTfJaoMZz7xV7C+Zha+JZh3E7pq6viMR6bkwIDAQABo3sweTAJBgNV HRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZp Y2F0ZTAdBgNVHQ4EFgQUbw3OWXLJpkDMpGnLWM4vxSbwUSAwHwYDVR0jBBgwFoAU B0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAL+OC9x0P7Ql+ 8NbONrIeOIoJD++K5rUM0vI+u9RDAxTm9TO6cP7Cl6H4zzvlzJ3w9DL66c2r+ZTy BxzFO1wtDKUo5RJKneC0tMz0rJQIWTqo45fDLs8UIDB5t4xp6zed34nvct+wIRaV hCjHBaVmILlBWb6OF9/kl1JhLtElyDs= -----END CERTIFICATE----- mongo-2.5.1/spec/support/certificates/server.pem0000644000004100000410000000371213257253113022017 0ustar www-datawww-data-----BEGIN PRIVATE KEY----- MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAK53miP9GczBWXnq NxHwQkgVqsDuesjwJbWilMK4gf3fjnf2PN3qDpnGbZbPD0ij8975pIKtSPoDycFm A8Mogip0yU2Lv2lL56CWthSBftOFDL2CWIsmuuURFXZPiVLtLytfI9oLASZFlywW Cs83qEDTvdW8VoVhVsxV1JFDnpXLAgMBAAECgYBoGBgxrMt97UazhNkCrPT/CV5t 6lv8E7yMGMrlOyzkCkR4ssQyK3o2qbutJTGbR6czvIM5LKbD9Qqlh3ZrNHokWmTR VQQpJxt8HwP5boQvwRHg9+KSGr4JvRko1qxFs9C7Bzjt4r9VxdjhwZPdy0McGI/z yPXyQHjqBayrHV1EwQJBANorfCKeIxLhH3LAeUZuRS8ACldJ2N1kL6Ov43/v+0S/ OprQeBTODuTds3sv7FCT1aYDTOe6JLNOwN2i4YVOMBsCQQDMuCozrwqftD17D06P 9+lRXUekY5kFBs5j28Xnl8t8jnuxsXtQUTru660LD0QrmDNSauhpEmlpJknicnGt hmwRAkEA12MI6bBPlir0/jgxQqxI1w7mJqj8Vg27zpEuO7dzzLoyJHddpcSNBbwu npaAakiZK42klj26T9+XHvjYRuAbMwJBAJ5WnwWEkGH/pUHGEAyYQdSVojDKe/MA Vae0tzguFswK5C8GyArSGRPsItYYA7D4MlG/sGx8Oh2C6MiFndkJzBECQDcP1y4r Qsek151t1zArLKH4gG5dQAeZ0Lc2VeC4nLMUqVwrHcZDdd1RzLlSaH3j1MekFVfT 6v6rrcNLEVbeuk4= -----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- MIIC7jCCAlegAwIBAgIBCjANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNTEz 
MjU0MFoXDTQxMDQyMTEzMjU0MFowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZzZXJ2ZXIwgZ8wDQYJKoZIhvcNAQEB BQADgY0AMIGJAoGBAK53miP9GczBWXnqNxHwQkgVqsDuesjwJbWilMK4gf3fjnf2 PN3qDpnGbZbPD0ij8975pIKtSPoDycFmA8Mogip0yU2Lv2lL56CWthSBftOFDL2C WIsmuuURFXZPiVLtLytfI9oLASZFlywWCs83qEDTvdW8VoVhVsxV1JFDnpXLAgMB AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBQgCkKiZhUV9/Zo7RwYYwm2cNK6tzAf BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB gQCbsfr+Q4pty4Fy38lSxoCgnbB4pX6+Ex3xyw5zxDYR3xUlb/uHBiNZ1dBrXBxU ekU8dEvf+hx4iRDSW/C5N6BGnBBhCHcrPabo2bEEWKVsbUC3xchTB5rNGkvnMt9t G9ol7vanuzjL3S8/2PB33OshkBH570CxqqPflQbdjwt9dg== -----END CERTIFICATE----- mongo-2.5.1/spec/support/certificates/ca.pem0000644000004100000410000000171113257253113021071 0ustar www-datawww-data-----BEGIN CERTIFICATE----- MIICnTCCAgYCCQD4+RCKzwZr/zANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMC VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4w DAYDVQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0 IEF1dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEz MDAyMzU0OVoXDTIzMTEyODAyMzU0OVowgZIxCzAJBgNVBAYTAlVTMREwDwYDVQQI DAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwFMTBH ZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3JpdHkx GzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1czCBnzANBgkqhkiG9w0BAQEFAAOB jQAwgYkCgYEA1xymeY+U/evUuQvxpun9moe4GopN80c1ptmaAHM/1Onwaq54Wt27 nl1wUVme3dh4DdWviYY7mJ333HVEnp/QhVcT4kQhICZqdgPKPdCseQW3H+8x6Gwz hrNRBdz0NkSoFxDlIymfy2Q2xoQpbCGAg+EnRYUTKlHMXNpUDLFhGjcCAwEAATAN BgkqhkiG9w0BAQUFAAOBgQDRQB3c/9osTexEzMPHyMGTzG5nGwy8Wv77GgW3BETM hECoGqueXLa5ZgvealJrnMHNKdj6vrCGgBDzE0K0VdXc4dLtLmx3DRntDOAWKJdB 2XPMvdC7Ec//Fwep/9emz0gDiJrTiEpL4p74+h+sp4Xy8cBokQ3Ss5S9NmnPXT7E qQ== -----END CERTIFICATE----- mongo-2.5.1/spec/support/certificates/client_cert.pem0000644000004100000410000000235513257253113023006 0ustar www-datawww-data-----BEGIN CERTIFICATE----- MIIDdjCCAt+gAwIBAgIBBzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgyMzE0 NTUzMloXDTQxMDEwNzE0NTUzMlowbjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjET MBEGA1UECwwKa2VybmVsdXNlcjEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG 9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuhZC1Is9XopnnqfAzUqcnP2VuYO/9M8DjC7b qcE1WID24umHKITj0JtoYFEOQoTYb+g0zBiXedON2C8jESVvaXo4u4yyKemRvnmM zBtWmJjTgyrF+ZyGDCwkDlxGO6mVRGzF4HydA64NI5lJpEjdDjWi5bSLhr3AyM7V rMQ2855fFwAjjVOhQxujYZY2gE01ULWLaTE5tGOLlllc0eqS6+v6GzVkRLP286ad STpZ5eHCy5i+KbMi3TOX11BP28JYZBi1jDxrLSH2vY3l0tqNef6ngHWoFbnueX8B MR3l5xV2U2X2/vCTfSA9zP+byrJQLBs6adXmcM+svn5cM8RupwIDAQABo3sweTAJ BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0 aWZpY2F0ZTAdBgNVHQ4EFgQUSovuIkLm+GJMhjiNxXiVmMEQBXwwHwYDVR0jBBgw FoAUB0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAExOo8N54 xrHghcwn5gQoRJMd8f9egWkzH/N24EnK2a2q2/Wl+KZQu6GnQBTkL424IX81BGDb r/Ce3aHKC38DLi8ZHjJuHi2HaOM3R6hbk9GIQXPaiCFZJ9Q1HGontcDGF7rzh8jh 9I9DErz6jZDVhoPfUaXJ4JLwZtA3YW+FJBg= -----END CERTIFICATE----- mongo-2.5.1/spec/support/certificates/crl_client_revoked.pem0000644000004100000410000000121613257253113024343 0ustar www-datawww-data-----BEGIN X509 CRL----- MIIBujCCASMCAQEwDQYJKoZIhvcNAQEFBQAwgZIxCzAJBgNVBAYTAlVTMREwDwYD VQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwF 
MTBHZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3Jp dHkxGzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1cxcNMTMxMjA2MTUzMzUwWhcN MTQwMTA1MTUzMzUwWjBMMBICAQwXDTEzMTIwNjE1MjczMFowGgIJAJGUg/wuW1KD Fw0xMjEyMTIxODQ4MjJaMBoCCQCRlIP8LltShRcNMTIxMjEyMTg0ODUyWqAOMAww CgYDVR0UBAMCAQ4wDQYJKoZIhvcNAQEFBQADgYEAERPfPdQnIafo1lYbFEx2ojrb eYqvWN9ykTyUGq2bKv+STYiuaKUz6daGVjELjn/safn5wHkYr9+C/kRRoCor5HYw N3uxHnkMpl6Xn7kgXL2b0jbdvfa44faOXdH2gbhzd8bFsOMra4QJHT6CgpYb3ei1 +ePhAd1KS7tS/dyyP4c= -----END X509 CRL----- mongo-2.5.1/spec/support/certificates/crl.pem0000644000004100000410000000104413257253113021265 0ustar www-datawww-data-----BEGIN X509 CRL----- MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0 eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQ3NDFaFw00 MDA0MjgxODQ3NDFaoA4wDDAKBgNVHRQEAwIBCzANBgkqhkiG9w0BAQUFAAOBgQAu PlPDGei2q6kdkoHe8vmDuts7Hm/o9LFbBmn0XUcfHisCJCPsJTyGCsgnfIiBcXJY 1LMKsQFnYGv28rE2ZPpFg2qNxL+6qUEzCvqaHLX9q1V0F+f8hHDxucNYu52oo/h0 uNZxB1KPFI2PReG5d3oUYqJ2+EctKkrGtxSPzbN0gg== -----END X509 CRL----- mongo-2.5.1/spec/support/server_selection.rb0000644000004100000410000001246613257253113021247 0ustar www-datawww-datamodule Mongo module ServerSelection module Read # Represents a Server Selection specification test. # # @since 2.0.0 class Spec # Mapping of topology description strings to topology type classes. # # @since 2.0.0 TOPOLOGY_TYPES = { 'ReplicaSetNoPrimary' => Mongo::Cluster::Topology::ReplicaSet, 'ReplicaSetWithPrimary' => Mongo::Cluster::Topology::ReplicaSet, 'Sharded' => Mongo::Cluster::Topology::Sharded, 'Single' => Mongo::Cluster::Topology::Single, 'Unknown' => Mongo::Cluster::Topology::Unknown } # Mapping of read preference modes. # # @since 2.0.0 READ_PREFERENCES = { 'Primary' => :primary, 'Secondary' => :secondary, 'PrimaryPreferred' => :primary_preferred, 'SecondaryPreferred' => :secondary_preferred, 'Nearest' => :nearest, } # @return [ String ] description The spec description. # # @since 2.0.0 attr_reader :description # @return [ Hash ] read_preference The read preference to be used for selection. # # @since 2.0.0 attr_reader :read_preference # @return [ Integer ] heartbeat_frequency The heartbeat frequency to be set on the client. # # @since 2.4.0 attr_reader :heartbeat_frequency # @return [ Integer ] max_staleness The max_staleness. # # @since 2.4.0 attr_reader :max_staleness # @return [ Array ] eligible_servers The eligible servers before the latency # window is taken into account. # # @since 2.0.0 attr_reader :eligible_servers # @return [ Array ] suitable_servers The set of servers matching all server # selection logic. May be a subset of eligible_servers and/or candidate_servers. # # @since 2.0.0 attr_reader :suitable_servers # @return [ Mongo::Cluster::Topology ] type The topology type. # # @since 2.0.0 attr_reader :type # Instantiate the new spec. # # @example Create the spec. # Spec.new(file) # # @param [ String ] file The name of the file. 
# # @since 2.0.0 def initialize(file) file = File.new(file) @test = YAML.load(ERB.new(file.read).result) file.close @description = "#{@test['topology_description']['type']}: #{File.basename(file)}" @heartbeat_frequency = @test['heartbeatFrequencyMS'] / 1000 if @test['heartbeatFrequencyMS'] @read_preference = @test['read_preference'] @read_preference['mode'] = READ_PREFERENCES[@read_preference['mode']] @max_staleness = @read_preference['maxStalenessSeconds'] @candidate_servers = @test['topology_description']['servers'] @suitable_servers = @test['suitable_servers'] || [] @in_latency_window = @test['in_latency_window'] || [] @type = TOPOLOGY_TYPES[@test['topology_description']['type']] end # Whether this spec describes a replica set. # # @example Determine if the spec describes a replica set. # spec.replica_set? # # @return [true, false] If the spec describes a replica set. # # @since 2.0.0 def replica_set? type == Mongo::Cluster::Topology::ReplicaSet end # Does this spec expect a server to be found. # # @example Will a server be found with this spec. # spec.server_available? # # @return [true, false] If a server will be found with this spec. # # @since 2.0.0 def server_available? !in_latency_window.empty? end # Is the max staleness setting invalid. # # @example Will the max staleness setting be valid with other options. # spec.invalid_max_staleness? # # @return [ true, false ] If an error will be raised by the max staleness setting. # # @since 2.4.0 def invalid_max_staleness? @test['error'] end # The subset of suitable servers that falls within the allowable latency # window. # We have to correct for our server selection algorithm that adds the primary # to the end of the list for SecondaryPreferred read preference mode. # # @example Get the list of suitable servers within the latency window. # spec.in_latency_window # # @return [ Array ] The servers within the latency window. # # @since 2.0.0 def in_latency_window if read_preference['mode'] == :secondary_preferred && primary return @in_latency_window.push(primary).uniq end @in_latency_window end # The servers a topology would return as candidates for selection. # # @return [ Array ] candidate_servers The candidate servers. 
# # @since 2.0.0 def candidate_servers @candidate_servers.select { |s| s['type'] != 'Unknown' } end private def primary @candidate_servers.find { |s| s['type'] == 'RSPrimary' } end end end end end mongo-2.5.1/spec/support/connection_string_tests/0000755000004100000410000000000013257253113022305 5ustar www-datawww-datamongo-2.5.1/spec/support/connection_string_tests/valid-host_identifiers.yml0000644000004100000410000000575013257253113027476 0ustar www-datawww-datatests: - description: "Single IPv4 host without port" uri: "mongodb://127.0.0.1" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ auth: ~ options: ~ - description: "Single IPv4 host with port" uri: "mongodb://127.0.0.1:27018" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: 27018 auth: ~ options: ~ - description: "Single IP literal host without port" uri: "mongodb://[::1]" valid: true warning: false hosts: - type: "ip_literal" host: "::1" port: ~ auth: ~ options: ~ - description: "Single IP literal host with port" uri: "mongodb://[::1]:27019" valid: true warning: false hosts: - type: "ip_literal" host: "::1" port: 27019 auth: ~ options: ~ - description: "Single hostname without port" uri: "mongodb://example.com" valid: true warning: false hosts: - type: "hostname" host: "example.com" port: ~ auth: ~ options: ~ - description: "Single hostname with port" uri: "mongodb://example.com:27020" valid: true warning: false hosts: - type: "hostname" host: "example.com" port: 27020 auth: ~ options: ~ - description: "Single hostname (resembling IPv4) without port" uri: "mongodb://256.0.0.1" valid: true warning: false hosts: - type: "hostname" host: "256.0.0.1" port: ~ auth: ~ options: ~ - description: "Multiple hosts (mixed formats)" uri: "mongodb://127.0.0.1,[::1]:27018,example.com:27019" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ - type: "ip_literal" host: "::1" port: 27018 - type: "hostname" host: "example.com" port: 27019 auth: ~ options: ~ - description: "UTF-8 hosts" uri: "mongodb://bücher.example.com,umläut.example.com/" valid: true warning: false hosts: - type: "hostname" host: "bücher.example.com" port: ~ - type: "hostname" host: "umläut.example.com" port: ~ auth: ~ options: ~ mongo-2.5.1/spec/support/connection_string_tests/valid-unix_socket-absolute.yml0000644000004100000410000001351613257253113030302 0ustar www-datawww-datatests: - description: "Unix domain socket (absolute path with trailing slash)" uri: "mongodb://%2Ftmp%2Fmongodb-27017.sock/" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket (absolute path without trailing slash)" uri: "mongodb://%2Ftmp%2Fmongodb-27017.sock" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket (absolute path with spaces in path)" uri: "mongodb://%2Ftmp%2F %2Fmongodb-27017.sock" valid: true warning: false hosts: - type: "unix" host: "/tmp/ /mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Multiple Unix domain sockets (absolute paths)" uri: "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: ~ options: ~ - description: "Multiple hosts (absolute path and ipv4)" uri: "mongodb://127.0.0.1:27017,%2Ftmp%2Fmongodb-27017.sock" valid: true warning: false hosts: - type: 
"ipv4" host: "127.0.0.1" port: 27017 - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Multiple hosts (absolute path and hostname resembling relative path)" uri: "mongodb://mongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock" valid: true warning: false hosts: - type: "hostname" host: "mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket with auth database (absolute path)" uri: "mongodb://alice:foo@%2Ftmp%2Fmongodb-27017.sock/admin" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ auth: username: "alice" password: "foo" db: "admin" options: ~ - description: "Unix domain socket with path resembling socket file (absolute path with trailing slash)" uri: "mongodb://%2Ftmp%2Fpath.to.sock%2Fmongodb-27017.sock/" valid: true warning: false hosts: - type: "unix" host: "/tmp/path.to.sock/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket with path resembling socket file (absolute path without trailing slash)" uri: "mongodb://%2Ftmp%2Fpath.to.sock%2Fmongodb-27017.sock" valid: true warning: false hosts: - type: "unix" host: "/tmp/path.to.sock/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket with path resembling socket file and auth (absolute path)" uri: "mongodb://bob:bar@%2Ftmp%2Fpath.to.sock%2Fmongodb-27017.sock/admin" valid: true warning: false hosts: - type: "unix" host: "/tmp/path.to.sock/mongodb-27017.sock" port: ~ auth: username: "bob" password: "bar" db: "admin" options: ~ - description: "Multiple Unix domain sockets and auth DB (absolute path)" uri: "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: username: ~ password: ~ db: "admin" options: ~ - description: "Multiple Unix domain sockets with auth DB (absolute path)" uri: "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: username: ~ password: ~ db: "admin" options: ~ - description: "Multiple Unix domain sockets with auth and query string (absolute path)" uri: "mongodb://bob:bar@%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin?w=1" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: username: "bob" password: "bar" db: "admin" options: w: 1 mongo-2.5.1/spec/support/connection_string_tests/valid-db-with-dotted-name.yml0000644000004100000410000000450513257253113027666 0ustar www-datawww-datatests: - description: "Multiple Unix domain sockets and auth DB resembling a socket (relative path)" uri: "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin.sock" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ - type: "unix" host: "rel/mongodb-27018.sock" port: ~ auth: username: ~ password: ~ db: "admin.sock" options: ~ - description: "Multiple Unix domain sockets with auth DB resembling a path (relative path)" uri: "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin.shoe" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ - type: "unix" host: "rel/mongodb-27018.sock" port: ~ auth: 
username: ~ password: ~ db: "admin.shoe" options: ~ - description: "Multiple Unix domain sockets and auth DB resembling a socket (absolute path)" uri: "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin.sock" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: username: ~ password: ~ db: "admin.sock" options: ~ - description: "Multiple Unix domain sockets with auth DB resembling a path (absolute path)" uri: "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin.shoe" valid: true warning: false hosts: - type: "unix" host: "/tmp/mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: username: ~ password: ~ db: "admin.shoe" options: ~ mongo-2.5.1/spec/support/connection_string_tests/valid-auth.yml0000644000004100000410000001517013257253113025072 0ustar www-datawww-datatests: - description: "User info for single IPv4 host without database" uri: "mongodb://alice:foo@127.0.0.1" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ auth: username: "alice" password: "foo" db: ~ options: ~ - description: "User info for single IPv4 host with database" uri: "mongodb://alice:foo@127.0.0.1/test" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ auth: username: "alice" password: "foo" db: "test" options: ~ - description: "User info for single IP literal host without database" uri: "mongodb://bob:bar@[::1]:27018" valid: true warning: false hosts: - type: "ip_literal" host: "::1" port: 27018 auth: username: "bob" password: "bar" db: ~ options: ~ - description: "User info for single IP literal host with database" uri: "mongodb://bob:bar@[::1]:27018/admin" valid: true warning: false hosts: - type: "ip_literal" host: "::1" port: 27018 auth: username: "bob" password: "bar" db: "admin" options: ~ - description: "User info for single hostname without database" uri: "mongodb://eve:baz@example.com" valid: true warning: false hosts: - type: "hostname" host: "example.com" port: ~ auth: username: "eve" password: "baz" db: ~ options: ~ - description: "User info for single hostname with database" uri: "mongodb://eve:baz@example.com/db2" valid: true warning: false hosts: - type: "hostname" host: "example.com" port: ~ auth: username: "eve" password: "baz" db: "db2" options: ~ - description: "User info for multiple hosts without database" uri: "mongodb://alice:secret@127.0.0.1,example.com:27018" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ - type: "hostname" host: "example.com" port: 27018 auth: username: "alice" password: "secret" db: ~ options: ~ - description: "User info for multiple hosts with database" uri: "mongodb://alice:secret@example.com,[::1]:27019/admin" valid: true warning: false hosts: - type: "hostname" host: "example.com" port: ~ - type: "ip_literal" host: "::1" port: 27019 auth: username: "alice" password: "secret" db: "admin" options: ~ - description: "Username without password" uri: "mongodb://alice@127.0.0.1" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ auth: username: "alice" password: ~ db: ~ options: ~ - description: "Username with empty password" uri: "mongodb://alice:@127.0.0.1" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ auth: username: "alice" password: "" db: ~ options: ~ - description: "Escaped username and database without password" uri: "mongodb://%40l%3Ace%2F%3D@example.com/my%3Ddb" 
valid: true warning: false hosts: - type: "hostname" host: "example.com" port: ~ auth: username: "@l:ce/=" password: ~ db: "my=db" options: ~ - description: "Escaped user info and database (MONGODB-CR)" uri: "mongodb://%24am:f%3Azzb%40z%2Fz%3D@127.0.0.1/admin%3F?authMechanism=MONGODB-CR" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: ~ auth: username: "$am" password: "f:zzb@z/z=" db: "admin?" options: authmechanism: "MONGODB-CR" - description: "Escaped username (MONGODB-X509)" uri: "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509" valid: true warning: false hosts: - type: "hostname" host: "localhost" port: ~ auth: username: "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry" password: ~ db: ~ options: authmechanism: "MONGODB-X509" - description: "Escaped username (GSSAPI)" uri: "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authMechanism=GSSAPI" valid: true warning: false hosts: - type: "hostname" host: "localhost" port: ~ auth: username: "user@EXAMPLE.COM" password: "secret" db: ~ options: authmechanism: "GSSAPI" authmechanismproperties: SERVICE_NAME: "other" CANONICALIZE_HOST_NAME: true - description: "At-signs in options aren't part of the userinfo" uri: "mongodb://alice:secret@example.com/admin?replicaset=my@replicaset" valid: true warning: false hosts: - type: "hostname" host: "example.com" port: ~ auth: username: "alice" password: "secret" db: "admin" options: replicaset: "my@replicaset" mongo-2.5.1/spec/support/connection_string_tests/valid-unix_socket-relative.yml0000644000004100000410000001442013257253113030272 0ustar www-datawww-datatests: - description: "Unix domain socket (relative path with trailing slash)" uri: "mongodb://rel%2Fmongodb-27017.sock/" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket (relative path without trailing slash)" uri: "mongodb://rel%2Fmongodb-27017.sock" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket (relative path with spaces)" uri: "mongodb://rel%2F %2Fmongodb-27017.sock" valid: true warning: false hosts: - type: "unix" host: "rel/ /mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Multiple Unix domain sockets (relative paths)" uri: "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ - type: "unix" host: "rel/mongodb-27018.sock" port: ~ auth: ~ options: ~ - description: "Multiple Unix domain sockets (relative and absolute paths)" uri: "mongodb://rel%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ - type: "unix" host: "/tmp/mongodb-27018.sock" port: ~ auth: ~ options: ~ - description: "Multiple hosts (relative path and ipv4)" uri: "mongodb://127.0.0.1:27017,rel%2Fmongodb-27017.sock" valid: true warning: false hosts: - type: "ipv4" host: "127.0.0.1" port: 27017 - type: "unix" host: "rel/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Multiple hosts (relative path and hostname resembling relative path)" uri: "mongodb://mongodb-27017.sock,rel%2Fmongodb-27018.sock" valid: true warning: false hosts: - type: "hostname" host: "mongodb-27017.sock" port: ~ - type: 
"unix" host: "rel/mongodb-27018.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket with auth database (relative path)" uri: "mongodb://alice:foo@rel%2Fmongodb-27017.sock/admin" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ auth: username: "alice" password: "foo" db: "admin" options: ~ - description: "Unix domain socket with path resembling socket file (relative path with trailing slash)" uri: "mongodb://rel%2Fpath.to.sock%2Fmongodb-27017.sock/" valid: true warning: false hosts: - type: "unix" host: "rel/path.to.sock/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket with path resembling socket file (relative path without trailing slash)" uri: "mongodb://rel%2Fpath.to.sock%2Fmongodb-27017.sock" valid: true warning: false hosts: - type: "unix" host: "rel/path.to.sock/mongodb-27017.sock" port: ~ auth: ~ options: ~ - description: "Unix domain socket with path resembling socket file and auth (relative path)" uri: "mongodb://bob:bar@rel%2Fpath.to.sock%2Fmongodb-27017.sock/admin" valid: true warning: false hosts: - type: "unix" host: "rel/path.to.sock/mongodb-27017.sock" port: ~ auth: username: "bob" password: "bar" db: "admin" options: ~ - description: "Multiple Unix domain sockets and auth DB resembling a socket (relative path)" uri: "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ - type: "unix" host: "rel/mongodb-27018.sock" port: ~ auth: username: ~ password: ~ db: "admin" options: ~ - description: "Multiple Unix domain sockets with auth DB resembling a path (relative path)" uri: "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ - type: "unix" host: "rel/mongodb-27018.sock" port: ~ auth: username: ~ password: ~ db: "admin" options: ~ - description: "Multiple Unix domain sockets with auth and query string (relative path)" uri: "mongodb://bob:bar@rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin?w=1" valid: true warning: false hosts: - type: "unix" host: "rel/mongodb-27017.sock" port: ~ - type: "unix" host: "rel/mongodb-27018.sock" port: ~ auth: username: "bob" password: "bar" db: "admin" options: w: 1 mongo-2.5.1/spec/support/connection_string_tests/valid-warnings.yml0000644000004100000410000000304313257253113025755 0ustar www-datawww-datatests: - description: "Unrecognized option keys are ignored" uri: "mongodb://example.com/?foo=bar" valid: true warning: true hosts: - type: "hostname" host: "example.com" port: ~ auth: ~ options: ~ # # A Mongo::Client doesn't validate option values when initialized so the uri doesn't either. 
# - # description: "Unsupported option values are ignored" # uri: "mongodb://example.com/?fsync=ifPossible" # valid: true # warning: true # hosts: # - # type: "hostname" # host: "example.com" # port: ~ # auth: ~ # options: ~ # - description: "Repeated option keys" uri: "mongodb://example.com/?replicaSet=test&replicaSet=test" valid: true warning: true hosts: - type: "hostname" host: "example.com" port: ~ auth: ~ options: replicaset: "test" # # Change this test in the future if there is a deprecated option in driver 2.0 # - # description: "Deprecated (or unknown) options are ignored if replacement exists" # uri: "mongodb://example.com/?wtimeout=5&wtimeoutMS=10" # valid: true # warning: true # hosts: # - # type: "hostname" # host: "example.com" # port: ~ # auth: ~ # options: # wtimeoutms: 10 mongo-2.5.1/spec/support/connection_string_tests/invalid-uris.yml0000644000004100000410000001406713257253113025446 0ustar www-datawww-datatests: - description: "Empty string" uri: "" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid scheme" uri: "mongo://localhost:27017" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Missing host" uri: "mongodb://" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Double colon in host identifier" uri: "mongodb://localhost::27017" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Double colon in host identifier and trailing slash" uri: "mongodb://localhost::27017/" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Double colon in host identifier with missing host and port" uri: "mongodb://::" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Double colon in host identifier with missing port" uri: "mongodb://localhost,localhost::" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Double colon in host identifier and second host" uri: "mongodb://localhost::27017,abc" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (negative number) with hostname" uri: "mongodb://localhost:-1" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (zero) with hostname" uri: "mongodb://localhost:0/" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (positive number) with hostname" uri: "mongodb://localhost:65536" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (positive number) with hostname and trailing slash" uri: "mongodb://localhost:65536/" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (non-numeric string) with hostname" uri: "mongodb://localhost:foo" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (negative number) with IP literal" uri: "mongodb://[::1]:-1" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (zero) with IP literal" uri: "mongodb://[::1]:0/" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (positive number) with IP literal" uri: "mongodb://[::1]:65536" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (positive number) with IP literal and trailing slash" uri: "mongodb://[::1]:65536/" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Invalid port (non-numeric string) with IP literal" uri: "mongodb://[::1]:foo" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Missing delimiting slash between hosts and options" uri: 
"mongodb://example.com?w=1" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Incomplete key value pair for option" uri: "mongodb://example.com/?w" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Username with password containing an unescaped colon" uri: "mongodb://alice:foo:bar@127.0.0.1" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Username containing an unescaped at-sign" uri: "mongodb://alice@@127.0.0.1" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Username with password containing an unescaped at-sign" uri: "mongodb://alice@foo:bar@127.0.0.1" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Username containing an unescaped slash" uri: "mongodb://alice/@localhost/db" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Username containing unescaped slash with password" uri: "mongodb://alice/bob:foo@localhost/db" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Username with password containing an unescaped slash" uri: "mongodb://alice:foo/bar@localhost/db" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Host with unescaped slash" uri: "mongodb:///tmp/mongodb-27017.sock/" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "mongodb+srv with multiple service names" uri: "mongodb+srv://test5.test.mongodb.com,test6.test.mongodb.com" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "mongodb+srv with port number" uri: "mongodb+srv://test7.test.mongodb.com:27018" valid: false warning: ~ hosts: ~ auth: ~ options: ~ - description: "Username with password containing an unescaped percent sign" uri: "mongodb://alice%foo:bar@127.0.0.1" valid: false warning: ~ hosts: ~ auth: ~ options: ~ mongo-2.5.1/spec/support/connection_string_tests/valid-options.yml0000644000004100000410000000073713257253113025627 0ustar www-datawww-datatests: - description: "Option names are normalized to lowercase" uri: "mongodb://alice:secret@example.com/admin?AUTHMechanism=MONGODB-CR" valid: true warning: false hosts: - type: "hostname" host: "example.com" port: ~ auth: username: "alice" password: "secret" db: "admin" options: authmechanism: "MONGODB-CR" mongo-2.5.1/spec/support/command_monitoring/0000755000004100000410000000000013257253113021221 5ustar www-datawww-datamongo-2.5.1/spec/support/command_monitoring/updateOne.yml0000644000004100000410000000456413257253113023701 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful update one" operation: name: "updateOne" arguments: filter: _id: { $gt: 1 } update: $inc: { x: 1 } expectations: - command_started_event: command: update: *collection_name ordered: true updates: - q: { _id: { $gt: 1 }} u: { $inc: { x: 1 }} multi: false upsert: false command_name: "update" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 1 } command_name: "update" - description: "A successful update one with upsert when the upserted id is not an object id" operation: name: "updateOne" arguments: filter: _id: 4 update: $inc: { x: 1 } upsert: true expectations: - command_started_event: command: update: *collection_name ordered: true updates: - q: { _id: 4 } u: { $inc: { x: 1 } } multi: false upsert: true command_name: "update" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 1, upserted: [{ index: 0, _id: 4 
}] } command_name: "update" - description: "A successful update one command with write errors" operation: name: "updateOne" arguments: filter: _id: { $gt: 1 } update: $nothing: { x: 1 } expectations: - command_started_event: command: update: *collection_name ordered: true updates: - q: { _id: { $gt: 1 }} u: { $nothing: { x: 1 }} multi: false upsert: false command_name: "update" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 n: 0 writeErrors: - { index: 0, code: 42, errmsg: "" } command_name: "update" mongo-2.5.1/spec/support/command_monitoring/insertOne.yml0000644000004100000410000000237513257253113023721 0ustar www-datawww-datadata: - { _id: 1, x: 11 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful insert one" operation: name: "insertOne" arguments: document: { _id: 2, x: 22 } expectations: - command_started_event: command: insert: *collection_name documents: - { _id: 2, x: 22 } ordered: true command_name: "insert" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 1 } command_name: "insert" - description: "A successful insert one command with write errors" operation: name: "insertOne" arguments: document: { _id: 1, x: 11 } expectations: - command_started_event: command: insert: *collection_name documents: - { _id: 1, x: 11 } ordered: true command_name: "insert" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 n: 0 writeErrors: - { index: 0, code: 42, errmsg: "" } command_name: "insert" mongo-2.5.1/spec/support/command_monitoring/bulkWrite.yml0000644000004100000410000000376413257253113023726 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful mixed bulk write" operation: name: "bulkWrite" arguments: requests: - insertOne: document: { _id: 4, x: 44 } - updateOne: filter: { _id: 3 } update: { $set: { x: 333 } } expectations: - command_started_event: command: insert: *collection_name documents: - { _id: 4, x: 44 } ordered: true command_name: "insert" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 1 } command_name: "insert" - command_started_event: command: update: *collection_name updates: - { q: {_id: 3 }, u: { $set: { x: 333 } }, upsert: false, multi: false } ordered: true command_name: "update" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 1 } command_name: "update" - description: "A successful unordered bulk write with an unacknowledged write concern" comment: "On a 2.4 server, no GLE is sent and requires a client-side manufactored reply" operation: name: "bulkWrite" arguments: requests: - insertOne: document: { _id: 4, x: 44 } ordered: false writeConcern: { w: 0 } expectations: - command_started_event: command: insert: *collection_name documents: - { _id: 4, x: 44 } ordered: false writeConcern: { w: 0 } command_name: "insert" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0 } command_name: "insert" mongo-2.5.1/spec/support/command_monitoring/deleteMany.yml0000644000004100000410000000254313257253113024037 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful delete many" operation: name: "deleteMany" arguments: filter: _id: { $gt: 1 } 
expectations: - command_started_event: command: delete: *collection_name deletes: - { q: { _id: { $gt: 1 }}, limit: 0 } ordered: true command_name: "delete" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 2 } command_name: "delete" - description: "A successful delete many command with write errors" operation: name: "deleteMany" arguments: filter: _id: { $nothing: 1 } expectations: - command_started_event: command: delete: *collection_name deletes: - { q: { _id: { $nothing: 1 }}, limit: 0 } ordered: true command_name: "delete" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 n: 0 writeErrors: - { index: 0, code: 42, errmsg: "" } command_name: "delete" mongo-2.5.1/spec/support/command_monitoring/command.yml0000644000004100000410000000172613257253113023370 0ustar www-datawww-datadata: - { _id: 1, x: 11 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful command" operation: name: "count" arguments: filter: { _id: 1 } expectations: - command_started_event: command: count: *collection_name query: { _id: 1 } command_name: "count" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 1 } command_name: "count" - description: "A failed command event" operation: name: "count" arguments: filter: { $or: true } expectations: - command_started_event: command: count: *collection_name query: { $or: true } command_name: "count" database_name: *database_name - command_failed_event: command_name: "count" mongo-2.5.1/spec/support/command_monitoring/deleteOne.yml0000644000004100000410000000253713257253113023657 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful delete one" operation: name: "deleteOne" arguments: filter: _id: { $gt: 1 } expectations: - command_started_event: command: delete: *collection_name deletes: - { q: { _id: { $gt: 1 }}, limit: 1 } ordered: true command_name: "delete" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 1 } command_name: "delete" - description: "A successful delete one command with write errors" operation: name: "deleteOne" arguments: filter: _id: { $nothing: 1 } expectations: - command_started_event: command: delete: *collection_name deletes: - { q: { _id: { $nothing: 1 }}, limit: 1 } ordered: true command_name: "delete" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 n: 0 writeErrors: - { index: 0, code: 42, errmsg: "" } command_name: "delete" mongo-2.5.1/spec/support/command_monitoring/updateMany.yml0000644000004100000410000000316513257253113024060 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful update many" operation: name: "updateMany" arguments: filter: _id: { $gt: 1 } update: $inc: { x: 1 } expectations: - command_started_event: command: update: *collection_name ordered: true updates: - q: { _id: { $gt: 1 }} u: { $inc: { x: 1 }} multi: true upsert: false command_name: "update" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 2 } command_name: "update" - description: "A successful update many command with write errors" operation: name: "updateMany" arguments: filter: _id: { $gt: 1 } update: $nothing: { x: 1 } expectations: - 
command_started_event: command: update: *collection_name ordered: true updates: - q: { _id: { $gt: 1 }} u: { $nothing: { x: 1 }} multi: true upsert: false command_name: "update" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 n: 0 writeErrors: - { index: 0, code: 42, errmsg: "" } command_name: "update" mongo-2.5.1/spec/support/command_monitoring/find.yml0000644000004100000410000001603213257253113022666 0ustar www-datawww-datadata: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } - { _id: 4, x: 44 } - { _id: 5, x: 55 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" namespace: &namespace "ruby-driver.test" tests: - description: "A successful find event with no options" operation: name: "find" arguments: filter: { _id: 1 } expectations: - command_started_event: command: find: *collection_name filter: { _id: 1 } command_name: "find" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursor: id: { $numberLong: "0" } ns: *namespace firstBatch: - { _id: 1, x: 11 } command_name: "find" - description: "A successful find event with options" operation: name: "find" read_preference: { mode: "primaryPreferred" } arguments: filter: { _id: { $gt: 1 } } sort: { _id: 1 } skip: 2 modifiers: $comment: "test" $hint: { _id: 1 } $max: { _id: 6 } $maxScan: 5000 $maxTimeMS: 6000 $min: { _id: 0 } $returnKey: false $showDiskLoc: false $snapshot: false expectations: - command_started_event: command: find: *collection_name filter: { _id: { $gt: 1 } } sort: { _id: 1 } skip: 2 comment: "test" hint: { _id: 1 } max: { _id: 6 } maxScan: 5000 maxTimeMS: 6000 min: { _id: 0 } returnKey: false showRecordId: false snapshot: false command_name: "find" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursor: id: { $numberLong: "0" } ns: *namespace firstBatch: - { _id: 4, x: 44 } - { _id: 5, x: 55 } command_name: "find" - description: "A successful find event with a getmore" operation: name: "find" arguments: filter: { _id: { $gte: 1 }} sort: { _id: 1 } batchSize: 3 expectations: - command_started_event: command: find: *collection_name filter: { _id: { $gte : 1 }} sort: { _id: 1 } batchSize: 3 command_name: "find" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursor: id: { $numberLong: "42" } ns: *namespace firstBatch: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } command_name: "find" - command_started_event: command: getMore: { $numberLong: "42" } collection: *collection_name batchSize: 3 command_name: "getMore" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursor: id: { $numberLong: "0" } ns: *namespace nextBatch: - { _id: 4, x: 44 } - { _id: 5, x: 55 } command_name: "getMore" - description: "A successful find event with a getmore and killcursors" ignore_if_server_version_greater_than: "3.0" operation: name: "find" arguments: filter: { _id: { $gte: 1 }} sort: { _id: 1 } batchSize: 3 limit: 4 expectations: - command_started_event: command: find: *collection_name filter: { _id: { $gte : 1 }} sort: { _id: 1 } batchSize: 3 limit: 4 command_name: "find" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursor: id: { $numberLong: "42" } ns: *namespace firstBatch: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } command_name: "find" - command_started_event: command: getMore: { $numberLong: "42" } collection: *collection_name batchSize: 1 command_name: "getMore" database_name: *database_name - command_succeeded_event: 
reply: ok: 1.0 cursor: id: { $numberLong: "42" } ns: *namespace nextBatch: - { _id: 4, x: 44 } command_name: "getMore" - command_started_event: command: killCursors: *collection_name cursors: - { $numberLong: "42" } command_name: "killCursors" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursorsUnknown: - { $numberLong: "42" } command_name: "killCursors" - description: "A successful find event with a getmore and the server kills the cursor" ignore_if_server_version_less_than: "3.1" operation: name: "find" arguments: filter: { _id: { $gte: 1 }} sort: { _id: 1 } batchSize: 3 limit: 4 expectations: - command_started_event: command: find: *collection_name filter: { _id: { $gte : 1 }} sort: { _id: 1 } batchSize: 3 limit: 4 command_name: "find" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursor: id: { $numberLong: "42" } ns: *namespace firstBatch: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } command_name: "find" - command_started_event: command: getMore: { $numberLong: "42" } collection: *collection_name batchSize: 3 command_name: "getMore" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 cursor: id: { $numberLong: "0" } ns: *namespace nextBatch: - { _id: 4, x: 44 } command_name: "getMore" - description: "A failed find event" operation: name: "find" arguments: filter: { $or: true } expectations: - command_started_event: command: find: *collection_name filter: { $or: true } command_name: "find" database_name: *database_name - command_failed_event: command_name: "find" mongo-2.5.1/spec/support/command_monitoring/insertMany.yml0000644000004100000410000000402113257253113024072 0ustar www-datawww-datadata: - { _id: 1, x: 11 } collection_name: &collection_name "test" database_name: &database_name "ruby-driver" tests: - description: "A successful insert many" operation: name: "insertMany" arguments: documents: - { _id: 2, x: 22 } - { _id: 3, x: 33 } expectations: - command_started_event: command: insert: *collection_name documents: - { _id: 2, x: 22 } - { _id: 3, x: 33 } ordered: true command_name: "insert" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 2 } command_name: "insert" - description: "A successful insert many command with write errors" operation: name: "insertMany" arguments: documents: - { _id: 1, x: 11 } - { _id: 2, x: 22 } expectations: - command_started_event: command: insert: *collection_name documents: - { _id: 1, x: 11 } - { _id: 2, x: 22 } ordered: true command_name: "insert" database_name: *database_name - command_succeeded_event: reply: ok: 1.0 n: 0 writeErrors: - { index: 0, code: 42, errmsg: "" } command_name: "insert" - description: "A successful unordered insert many" operation: name: "insertMany" arguments: documents: - { _id: 2, x: 22 } - { _id: 3, x: 33 } ordered: false expectations: - command_started_event: command: insert: *collection_name documents: - { _id: 2, x: 22 } - { _id: 3, x: 33 } ordered: false command_name: "insert" database_name: *database_name - command_succeeded_event: reply: { ok: 1.0, n: 2 } command_name: "insert" mongo-2.5.1/spec/support/max_staleness/0000755000004100000410000000000013257253113020204 5ustar www-datawww-datamongo-2.5.1/spec/support/max_staleness/Unknown/0000755000004100000410000000000013257253113021643 5ustar www-datawww-datamongo-2.5.1/spec/support/max_staleness/Unknown/SmallMaxStaleness.yml0000644000004100000410000000046113257253113025767 0ustar www-datawww-data# Driver doesn't validate maxStalenessSeconds while 
TopologyType is Unknown. --- heartbeatFrequencyMS: 10000 topology_description: type: Unknown servers: - &1 address: a:27017 type: Unknown read_preference: mode: Nearest maxStalenessSeconds: 1 suitable_servers: [] in_latency_window: [] mongo-2.5.1/spec/support/max_staleness/Single/0000755000004100000410000000000013257253113021425 5ustar www-datawww-datamongo-2.5.1/spec/support/max_staleness/Single/Incompatible.yml0000644000004100000410000000076113257253113024562 0ustar www-datawww-data# During server selection, clients (drivers or mongos) MUST raise an error if # maxStalenessSeconds is defined and not -1 and any server's ``maxWireVersion`` # is less than 5 (`SERVER-23893`_). --- topology_description: type: Single servers: - &1 address: a:27017 type: Standalone avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 4 # Incompatible. lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 120 error: true mongo-2.5.1/spec/support/max_staleness/Single/SmallMaxStaleness.yml0000644000004100000410000000064013257253113025550 0ustar www-datawww-data# Driver doesn't validate maxStalenessSeconds for direct connection. --- heartbeatFrequencyMS: 10000 topology_description: type: Single servers: - &1 address: a:27017 type: Standalone avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 1 suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/Sharded/0000755000004100000410000000000013257253113021556 5ustar www-datawww-datamongo-2.5.1/spec/support/max_staleness/Sharded/Incompatible.yml0000644000004100000410000000121413257253113024705 0ustar www-datawww-data# During server selection, clients (drivers or mongos) MUST raise an error if # maxStalenessSeconds is defined and not -1 and any server's ``maxWireVersion`` # is less than 5 (`SERVER-23893`_). --- topology_description: type: Sharded servers: - &1 address: a:27017 type: Mongos avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: Mongos avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 4 # Incompatible. lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 120 error: true mongo-2.5.1/spec/support/max_staleness/Sharded/SmallMaxStaleness.yml0000644000004100000410000000112513257253113025700 0ustar www-datawww-data# Driver doesn't validate maxStalenessSeconds for mongos --- heartbeatFrequencyMS: 10000 topology_description: type: Sharded servers: - &1 address: a:27017 type: Mongos avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: Mongos avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 1 # OK for sharding. suitable_servers: - *1 - *2 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/0000755000004100000410000000000013257253113024100 5ustar www-datawww-datamongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.yml0000644000004100000410000000100513257253113030066 0ustar www-datawww-data# maxStalenessSeconds=0 is prohibited. 
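# A short sketch of the rule this test relies on, inferred from the sibling
# files in this directory rather than stated here: maxStalenessSeconds must be
# either -1 (no maximum) or at least 90 seconds, and never smaller than
# heartbeatFrequencyMS plus roughly 10 seconds of idle-write padding (see
# LongHeartbeat.yml, where 120s + 10s makes 130 the smallest legal value).
# A value of 0 satisfies none of these, so server selection must error.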
--- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 4 # Incompatible. lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 0 error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.yml0000644000004100000410000000106413257253113030412 0ustar www-datawww-data# Filter out the stale secondary. --- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: SecondaryPreferred maxStalenessSeconds: 120 suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/Secondary.yml0000644000004100000410000000236213257253113026555 0ustar www-datawww-data# Latest secondary's lastWriteDate is used normally with read preference tags. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "125002"}} tags: data_center: tokyo # No match, but its lastWriteDate is used in estimate. - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc - &4 address: d:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. read_preference: mode: Secondary maxStalenessSeconds: 150 tag_sets: - data_center: nyc suitable_servers: - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/Nearest2.yml0000644000004100000410000000143613257253113026312 0ustar www-datawww-dataheartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. maxWireVersion: 5 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. maxWireVersion: 5 read_preference: mode: Nearest maxStalenessSeconds: 150 suitable_servers: - *1 - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.yml0000644000004100000410000000113213257253113031031 0ustar www-datawww-data# By default, a read preference sets no maximum on staleness. --- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. 
lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: Nearest suitable_servers: # Very stale server is fine. - *1 - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.yml0000644000004100000410000000151313257253113027510 0ustar www-datawww-dataheartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 1 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 25002 # Not used when there's no primary. lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. maxWireVersion: 5 - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 25001 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. maxWireVersion: 5 read_preference: mode: Nearest maxStalenessSeconds: 150 suitable_servers: - *1 - *2 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.yml0000644000004100000410000000237313257253113031434 0ustar www-datawww-data# Latest secondary's lastWriteDate is used normally with read preference tags. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "125002"}} tags: data_center: tokyo # No match, but its lastWriteDate is used in estimate. - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc - &4 address: d:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. read_preference: mode: SecondaryPreferred maxStalenessSeconds: 150 tag_sets: - data_center: nyc suitable_servers: - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/Nearest.yml0000644000004100000410000000143613257253113026230 0ustar www-datawww-dataheartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. maxWireVersion: 5 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. 
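# A sketch of the arithmetic implied by the inline comments above (with no
# primary, staleness is estimated against the freshest secondary): roughly
# (max lastWriteDate - this server's lastWriteDate) + heartbeatFrequencyMS.
# For b: (125002 - 2) ms + 25 s = 150 s, exactly the 150 s limit; for c:
# (125002 - 1) ms + 25 s just exceeds it, hence "Too stale".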
maxWireVersion: 5 read_preference: mode: Nearest maxStalenessSeconds: 150 suitable_servers: - *1 - *2 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.yml0000644000004100000410000000156113257253113031126 0ustar www-datawww-data# maxStalenessSeconds is applied before tag sets. With tag sets # [{data_center: nyc}, {data_center: tokyo}], if the only node in NYC is stale # then use Tokyo. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 tags: data_center: tokyo # Matches second tag set. - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. maxWireVersion: 5 tags: data_center: nyc read_preference: mode: PrimaryPreferred maxStalenessSeconds: 150 tag_sets: - data_center: nyc - data_center: tokyo suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/NoKnownServers.yml0000644000004100000410000000050013257253113027561 0ustar www-datawww-data# maxStalenessSeconds must be at least 90 seconds, even with no known servers. --- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: Unknown - &2 address: b:27017 type: Unknown read_preference: mode: Nearest maxStalenessSeconds: 1 # Too small. error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/Incompatible.yml0000644000004100000410000000124213257253113027230 0ustar www-datawww-data# During server selection, clients (drivers or mongos) MUST raise an error if # maxStalenessSeconds is defined and not -1 and any server's ``maxWireVersion`` # is less than 5 (`SERVER-23893`_). --- topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 4 # Incompatible. lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 120 error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.yml0000644000004100000410000000114113257253113030102 0ustar www-datawww-data# Fallback to secondary if no primary. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetNoPrimary servers: - &1 address: a:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: PrimaryPreferred maxStalenessSeconds: 90 suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/0000755000004100000410000000000013257253113024437 5ustar www-datawww-datamongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.yml0000644000004100000410000000100513257253113030425 0ustar www-datawww-data# maxStalenessSeconds=0 is prohibited. 
--- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 4 # Incompatible. lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 0 error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.yml0000644000004100000410000000111113257253113030742 0ustar www-datawww-data# Fallback to primary if no secondary is fresh enough. --- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: SecondaryPreferred maxStalenessSeconds: 120 suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.yml0000644000004100000410000000106113257253113032574 0ustar www-datawww-data# Drivers MUST raise an error if maxStalenessSeconds is defined and not -1 # and the ``mode`` field is 'primary'. --- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: maxStalenessSeconds: 120 error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.yml0000644000004100000410000000135013257253113033171 0ustar www-datawww-data# Primary has wire version 5, secondary has 4, read preference primaryPreferred # with maxStalenessSeconds. The client must error, even though it uses primary and # never applies maxStalenessSeconds. Proves that the compatibility check precedes # filtration. --- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 4 # Too old. lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: PrimaryPreferred maxStalenessSeconds: 150 error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/Nearest_tags.yml0000644000004100000410000000151513257253113027603 0ustar www-datawww-data# maxStalenessSeconds is applied before tag sets. With tag sets # [{data_center: nyc}, {data_center: tokyo}], if the only node in NYC is stale # then use Tokyo. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 tags: data_center: tokyo - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. 
maxWireVersion: 5 tags: data_center: nyc read_preference: mode: Nearest maxStalenessSeconds: 150 tag_sets: - data_center: nyc - data_center: tokyo suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/Nearest2.yml0000644000004100000410000000143613257253113026651 0ustar www-datawww-dataheartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. maxWireVersion: 5 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. maxWireVersion: 5 read_preference: mode: Nearest maxStalenessSeconds: 150 suitable_servers: - *1 - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.yml0000644000004100000410000000113213257253113031370 0ustar www-datawww-data# By default, a read preference sets no maximum on staleness. --- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: Nearest suitable_servers: # Very stale server is fine. - *1 - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.yml0000644000004100000410000000121213257253113031240 0ustar www-datawww-data# A driver MUST raise an error # if the TopologyType is ReplicaSetWithPrimary or ReplicaSetNoPrimary # and ``maxStalenessSeconds`` is less than 90. --- heartbeatFrequencyMS: 500 topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 89 # Too small. error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.yml0000644000004100000410000000153513257253113030053 0ustar www-datawww-dataheartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 50 # Too far. lastUpdateTime: 1 lastWrite: {lastWriteDate: {$numberLong: "2"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 125001 # Updated 125 sec after primary, so 125 sec stale. # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. lastWrite: {lastWriteDate: {$numberLong: "2"}} maxWireVersion: 5 - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 125001 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. 
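# A sketch of the arithmetic implied by the inline comments above (with a
# primary present, staleness is estimated against the primary): roughly
# (S.lastUpdateTime - S.lastWriteDate) - (P.lastUpdateTime - P.lastWriteDate)
# + heartbeatFrequencyMS. For b: (125001 - 2) - (1 - 2) ms + 25 s = 150 s,
# right at the limit; c's older lastWriteDate pushes it just past 150 s.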
maxWireVersion: 5 read_preference: mode: Nearest maxStalenessSeconds: 150 suitable_servers: - *1 - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/Secondary_tags.yml0000644000004100000410000000264213257253113030133 0ustar www-datawww-data# Primary's lastWriteDate is used normally with SecondaryPreferred and tags. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 1 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} # Not used in estimate since we have a primary. tags: data_center: nyc - &4 address: d:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc - &5 address: e:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. read_preference: mode: Secondary maxStalenessSeconds: 150 tag_sets: - data_center: nyc suitable_servers: - *2 - *3 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.yml0000644000004100000410000000265313257253113031774 0ustar www-datawww-data# Primary's lastWriteDate is used normally with SecondaryPreferred and tags. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 1 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} # Not used in estimate since we have a primary. tags: data_center: nyc - &4 address: d:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc - &5 address: e:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. read_preference: mode: SecondaryPreferred maxStalenessSeconds: 150 tag_sets: - data_center: nyc suitable_servers: - *2 - *3 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.yml0000644000004100000410000000176713257253113030224 0ustar www-datawww-data# maxStalenessSeconds is applied before tag sets. With tag sets # [{data_center: nyc}, {data_center: tokyo}], if the only secondary in NYC is # stale then use Tokyo. --- heartbeatFrequencyMS: 25000 # 25 seconds. 
topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} maxWireVersion: 5 tags: data_center: tokyo - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. maxWireVersion: 5 tags: data_center: nyc read_preference: mode: Secondary maxStalenessSeconds: 150 tag_sets: - data_center: nyc - data_center: tokyo suitable_servers: - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/Nearest.yml0000644000004100000410000000143613257253113026567 0ustar www-datawww-dataheartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. maxWireVersion: 5 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. maxWireVersion: 5 read_preference: mode: Nearest maxStalenessSeconds: 150 suitable_servers: - *1 - *2 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.yml0000644000004100000410000000133013257253113027676 0ustar www-datawww-data# If users configure a longer ``heartbeatFrequencyMS`` than the default, # ``maxStalenessSeconds`` might have a larger minimum. --- heartbeatFrequencyMS: 120000 # 120 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 130 # OK, must be 120 + 10 = 130 seconds. suitable_servers: - *1 - *2 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/Incompatible.yml0000644000004100000410000000124213257253113027567 0ustar www-datawww-data# During server selection, clients (drivers or mongos) MUST raise an error if # maxStalenessSeconds is defined and not -1 and any server's ``maxWireVersion`` # is less than 5 (`SERVER-23893`_). --- topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 4 # Incompatible. lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 120 error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.yml0000644000004100000410000000113413257253113030443 0ustar www-datawww-data# Ignore maxStalenessSeconds if primary is available. --- heartbeatFrequencyMS: 25000 # 25 seconds. 
topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: PrimaryPreferred maxStalenessSeconds: 150 suitable_servers: - *1 in_latency_window: - *1 mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.yml0000644000004100000410000000125213257253113027763 0ustar www-datawww-data# If users configure a longer ``heartbeatFrequencyMS`` than the default, # ``maxStalenessSeconds`` might have a larger minimum. --- heartbeatFrequencyMS: 120000 # 120 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 maxWireVersion: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest maxStalenessSeconds: 129 # Too small, must be 120 + 10 = 130 seconds. error: true mongo-2.5.1/spec/support/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.yml0000644000004100000410000000200013257253113032040 0ustar www-datawww-data# maxStalenessSeconds is applied before tag sets. With tag sets # [{data_center: nyc}, {data_center: tokyo}], if the only secondary in NYC is # stale then use Tokyo. --- heartbeatFrequencyMS: 25000 # 25 seconds. topology_description: type: ReplicaSetWithPrimary servers: - &1 address: a:27017 type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} maxWireVersion: 5 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} maxWireVersion: 5 tags: data_center: tokyo - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. maxWireVersion: 5 tags: data_center: nyc read_preference: mode: SecondaryPreferred maxStalenessSeconds: 150 tag_sets: - data_center: nyc - data_center: tokyo suitable_servers: - *2 in_latency_window: - *2 mongo-2.5.1/spec/support/event_subscriber.rb0000644000004100000410000000213213257253113021225 0ustar www-datawww-data# Test event subscriber. # # @since 2.5.0 class EventSubscriber class << self # The started events. # # @since 2.5.0 def started_events @started_events ||= [] end # The succeeded events. # # @since 2.5.0 def succeeded_events @succeeded_events ||= [] end # The failed events. # # @since 2.5.0 def failed_events @failed_events ||= [] end # Cache the succeeded event. # # @param [ Event ] event The event. # # @since 2.5.0 def succeeded(event) succeeded_events.push(event) end # Cache the started event. # # @param [ Event ] event The event. # # @since 2.5.0 def started(event) started_events.push(event) end # Cache the failed event. # # @param [ Event ] event The event. # # @since 2.5.0 def failed(event) failed_events.push(event) end # Clear all cached events. # # @since 2.5.1 def clear_events! @started_events = [] @succeeded_events = [] @failed_events = [] self end end end mongo-2.5.1/spec/support/server_selection_rtt.rb0000644000004100000410000000237513257253113022136 0ustar www-datawww-datamodule Mongo module ServerSelection module RTT # Represents a specification. 
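# Each fixture supplies a starting average, a new ismaster round trip time and
# the expected new average. Per the server selection spec these fixtures come
# from, the expectation is an exponentially weighted moving average with
# alpha = 0.2, i.e. roughly:
#
#   new_avg_rtt = 0.2 * new_rtt_ms + 0.8 * avg_rtt_ms
#
# (or simply new_rtt_ms when there is no prior average).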
# # @since 2.0.0 class Spec # @return [ String ] description The spec description. attr_reader :description # @return [ Float ] avg_rtt_ms The starting average round trip time. attr_reader :avg_rtt_ms # @return [ Float ] new_rtt_ms The new round trip time for ismaster. attr_reader :new_rtt_ms # @return [ Float ] new_avg_rtt The newly calculated moving average round trip time. attr_reader :new_avg_rtt # Instantiate the new spec. # # @example Create the spec. # Spec.new(file) # # @param [ String ] file The name of the file. # # @since 2.0.0 def initialize(file) @test = YAML.load(ERB.new(File.new(file).read).result) @description = "avg_rtt_ms: #{@test['avg_rtt_ms']}, new_rtt_ms: #{@test['new_rtt_ms']}," + " new_avg_rtt: #{@test['new_avg_rtt']}" @avg_rtt_ms = @test['avg_rtt_ms'] == 'NULL' ? nil : @test['avg_rtt_ms'].to_f @new_rtt_ms = @test['new_rtt_ms'].to_f @new_avg_rtt = @test['new_avg_rtt'].to_f end end end end end mongo-2.5.1/spec/support/authorization.rb0000644000004100000410000002166713257253113020577 0ustar www-datawww-data# Copyright (C) 2009-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The default test database for all specs. # # @since 2.0.0 TEST_DB = 'ruby-driver'.freeze # The default test collection. # # @since 2.0.0 TEST_COLL = 'test'.freeze # For Evergreen if ENV['MONGODB_URI'] MONGODB_URI = Mongo::URI.new(ENV['MONGODB_URI']) URI_OPTIONS = Mongo::Options::Mapper.transform_keys_to_symbols(MONGODB_URI.uri_options) if URI_OPTIONS[:replica_set] ADDRESSES = MONGODB_URI.servers CONNECT = { connect: :replica_set, replica_set: URI_OPTIONS[:replica_set] } elsif ENV['TOPOLOGY'] == 'sharded_cluster' ADDRESSES = [ MONGODB_URI.servers.first ] # See SERVER-16836 for why we can only use one host:port CONNECT = { connect: :sharded } else ADDRESSES = MONGODB_URI.servers CONNECT = { connect: :direct } end else ADDRESSES = ENV['MONGODB_ADDRESSES'] ? ENV['MONGODB_ADDRESSES'].split(',').freeze : [ '127.0.0.1:27017' ].freeze if ENV['RS_ENABLED'] CONNECT = { connect: :replica_set, replica_set: ENV['RS_NAME'] } elsif ENV['SHARDED_ENABLED'] CONNECT = { connect: :sharded } else CONNECT = { connect: :direct } end end # The write concern to use in the tests. # # @since 2.0.0 WRITE_CONCERN = CONNECT[:connect] == :replica_set ? { w: 2 } : { w: 1 } # An invalid write concern. # # @since 2.4.2 INVALID_WRITE_CONCERN = { w: 4 } # Whether to use SSL. # # @since 2.0.3 SSL = (ENV['SSL'] == 'ssl') || (ENV['SSL_ENABLED'] == 'true') # What compressor to use, if any. # # @since 2.5.0 COMPRESSORS = ENV['COMPRESSORS'] ? { compressors: ENV['COMPRESSORS'].split(',') } : {} # SSL options. # # @since 2.1.0 SSL_OPTIONS = { ssl: SSL, ssl_verify: false, ssl_cert: CLIENT_CERT_PEM, ssl_key: CLIENT_KEY_PEM } # Base test options. # # @since 2.1.0 BASE_OPTIONS = { max_pool_size: 1, write: WRITE_CONCERN, heartbeat_frequency: 20, max_read_retries: 5, wait_queue_timeout: 2, connect_timeout: 3, max_idle_time: 5 } # Options for test suite clients. 
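# For a plain local standalone run (no MONGODB_URI, SSL or compressors
# configured), these merge to roughly:
#
#   { max_pool_size: 1, write: { w: 1 }, heartbeat_frequency: 20,
#     max_read_retries: 5, wait_queue_timeout: 2, connect_timeout: 3,
#     max_idle_time: 5, connect: :direct, ssl: false, ssl_verify: false,
#     ssl_cert: CLIENT_CERT_PEM, ssl_key: CLIENT_KEY_PEM }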
# # @since 2.0.3 TEST_OPTIONS = BASE_OPTIONS.merge(CONNECT).merge(SSL_OPTIONS).merge(COMPRESSORS) # The root user name. # # @since 2.0.0 ROOT_USER_NAME = (defined?(MONGODB_URI) && MONGODB_URI.credentials[:user]) || 'root-user' # The root user password. # # @since 2.0.0 ROOT_USER_PWD = (defined?(MONGODB_URI) && MONGODB_URI.credentials[:password]) || 'password' # The root user auth source. # # @since 2.4.2 ROOT_USER_AUTH_SOURCE = (defined?(URI_OPTIONS) && URI_OPTIONS[:auth_source]) || Mongo::Database::ADMIN # Gets the root system administrator user. # # @since 2.0.0 ROOT_USER = Mongo::Auth::User.new( user: ROOT_USER_NAME, password: ROOT_USER_PWD, roles: [ Mongo::Auth::Roles::USER_ADMIN_ANY_DATABASE, Mongo::Auth::Roles::DATABASE_ADMIN_ANY_DATABASE, Mongo::Auth::Roles::READ_WRITE_ANY_DATABASE, Mongo::Auth::Roles::HOST_MANAGER, Mongo::Auth::Roles::CLUSTER_ADMIN ] ) # Get the default test user for the suite on versions 2.6 and higher. # # @since 2.0.0 TEST_USER = Mongo::Auth::User.new( database: Mongo::Database::ADMIN, user: 'test-user', password: 'password', roles: [ { role: Mongo::Auth::Roles::READ_WRITE, db: TEST_DB }, { role: Mongo::Auth::Roles::DATABASE_ADMIN, db: TEST_DB }, { role: Mongo::Auth::Roles::READ_WRITE, db: 'invalid_database' }, { role: Mongo::Auth::Roles::DATABASE_ADMIN, db: 'invalid_database' } ] ) # MongoDB 2.4 and lower does not allow hashes as roles, so we need to create a # user on those versions for each database permission in order to ensure the # legacy roles work with users. The following users are those. # Gets the default test user for the suite on 2.4 and lower. # # @since 2.0. TEST_READ_WRITE_USER = Mongo::Auth::User.new( database: TEST_DB, user: TEST_USER.name, password: TEST_USER.password, roles: [ Mongo::Auth::Roles::READ_WRITE, Mongo::Auth::Roles::DATABASE_ADMIN ] ) # Provides an authorized mongo client on the default test database for the # default test user. # # @since 2.0.0 AUTHORIZED_CLIENT = Mongo::Client.new( ADDRESSES, TEST_OPTIONS.merge( database: TEST_DB, user: TEST_USER.name, password: TEST_USER.password) ) # Provides an authorized mongo client that retries writes. # # @since 2.5.1 AUTHROIZED_CLIENT_WITH_RETRY_WRITES = AUTHORIZED_CLIENT.with(retry_writes: true) # Provides an unauthorized mongo client on the default test database. # # @since 2.0.0 UNAUTHORIZED_CLIENT = Mongo::Client.new( ADDRESSES, TEST_OPTIONS.merge(database: TEST_DB, monitoring: false) ) # Provides an unauthorized mongo client on the admin database, for use in # setting up the first admin root user. # # @since 2.0.0 ADMIN_UNAUTHORIZED_CLIENT = Mongo::Client.new( ADDRESSES, TEST_OPTIONS.merge(database: Mongo::Database::ADMIN, monitoring: false) ) # Get an authorized client on the test database logged in as the admin # root user. # # @since 2.0.0 ADMIN_AUTHORIZED_TEST_CLIENT = ADMIN_UNAUTHORIZED_CLIENT.with( user: ROOT_USER.name, password: ROOT_USER.password, database: TEST_DB, auth_source: ROOT_USER_AUTH_SOURCE, monitoring: false ) # A client that has an event subscriber for commands. # # @since 2.5.1 SUBSCRIBED_CLIENT = Mongo::Client.new( ADDRESSES, TEST_OPTIONS.merge( database: TEST_DB, user: TEST_USER.name, password: TEST_USER.password) ) SUBSCRIBED_CLIENT.subscribe(Mongo::Monitoring::COMMAND, EventSubscriber) AUTHROIZED_CLIENT_WITH_RETRY_WRITES.subscribe(Mongo::Monitoring::COMMAND, EventSubscriber) module Authorization # On inclusion provides helpers for use with testing with and without # authorization. 
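# The suite is expected to mix this module in through RSpec, along the lines
# of (illustrative, not the only way to wire it up):
#
#   RSpec.configure do |config|
#     config.include(Authorization)
#   end
#
# after which examples can refer to `authorized_client`,
# `authorized_collection`, `root_authorized_client` and friends directly.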
# # # @since 2.0.0 def self.included(context) # Gets the root system administrator user. # # @since 2.0.0 context.let(:root_user) { ROOT_USER } # Get the default test user for the suite. # # @since 2.0.0 context.let(:test_user) { TEST_USER } # Provides an authorized mongo client on the default test database for the # default test user. # # @since 2.0.0 context.let(:authorized_client) { AUTHORIZED_CLIENT } # Provides an authorized mongo client on the default test database that retries writes. # # @since 2.5.1 context.let(:authorized_client_with_retry_writes) do EventSubscriber.clear_events! AUTHROIZED_CLIENT_WITH_RETRY_WRITES end # Provides an authorized mongo client that has a Command subscriber. # # @since 2.5.1 context.let(:subscribed_client) do EventSubscriber.clear_events! SUBSCRIBED_CLIENT end # Provides an unauthorized mongo client on the default test database. # # @since 2.0.0 context.let!(:unauthorized_client) { UNAUTHORIZED_CLIENT } # Provides an unauthorized mongo client on the admin database, for use in # setting up the first admin root user. # # @since 2.0.0 context.let!(:admin_unauthorized_client) { ADMIN_UNAUTHORIZED_CLIENT } # Get an authorized client on the test database logged in as the admin # root user. # # @since 2.0.0 context.let!(:root_authorized_client) { ADMIN_AUTHORIZED_TEST_CLIENT } # Gets the default test collection from the authorized client. # # @since 2.0.0 context.let(:authorized_collection) do authorized_client[TEST_COLL] end # Gets the default test collection from the unauthorized client. # # @since 2.0.0 context.let(:unauthorized_collection) do unauthorized_client[TEST_COLL] end # Gets a primary server for the default authorized client. # # @since 2.0.0 context.let(:authorized_primary) do authorized_client.cluster.next_primary end # Get a primary server for the client authorized as the root system # administrator. # # @since 2.0.0 context.let(:root_authorized_primary) do root_authorized_client.cluster.next_primary end # Get a primary server from the unauthorized client. # # @since 2.0.0 context.let(:unauthorized_primary) do authorized_client.cluster.next_primary end # Get a default address (of the primary). # # @since 2.2.6 context.let(:default_address) do authorized_client.cluster.next_primary.address end # Get a default app metadata. 
# # @since 2.4.0 context.let(:app_metadata) do authorized_client.cluster.app_metadata end end end mongo-2.5.1/spec/support/sdam/0000755000004100000410000000000013257253113016262 5ustar www-datawww-datamongo-2.5.1/spec/support/sdam/single/0000755000004100000410000000000013257253113017543 5ustar www-datawww-datamongo-2.5.1/spec/support/sdam/single/ls_timeout_standalone.yml0000644000004100000410000000124513257253113024664 0ustar www-datawww-datadescription: "Parse logicalSessionTimeoutMinutes from standalone" uri: "mongodb://a" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, logicalSessionTimeoutMinutes: 7, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Standalone", setName: } }, topologyType: "Single", logicalSessionTimeoutMinutes: 7, setName: } } ] mongo-2.5.1/spec/support/sdam/single/unavailable_seed.yml0000644000004100000410000000070013257253113023546 0ustar www-datawww-datadescription: "Unavailable seed" uri: "mongodb://a/?connect=direct" phases: [ { responses: [ ["a:27017", {}] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/not_ok_response.yml0000644000004100000410000000120313257253113023471 0ustar www-datawww-datadescription: "Handle a not-ok ismaster response" uri: "mongodb://a" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true }], ["a:27017", { ok: 0, ismaster: true }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/direct_connection_mongos.yml0000644000004100000410000000106513257253113025343 0ustar www-datawww-datadescription: "Connect to mongos" uri: "mongodb://a/?connect=direct" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }] ], outcome: { servers: { "a:27017": { type: "Mongos", setName: } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/direct_connection_external_ip.yml0000644000004100000410000000122013257253113026344 0ustar www-datawww-datadescription: "Direct connection to RSPrimary via external IP" uri: "mongodb://a/?connect=direct" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["b:27017"], # Internal IP. 
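# With connect=direct the topology stays Single, so the seed "a:27017" is
# kept in the outcome below even though it never appears in this host list.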
setName: "rs" }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/direct_connection_slave.yml0000644000004100000410000000101613257253113025147 0ustar www-datawww-datadescription: "Direct connection to slave" uri: "mongodb://a" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: false }] ], outcome: { servers: { "a:27017": { type: "Standalone", setName: } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/direct_connection_rsprimary.yml0000644000004100000410000000115513257253113026071 0ustar www-datawww-datadescription: "Connect to RSPrimary" uri: "mongodb://a/?connect=direct" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs" }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/standalone_removed.yml0000644000004100000410000000104013257253113024132 0ustar www-datawww-datadescription: "Standalone removed from multi-server topology" uri: "mongodb://a,b" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true }] ], outcome: { servers: { "b:27017": { type: "Unknown", setName: } }, topologyType: "Unknown", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/direct_connection_rsarbiter.yml0000644000004100000410000000122413257253113026033 0ustar www-datawww-datadescription: "Connect to RSArbiter" uri: "mongodb://a/?connect=direct" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: false, arbiterOnly: true, hosts: ["a:27017", "b:27017"], setName: "rs" }] ], outcome: { servers: { "a:27017": { type: "RSArbiter", setName: "rs" } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/direct_connection_standalone.yml0000644000004100000410000000101013257253113026157 0ustar www-datawww-datadescription: "Connect to standalone" uri: "mongodb://a" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true }] ], outcome: { servers: { "a:27017": { type: "Standalone", setName: } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/single/direct_connection_rssecondary.yml0000644000004100000410000000123313257253113026372 0ustar www-datawww-datadescription: "Connect to RSSecondary" uri: "mongodb://a/?connect=direct" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017", "b:27017"], setName: "rs" }] ], outcome: { servers: { "a:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "Single", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/rs/0000755000004100000410000000000013257253113016706 5ustar www-datawww-datamongo-2.5.1/spec/support/sdam/rs/equal_electionids.yml0000644000004100000410000000300513257253113023120 0ustar www-datawww-datadescription: "New primary with equal electionId" uri: "mongodb://a/?replicaSet=rs" phases: [ # A and B claim to be primaries, with equal electionIds. 
{ responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], # No choice but to believe the latter response. outcome: { servers: { "a:27017": { type: "Unknown", setName: , setVersion: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"} } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/discover_secondary.yml0000644000004100000410000000157413257253113023325 0ustar www-datawww-datadescription: "Replica set discovery from secondary" uri: "mongodb://b/?replicaSet=rs" phases: [ { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: }, "b:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/member_reconfig.yml0000644000004100000410000000270213257253113022555 0ustar www-datawww-datadescription: "Member removed by reconfig" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/primary_becomes_standalone.yml0000644000004100000410000000210713257253113025021 0ustar www-datawww-datadescription: "Primary becomes standalone" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["a:27017", { ok: 1, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: {}, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/compatible.yml0000644000004100000410000000227113257253113021552 0ustar www-datawww-datadescription: "Replica set member with large maxWireVersion" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 1000 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" } }, 
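# Roughly: a member is flagged incompatible only when its wire-version range
# cannot overlap the driver's, so maxWireVersion 1000 (with minWireVersion 0)
# still yields `compatible: true` below.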
topologyType: "ReplicaSetWithPrimary", setName: "rs", logicalSessionTimeoutMinutes: null, compatible: true } } ] mongo-2.5.1/spec/support/sdam/rs/new_primary_new_setversion.yml0000644000004100000410000000604013257253113025117 0ustar www-datawww-datadescription: "New primary with greater setVersion" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary A is discovered and tells us about B. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"} }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # RS is reconfigured and B is elected. { responses: [ ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000001"} } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # A still claims to be primary but it's ignored. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000001"} } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/setversion_without_electionid.yml0000644000004100000410000000372313257253113025621 0ustar www-datawww-datadescription: "setVersion is ignored if there is no electionId" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary A is discovered and tells us about B. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 2, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 2 , electionId: }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # B is elected, its setVersion is older but we believe it anyway, because # setVersion is only used in conjunction with electionId. 
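# (When electionIds are present, drivers instead keep the highest
# setVersion/electionId pair seen so far and ignore primary claims that
# compare lower, setVersion first; see use_setversion_without_electionid.yml
# and new_primary_new_electionid.yml.)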
{ responses: [ ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/too_old.yml0000644000004100000410000000216013257253113021067 0ustar www-datawww-datadescription: "Replica set member with default maxWireVersion of 0" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017"] }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", setName: "rs", logicalSessionTimeoutMinutes: null, compatible: false } } ] mongo-2.5.1/spec/support/sdam/rs/primary_to_no_primary_mismatched_me.yml0000644000004100000410000000321213257253113026732 0ustar www-datawww-datadescription: "Primary to no primary with mismatched me" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], me: "a:27017", setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["c:27017", "d:27017"], me : "c:27017", setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "c:27017": { type: "Unknown", setName: }, "d:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/non_rs_member.yml0000644000004100000410000000113213257253113022253 0ustar www-datawww-datadescription: "Non replicaSet member responds" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["b:27017", { ok: 1, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/primary_changes_set_name.yml0000644000004100000410000000245013257253113024460 0ustar www-datawww-datadescription: "Primary changes setName" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary is discovered normally. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # Primary changes its setName. Remove it and change the topologyType. 
{ responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: {}, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/ls_timeout.yml0000644000004100000410000001543113257253113021621 0ustar www-datawww-datadescription: "Parse logicalSessionTimeoutMinutes from replica set" uri: "mongodb://a/?replicaSet=rs" phases: [ # An RSPrimary responds with a non-null logicalSessionTimeoutMinutes { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017", "c:27017", "d:27017", "e:27017"], setName: "rs", logicalSessionTimeoutMinutes: 3, minWireVersion: 0, maxWireVersion: 6 }], ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", }, "c:27017": { type: "Unknown", }, "d:27017": { type: "Unknown", }, "e:27017": { type: "Unknown", } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: 3, setName: "rs", } }, # An RSGhost responds without a logicalSessionTimeoutMinutes { responses: [ ["d:27017", { ok: 1, ismaster: false, isreplicaset: true, minWireVersion: 0, maxWireVersion: 6 }], ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", }, "c:27017": { type: "Unknown", }, "d:27017": { type: "RSGhost", }, "e:27017": { type: "Unknown", } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: 3, setName: "rs", } }, # An RSArbiter responds without a logicalSessionTimeoutMinutes { responses: [ ["e:27017", { ok: 1, ismaster: false, hosts: ["a:27017", "b:27017", "c:27017", "d:27017", "e:27017"], setName: "rs", arbiterOnly: true, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", }, "c:27017": { type: "Unknown", }, "d:27017": { type: "RSGhost", }, "e:27017": { type: "RSArbiter", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: 3, setName: "rs", } }, # An RSSecondary responds with a lower logicalSessionTimeoutMinutes { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017", "b:27017", "c:27017", "d:27017", "e:27017"], setName: "rs", logicalSessionTimeoutMinutes: 2, minWireVersion: 0, maxWireVersion: 6 }], ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" }, "c:27017": { type: "Unknown", }, "d:27017": { type: "RSGhost", }, "e:27017": { type: "RSArbiter", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: 2, setName: "rs", } }, # An RSOther responds with an even lower logicalSessionTimeoutMinutes, which is ignored { responses: [ ["c:27017", { ok: 1, ismaster: false, setName: "rs", hidden: true, logicalSessionTimeoutMinutes: 1, minWireVersion: 0, maxWireVersion: 6 }], ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" }, "c:27017": { type: "RSOther", setName: "rs" }, "d:27017": { type: "RSGhost", }, "e:27017": { type: "RSArbiter", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: 2, setName: "rs", } }, # Now the RSSecondary responds with no logicalSessionTimeoutMinutes { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017", "b:27017", "c:27017", "d:27017", "e:27017"], setName: "rs", 
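# The topology-wide value is the minimum over data-bearing members, so one
# null here is enough to null it out for the whole replica set.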
logicalSessionTimeoutMinutes: null, minWireVersion: 0, maxWireVersion: 6 }] ], # Sessions aren't supported now outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" }, "c:27017": { type: "RSOther", setName: "rs" }, "d:27017": { type: "RSGhost", }, "e:27017": { type: "RSArbiter", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/stepdown_change_set_name.yml0000644000004100000410000000260113257253113024453 0ustar www-datawww-datadescription: "Primary becomes a secondary with wrong setName" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary is discovered normally. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # Primary changes its setName and becomes secondary. # Remove it and change the topologyType. { responses: [ ["a:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: {}, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/discover_arbiters.yml0000644000004100000410000000154313257253113023145 0ustar www-datawww-datadescription: "Discover arbiters" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], arbiters: ["b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/new_primary.yml0000644000004100000410000000306213257253113021766 0ustar www-datawww-datadescription: "New primary" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["b:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: }, "b:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/primary_wrong_set_name.yml0000644000004100000410000000110213257253113024175 0ustar www-datawww-datadescription: "Primary wrong setName" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: {}, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/discovery.yml0000644000004100000410000001052313257253113021441 0ustar www-datawww-datadescription: "Replica set discovery" uri: "mongodb://a/?replicaSet=rs" phases: [ # At first, a, b, and c are 
secondaries. { responses: [ ["a:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSSecondary", setName: "rs" }, "b:27017": { type: "Unknown", setName: }, "c:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # Admin removes a, adds a high-priority member d which becomes primary. { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", primary: "d:27017", hosts: ["b:27017", "c:27017", "d:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSSecondary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" }, "c:27017": { type: "Unknown", setName: }, "d:27017": { type: "PossiblePrimary", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # Primary responds. { responses: [ ["d:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["b:27017", "c:27017", "d:27017", "e:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { # e is new. servers: { "b:27017": { type: "RSSecondary", setName: "rs" }, "c:27017": { type: "Unknown", setName: }, "d:27017": { type: "RSPrimary", setName: "rs" }, "e:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # Stale response from c. { responses: [ ["c:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { # We don't add a back. # We don't remove e. servers: { "b:27017": { type: "RSSecondary", setName: "rs" }, "c:27017": { type: "RSSecondary", setName: "rs" }, "d:27017": { type: "RSPrimary", setName: "rs" }, "e:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/secondary_mismatched_me.yml0000644000004100000410000000113713257253113024301 0ustar www-datawww-datadescription: Secondary mismatched me phases: - outcome: servers: 'a:27017': setName: null type: Unknown 'b:27017': setName: null type: Unknown setName: rs topologyType: ReplicaSetNoPrimary logicalSessionTimeoutMinutes: null responses: - - 'localhost:27017' - me: 'a:27017' hosts: - 'a:27017' - 'b:27017' ismaster: false ok: 1 setName: rs minWireVersion: 0 maxWireVersion: 6 uri: 'mongodb://localhost:27017/?replicaSet=rs' mongo-2.5.1/spec/support/sdam/rs/hosts_differ_from_seeds.yml0000644000004100000410000000130613257253113024316 0ustar www-datawww-datadescription: "Host list differs from seeds" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/normalize_case.yml0000644000004100000410000000202013257253113022416 0ustar www-datawww-datadescription: "Replica set case normalization" uri: "mongodb://A/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["A:27017"], passives: ["B:27017"], arbiters: ["C:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: 
"RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: }, "c:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/rsother_discovered.yml0000644000004100000410000000266513257253113023337 0ustar www-datawww-datadescription: "RSOther discovered" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: false, secondary: true, hidden: true, hosts: ["c:27017", "d:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: false, secondary: false, hosts: ["c:27017", "d:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSOther", setName: "rs" }, "b:27017": { type: "RSOther", setName: "rs" }, "c:27017": { type: "Unknown", setName: }, "d:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/use_setversion_without_electionid.yml0000644000004100000410000000574513257253113026503 0ustar www-datawww-datadescription: "Record max setVersion, even from primary without electionId" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary A has setVersion and electionId, tells us about B. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"} }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # Reconfig the set and elect B, it has a new setVersion but no electionId. { responses: [ ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 2, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 2 } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # Delayed response from A, reporting its reelection. Its setVersion shows # the election preceded B's so we ignore it. 
{ responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 2 } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/discover_primary.yml0000644000004100000410000000152413257253113023014 0ustar www-datawww-datadescription: "Replica set discovery from primary" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/member_standalone.yml0000644000004100000410000000232413257253113023111 0ustar www-datawww-datadescription: "Member brought up as standalone" uri: "mongodb://a,b" phases: [ { responses: [ ["b:27017", { ok: 1, ismaster: true, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: } }, topologyType: "Unknown", logicalSessionTimeoutMinutes: null, setName: } }, { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/new_primary_new_electionid.yml0000644000004100000410000000603013257253113025034 0ustar www-datawww-datadescription: "New primary with greater setVersion and electionId" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary A is discovered and tells us about B. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"} }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # B is elected. { responses: [ ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"} } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # A still claims to be primary but it's ignored. 
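# Both claims carry setVersion 1, so the electionIds decide:
# ...0001 < ...0002, and A is marked Unknown in the outcome below.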
{ responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"} } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/wrong_set_name.yml0000644000004100000410000000135513257253113022444 0ustar www-datawww-datadescription: "Wrong setName" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["b:27017", "c:27017"], setName: "wrong", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/set_version_without_electionid.yml0000644000004100000410000000334713257253113025762 0ustar www-datawww-datadescription: "setVersion is ignored if there is no electionId" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary A is discovered and tells us about B. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 2 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 2 , electionId: }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", setName: "rs", } }, # B is elected, its setVersion is older but we believe it anyway, because # setVersion is only used in conjunction with electionId. { responses: [ ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: } }, topologyType: "ReplicaSetWithPrimary", setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/unexpected_mongos.yml0000644000004100000410000000102513257253113023155 0ustar www-datawww-datadescription: "Unexpected mongos" uri: "mongodb://b/?replicaSet=rs" phases: [ { responses: [ ["b:27017", { ok: 1, ismaster: true, msg: "isdbgrid", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: {}, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/new_primary_wrong_set_name.yml0000644000004100000410000000320613257253113025055 0ustar www-datawww-datadescription: "New primary with wrong setName" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary is discovered normally, and tells us about server B. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # B is actually the primary of another replica set. It's removed, and # topologyType remains ReplicaSetWithPrimary. 
{ responses: [ ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/ghost_discovered.yml0000644000004100000410000000141613257253113022766 0ustar www-datawww-datadescription: "Ghost discovered" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["b:27017", { ok: 1, ismaster: false, isreplicaset: true, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: }, "b:27017": { type: "RSGhost", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/primary_disconnect_setversion.yml0000644000004100000410000001136513257253113025614 0ustar www-datawww-datadescription: "Disconnected from primary, reject primary with stale setVersion" uri: "mongodb://a/?replicaSet=rs" phases: [ # A is elected, then B after a reconfig. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000001"} } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # Disconnected from B. { responses: [ ["b:27017", {}] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # A still claims to be primary but it's ignored. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # Now A is re-elected. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000002"} }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # B comes back as secondary. 
{ responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 2, electionId: {"$oid": "000000000000000000000002"} }, "b:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/secondary_wrong_set_name_with_primary.yml0000644000004100000410000000300013257253113027276 0ustar www-datawww-datadescription: "Secondary wrong setName with primary" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017", "b:27017"], setName: "wrong", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/primary_disconnect_electionid.yml0000644000004100000410000001134413257253113025527 0ustar www-datawww-datadescription: "Disconnected from primary, reject primary with stale electionId" uri: "mongodb://a/?replicaSet=rs" phases: [ # A is elected, then B. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"} } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # Disconnected from B. { responses: [ ["b:27017", {}] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # A still claims to be primary but it's ignored. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # Now A is re-elected. 
{ responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000003"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000003"} }, "b:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # B comes back as secondary. { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000003"} }, "b:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/primary_mismatched_me.yml0000644000004100000410000000113413257253113023772 0ustar www-datawww-datadescription: Primary mismatched me phases: - outcome: servers: 'a:27017': setName: null type: Unknown 'b:27017': setName: null type: Unknown setName: rs topologyType: ReplicaSetNoPrimary logicalSessionTimeoutMinutes: null responses: - - 'localhost:27017' - me: 'a:27017' hosts: - 'a:27017' - 'b:27017' ismaster: true ok: 1 setName: rs minWireVersion: 0 maxWireVersion: 6 uri: 'mongodb://localhost:27017/?replicaSet=rs' mongo-2.5.1/spec/support/sdam/rs/secondary_wrong_set_name.yml0000644000004100000410000000115213257253113024506 0ustar www-datawww-datadescription: "Secondary wrong setName" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: false, secondary: true, hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: {}, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/primary_reports_new_member.yml0000644000004100000410000000716113257253113025077 0ustar www-datawww-datadescription: "Primary reports a new member" uri: "mongodb://a/?replicaSet=rs" phases: [ # At first, a is a secondary. { responses: [ ["a:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSSecondary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # b is the primary. { responses: [ ["b:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSSecondary", setName: "rs" }, "b:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # Admin adds a secondary member c. { responses: [ ["b:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { # c is new. servers: { "a:27017": { type: "RSSecondary", setName: "rs" }, "b:27017": { type: "RSPrimary", setName: "rs" }, "c:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, # c becomes secondary. 
{ responses: [ ["c:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", primary: "b:27017", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { # c is a secondary. servers: { "a:27017": { type: "RSSecondary", setName: "rs" }, "b:27017": { type: "RSPrimary", setName: "rs" }, "c:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/sec_not_auth.yml0000644000004100000410000000224113257253113022103 0ustar www-datawww-datadescription: "Secondary's host list is not authoritative" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["b:27017", "c:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/discover_passives.yml0000644000004100000410000000330713257253113023167 0ustar www-datawww-datadescription: "Discover passives" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], passives: ["b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, passive: true, hosts: ["a:27017"], passives: ["b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/primary_hint_from_secondary_with_mismatched_me.yml0000644000004100000410000000300613257253113031141 0ustar www-datawww-datadescription: "Secondary with mismatched 'me' tells us who the primary is" uri: "mongodb://a/?replicaSet=rs" phases: [ # A is a secondary with mismatched "me". Remove A, add PossiblePrimary B. { responses: [ ["a:27017", { ok: 1, ismaster: false, secondary: true, me: "c:27017", hosts: ["b:27017"], setName: "rs", primary: "b:27017", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "b:27017": { type: "PossiblePrimary", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # Discover B is primary. 
{ responses: [ ["b:27017", { ok: 1, ismaster: true, me: "b:27017", hosts: ["b:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "b:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/response_from_removed.yml0000644000004100000410000000256613257253113024044 0ustar www-datawww-datadescription: "Response from removed server" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["b:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/rs/too_new.yml0000644000004100000410000000227413257253113021110 0ustar www-datawww-datadescription: "Replica set member with large minWireVersion" uri: "mongodb://a,b/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: false, secondary: true, setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 999, maxWireVersion: 1000 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" }, "b:27017": { type: "RSSecondary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", setName: "rs", logicalSessionTimeoutMinutes: null, compatible: false } } ] mongo-2.5.1/spec/support/sdam/rs/null_election_id.yml0000644000004100000410000001102513257253113022740 0ustar www-datawww-datadescription: "Primaries with and without electionIds" uri: "mongodb://a/?replicaSet=rs" phases: [ # Primary A has no electionId. { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017", "c:27017"], setVersion: 1, setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: }, "b:27017": { type: "Unknown", setName: , electionId: }, "c:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # B is elected, it has an electionId. { responses: [ ["b:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017", "c:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: , electionId: }, "b:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000002"} }, "c:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # A still claims to be primary, no electionId, we have to trust it. 
{ responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017", "c:27017"], setVersion: 1, setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: }, "b:27017": { type: "Unknown", setName: , electionId: }, "c:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } }, # But we remember B's electionId, so when we finally hear from C # claiming it is primary, we ignore it due to its outdated electionId { responses: [ ["c:27017", { ok: 1, ismaster: true, hosts: ["a:27017", "b:27017", "c:27017"], setName: "rs", setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { # Still primary. "a:27017": { type: "RSPrimary", setName: "rs", setVersion: 1, electionId: }, "b:27017": { type: "Unknown", setName: , electionId: }, "c:27017": { type: "Unknown", setName: , electionId: } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs", } } ] mongo-2.5.1/spec/support/sdam/rs/primary_disconnect.yml0000644000004100000410000000211613257253113023325 0ustar www-datawww-datadescription: "Disconnected from primary" uri: "mongodb://a/?replicaSet=rs" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, hosts: ["a:27017"], setName: "rs", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "RSPrimary", setName: "rs" } }, topologyType: "ReplicaSetWithPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } }, { responses: [ ["a:27017", {}] ], outcome: { servers: { "a:27017": { type: "Unknown", setName: } }, topologyType: "ReplicaSetNoPrimary", logicalSessionTimeoutMinutes: null, setName: "rs" } } ] mongo-2.5.1/spec/support/sdam/sharded/0000755000004100000410000000000013257253113017674 5ustar www-datawww-datamongo-2.5.1/spec/support/sdam/sharded/multiple_mongoses.yml0000644000004100000410000000146213257253113024167 0ustar www-datawww-datadescription: "Multiple mongoses" uri: "mongodb://a,b" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }], ["b:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }] ], outcome: { servers: { "a:27017": { type: "Mongos", setName: }, "b:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/sharded/non_mongos_removed.yml0000644000004100000410000000137013257253113024315 0ustar www-datawww-datadescription: "Non-Mongos server in sharded cluster" uri: "mongodb://a,b" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }], ["b:27017", { ok: 1, ismaster: true, hosts: ["b:27017"], setName: "rs" }] ], outcome: { servers: { "a:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/sharded/normalize_uri_case.yml0000644000004100000410000000101013257253113024261 0ustar www-datawww-datadescription: "Normalize URI case" uri: "mongodb://A,B" phases: [ { responses: [ ], outcome: { servers: { "a:27017": { type: "Unknown", setName: }, "b:27017": { type: "Unknown", setName: } }, topologyType: "Unknown", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/sharded/ls_timeout_mongos.yml0000644000004100000410000000421313257253113024165 0ustar www-datawww-datadescription: "Parse 
logicalSessionTimeoutMinutes from mongoses" uri: "mongodb://a,b" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid", logicalSessionTimeoutMinutes: 1, minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: true, msg: "isdbgrid", logicalSessionTimeoutMinutes: 2, minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Mongos", setName: }, "b:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", logicalSessionTimeoutMinutes: 1, # Minimum of the two setName: } }, # Now an ismaster response with no logicalSessionTimeoutMinutes { responses: [ ["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid", logicalSessionTimeoutMinutes: 1, minWireVersion: 0, maxWireVersion: 6 }], ["b:27017", { ok: 1, ismaster: true, msg: "isdbgrid", minWireVersion: 0, maxWireVersion: 6 }] ], outcome: { servers: { "a:27017": { type: "Mongos", setName: }, "b:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", logicalSessionTimeoutMinutes: null, # Sessions not supported now setName: } } ] mongo-2.5.1/spec/support/sdam/sharded/mongos_disconnect.yml0000644000004100000410000000363113257253113024135 0ustar www-datawww-datadescription: "Mongos disconnect" uri: "mongodb://a,b" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }], ["b:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }] ], outcome: { servers: { "a:27017": { type: "Mongos", setName: }, "b:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", logicalSessionTimeoutMinutes: null, setName: } }, { responses: [ ["a:27017", {}], # Hangup. ], outcome: { servers: { "a:27017": { type: "Unknown", setName: }, "b:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", logicalSessionTimeoutMinutes: null, setName: } }, { responses: [ # Back in action. 
["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }], ], outcome: { servers: { "a:27017": { type: "Mongos", setName: }, "b:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", logicalSessionTimeoutMinutes: null, setName: } } ] mongo-2.5.1/spec/support/sdam/sharded/single_mongos.yml0000644000004100000410000000076313257253113023270 0ustar www-datawww-datadescription: "Single mongos" uri: "mongodb://a" phases: [ { responses: [ ["a:27017", { ok: 1, ismaster: true, msg: "isdbgrid" }] ], outcome: { servers: { "a:27017": { type: "Mongos", setName: } }, topologyType: "Sharded", setName: } } ] mongo-2.5.1/spec/support/sdam_monitoring/0000755000004100000410000000000013257253113020527 5ustar www-datawww-datamongo-2.5.1/spec/support/sdam_monitoring/replica_set_with_removal.yml0000644000004100000410000000560113257253113026326 0ustar www-datawww-datadescription: "Monitoring a replica set with non member" uri: "mongodb://a,b/" phases: - responses: - - "a:27017" - { ok: 1, ismaster: true, setName: "rs", setVersion: 1.0, primary: "a:27017", hosts: [ "a:27017" ], minWireVersion: 0, maxWireVersion: 4 } - - "b:27017" - { ok: 1, ismaster: true } outcome: events: - topology_opening_event: topologyId: "42" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Unknown" servers: [] newDescription: topologyType: "Unknown" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - server_opening_event: topologyId: "42" address: "a:27017" - server_opening_event: topologyId: "42" address: "b:27017" - server_description_changed_event: topologyId: "42" address: "a:27017" previousDescription: address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: address: "a:27017" arbiters: [] hosts: [ "a:27017" ] passives: [] primary: "a:27017" setName: "rs" type: "RSPrimary" - server_closed_event: topologyId: "42" address: "b:27017" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Unknown" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: topologyType: "ReplicaSetWithPrimary" setName: "rs" servers: - address: "a:27017" arbiters: [] hosts: [ "a:27017" ] passives: [] primary: "a:27017" setName: "rs" type: "RSPrimary" mongo-2.5.1/spec/support/sdam_monitoring/required_replica_set.yml0000644000004100000410000000454713257253113025456 0ustar www-datawww-datadescription: "Monitoring a topology that is required to be a replica set" uri: "mongodb://a,b/?replicaSet=rs" phases: - responses: - - "a:27017" - { ok: 1, ismaster: true, setName: "rs", setVersion: 1.0, primary: "a:27017", hosts: [ "a:27017", "b:27017" ], minWireVersion: 0, maxWireVersion: 4 } outcome: events: - topology_opening_event: topologyId: "42" - server_opening_event: topologyId: "42" address: "a:27017" - server_opening_event: topologyId: "42" address: "b:27017" - server_description_changed_event: topologyId: "42" address: "a:27017" previousDescription: address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: address: "a:27017" arbiters: [] hosts: [ "a:27017", "b:27017" ] passives: [] primary: "a:27017" setName: "rs" type: "RSPrimary" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "ReplicaSetNoPrimary" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] 
type: "Unknown" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: topologyType: "ReplicaSetWithPrimary" setName: "rs" servers: - address: "a:27017" arbiters: [] hosts: [ "a:27017", "b:27017" ] passives: [] primary: "a:27017" setName: "rs" type: "RSPrimary" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" mongo-2.5.1/spec/support/sdam_monitoring/standalone.yml0000644000004100000410000000362713257253113023412 0ustar www-datawww-datadescription: "Monitoring a standalone connection" uri: "mongodb://a:27017" phases: - responses: - - "a:27017" - { ok: 1, ismaster: true, minWireVersion: 0, maxWireVersion: 4 } outcome: events: - topology_opening_event: topologyId: "42" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Unknown" servers: [] newDescription: topologyType: "Single" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - server_opening_event: topologyId: "42" address: "a:27017" - server_description_changed_event: topologyId: "42" address: "a:27017" previousDescription: address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Standalone" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Single" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: topologyType: "Single" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Standalone" mongo-2.5.1/spec/support/sdam_monitoring/replica_set_with_primary.yml0000644000004100000410000000604713257253113026351 0ustar www-datawww-datadescription: "Monitoring a topology that is a replica set with a primary connected" uri: "mongodb://a,b" phases: - responses: - - "a:27017" - ok: 1 ismaster: true setName: "rs" setVersion: 1 primary: "a:27017" hosts: - "a:27017" - "b:27017" minWireVersion: 0 maxWireVersion: 4 outcome: events: - topology_opening_event: topologyId: "42" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Unknown" servers: [] newDescription: topologyType: "Unknown" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - server_opening_event: topologyId: "42" address: "a:27017" - server_opening_event: topologyId: "42" address: "b:27017" - server_description_changed_event: topologyId: "42" address: "a:27017" previousDescription: address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: address: "a:27017" arbiters: [] hosts: - "a:27017" - "b:27017" passives: [] primary: "a:27017" setName: "rs" type: "RSPrimary" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Unknown" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: topologyType: "ReplicaSetWithPrimary" setName: "rs" servers: - address: "a:27017" arbiters: [] hosts: - "a:27017" - "b:27017" passives: [] primary: "a:27017" setName: "rs" type: "RSPrimary" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" mongo-2.5.1/spec/support/sdam_monitoring/replica_set_with_no_primary.yml0000644000004100000410000000611513257253113027041 0ustar www-datawww-datadescription: "Monitoring a topology that is a replica set with no primary 
connected" uri: "mongodb://a,b" phases: - responses: - - "a:27017" - ok: 1 ismaster: false secondary: true setName: "rs" setVersion: 1 primary: "b:27017" hosts: - "a:27017" - "b:27017" minWireVersion: 0 maxWireVersion: 4 outcome: events: - topology_opening_event: topologyId: "42" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Unknown" servers: [] newDescription: topologyType: "Unknown" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - server_opening_event: topologyId: "42" address: "a:27017" - server_opening_event: topologyId: "42" address: "b:27017" - server_description_changed_event: topologyId: "42" address: "a:27017" previousDescription: address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: address: "a:27017" arbiters: [] hosts: - "a:27017" - "b:27017" passives: [] primary: "b:27017" setName: "rs" type: "RSSecondary" - topology_description_changed_event: topologyId: "42" previousDescription: topologyType: "Unknown" servers: - address: "a:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "Unknown" newDescription: topologyType: "ReplicaSetNoPrimary" setName: "rs" servers: - address: "a:27017" arbiters: [] hosts: - "a:27017" - "b:27017" passives: [] primary: "b:27017" setName: "rs" type: "RSSecondary" - address: "b:27017" arbiters: [] hosts: [] passives: [] type: "PossiblePrimary" mongo-2.5.1/lib/0000755000004100000410000000000013257253113013436 5ustar www-datawww-datamongo-2.5.1/lib/mongo/0000755000004100000410000000000013257253113014555 5ustar www-datawww-datamongo-2.5.1/lib/mongo/address.rb0000644000004100000410000001307113257253113016531 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/address/ipv4' require 'mongo/address/ipv6' require 'mongo/address/unix' module Mongo # Represents an address to a server, either with an IP address or socket # path. # # @since 2.0.0 class Address extend Forwardable # Mapping from socket family to resolver class. # # @since 2.0.0 FAMILY_MAP = { ::Socket::PF_UNIX => Unix, ::Socket::AF_INET6 => IPv6, ::Socket::AF_INET => IPv4 }.freeze # The localhost constant. # # @since 2.1.0 LOCALHOST = 'localhost'.freeze # @return [ String ] seed The seed address. attr_reader :seed # @return [ String ] host The original host name. attr_reader :host # @return [ Integer ] port The port. attr_reader :port # Check equality of the address to another. # # @example Check address equality. # address == other # # @param [ Object ] other The other object. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Address) host == other.host && port == other.port end # Check equality for hashing. # # @example Check hashing equality. # address.eql?(other) # # @param [ Object ] other The other object. 
# # @return [ true, false ] If the objects are equal. # # @since 2.2.0 def eql?(other) self == other end # Calculate the hash value for the address. # # @example Calculate the hash value. # address.hash # # @return [ Integer ] The hash value. # # @since 2.0.0 def hash [ host, port ].hash end # Initialize the address. # # @example Initialize the address with a DNS entry and port. # Mongo::Address.new("app.example.com:27017") # # @example Initialize the address with a DNS entry and no port. # Mongo::Address.new("app.example.com") # # @example Initialize the address with an IPV4 address and port. # Mongo::Address.new("127.0.0.1:27017") # # @example Initialize the address with an IPV4 address and no port. # Mongo::Address.new("127.0.0.1") # # @example Initialize the address with an IPV6 address and port. # Mongo::Address.new("[::1]:27017") # # @example Initialize the address with an IPV6 address and no port. # Mongo::Address.new("[::1]") # # @example Initialize the address with a unix socket. # Mongo::Address.new("/path/to/socket.sock") # # @param [ String ] seed The provided address. # @param [ Hash ] options The address options. # # @since 2.0.0 def initialize(seed, options = {}) @seed = seed @host, @port = parse_host_port @options = options end # Get a pretty printed address inspection. # # @example Get the address inspection. # address.inspect # # @return [ String ] The nice inspection string. # # @since 2.0.0 def inspect "#" end # Get a socket for the provided address, given the options. # # @example Get a socket. # address.socket(5, :ssl => true) # # @param [ Float ] socket_timeout The socket timeout. # @param [ Hash ] ssl_options SSL options. # # @return [ Pool::Socket::SSL, Pool::Socket::TCP, Pool::Socket::Unix ] The socket. # # @since 2.0.0 def socket(socket_timeout, ssl_options = {}) @resolver ||= initialize_resolver!(ssl_options) @resolver.socket(socket_timeout, ssl_options) end # Get the address as a string. # # @example Get the address as a string. # address.to_s # # @return [ String ] The nice string. # # @since 2.0.0 def to_s port ? "#{host}:#{port}" : host end # Connect a socket. # # @example Connect a socket. # address.connect_socket!(socket) # # @since 2.4.3 def connect_socket!(socket) socket.connect!(connect_timeout) end private def connect_timeout @connect_timeout ||= @options[:connect_timeout] || Server::CONNECT_TIMEOUT end def initialize_resolver!(ssl_options) return Unix.new(seed.downcase) if seed.downcase =~ Unix::MATCH family = (host == LOCALHOST) ? ::Socket::AF_INET : ::Socket::AF_UNSPEC error = nil ::Socket.getaddrinfo(host, nil, family, ::Socket::SOCK_STREAM).each do |info| begin _host = (host == LOCALHOST) ? 
info[3] : host res = FAMILY_MAP[info[4]].new(_host, port, host) res.socket(connect_timeout, ssl_options).connect!(connect_timeout).close return res rescue IOError, SystemCallError, Error::SocketTimeoutError, Error::SocketError => e error = e end end raise error end def parse_host_port address = seed.downcase case address when Unix::MATCH then Unix.parse(address) when IPv6::MATCH then IPv6.parse(address) else IPv4.parse(address) end end end end mongo-2.5.1/lib/mongo/protocol.rb0000644000004100000410000000075713257253113016754 0ustar www-datawww-data# Wire Protocol Base require 'mongo/protocol/serializers' require 'mongo/protocol/registry' require 'mongo/protocol/bit_vector' require 'mongo/protocol/message' # Client Requests require 'mongo/protocol/compressed' require 'mongo/protocol/delete' require 'mongo/protocol/get_more' require 'mongo/protocol/insert' require 'mongo/protocol/kill_cursors' require 'mongo/protocol/query' require 'mongo/protocol/update' require 'mongo/protocol/msg' # Server Responses require 'mongo/protocol/reply' mongo-2.5.1/lib/mongo/options/0000755000004100000410000000000013257253113016250 5ustar www-datawww-datamongo-2.5.1/lib/mongo/options/mapper.rb0000644000004100000410000000745313257253113020072 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Options # Utility class for various options mapping behaviour. # # @since 2.0.0 module Mapper extend self # Transforms the provided options to a new set of options given the # provided mapping. # # @example Transform the options. # Mapper.transform({ name: 1 }, { :name => :nombre }) # # @param [ Hash ] options The options to transform # @param [ Hash ] mappings The key mappings. # # @return [ Hash ] The transformed options. # # @since 2.0.0 def transform(options, mappings) map = transform_keys_to_strings(mappings) opts = transform_keys_to_strings(options) opts.reduce({}) do |transformed, (key, value)| transformed[map[key]] = value if map[key] transformed end end # Transforms the provided options to a new set of options given the # provided mapping. Expects BSON::Documents in and out so no explicit # string conversion needs to happen. # # @example Transform the options. # Mapper.transform_documents({ name: 1 }, { :name => :nombre }) # # @param [ BSON::Document ] options The options to transform # @param [ BSON::Document ] mappings The key mappings. # @param [ BSON::Document ] document The output document. # # @return [ BSON::Document ] The transformed options. # # @since 2.0.0 def transform_documents(options, mappings, document = BSON::Document.new) options.reduce(document) do |transformed, (key, value)| name = mappings[key] transformed[name] = value if name && !value.nil? transformed end end # Coverts all the keys of the options to strings. # # @example Convert all option keys to strings. # Mapper.transform({ :name => 1 }) # # @param [ Hash ] options The options to transform. # # @return [ Hash ] The transformed options. 
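      # @example An illustrative sketch: symbol keys become strings and the
      #   values are left untouched.
      #   Mapper.transform_keys_to_strings(:w => 1, 'j' => true)
      #   #=> { "w" => 1, "j" => true }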
# # @since 2.0.0 def transform_keys_to_strings(options) options.reduce({}) do |transformed, (key, value)| transformed[key.to_s] = value transformed end end # Coverts all the keys of the options to symbols. # # @example Convert all option keys to symbols. # Mapper.transform({ 'name' => 1 }) # # @param [ Hash ] options The options to transform. # # @return [ Hash ] The transformed options. # # @since 2.2.2 def transform_keys_to_symbols(options) options.reduce({}) do |transformed, (key, value)| transformed[key.to_sym] = value transformed end end # Coverts all the symbol values to strings. # # @example Convert all option symbol values to strings. # Mapper.transform({ :name => 1 }) # # @param [ Hash ] options The options to transform. # # @return [ Hash ] The transformed options. # # @since 2.0.0 def transform_values_to_strings(options) options.reduce({}) do |transformed, (key, value)| transformed[key] = value.is_a?(Symbol) ? value.to_s : value transformed end end end end end mongo-2.5.1/lib/mongo/options/redacted.rb0000644000004100000410000001123513257253113020352 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Options # Class for wrapping options that could be sensitive. # When printed, the sensitive values will be redacted. # # @since 2.1.0 class Redacted < BSON::Document # The options whose values will be redacted. # # @since 2.1.0 SENSITIVE_OPTIONS = [ :password, :pwd ].freeze # The replacement string used in place of the value for sensitive keys. # # @since 2.1.0 STRING_REPLACEMENT = ''.freeze # Get a string representation of the options. # # @return [ String ] The string representation of the options. # # @since 2.1.0 def inspect redacted_string(:inspect) end # Get a string representation of the options. # # @return [ String ] The string representation of the options. # # @since 2.1.0 def to_s redacted_string(:to_s) end # Whether these options contain a given key. # # @example Determine if the options contain a given key. # options.has_key?(:name) # # @param [ String, Symbol ] key The key to check for existence. # # @return [ true, false ] If the options contain the given key. # # @since 2.1.0 def has_key?(key) super(convert_key(key)) end alias_method :key?, :has_key? # Returns a new options object consisting of pairs for which the block returns false. # # @example Get a new options object with pairs for which the block returns false. # new_options = options.reject { |k, v| k == 'database' } # # @yieldparam [ String, Object ] The key as a string and its value. # # @return [ Options::Redacted ] A new options object. # # @since 2.1.0 def reject(&block) new_options = dup new_options.reject!(&block) || new_options end # Only keeps pairs for which the block returns false. # # @example Remove pairs from this object for which the block returns true. # options.reject! { |k, v| k == 'database' } # # @yieldparam [ String, Object ] The key as a string and its value. 
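      # @example An illustrative sketch: the receiver remains an
      #   Options::Redacted, so sensitive values stay hidden when printed.
      #   opts = Mongo::Options::Redacted.new(user: 'alice', password: 'secret')
      #   opts.reject! { |key, value| key == 'user' }
      #   opts.to_s # the remaining password value is rendered as STRING_REPLACEMENT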
# # @return [ Options::Redacted, nil ] This object or nil if no changes were made. # # @since 2.1.0 def reject! if block_given? n_keys = keys.size keys.each do |key| delete(key) if yield(key, self[key]) end n_keys == keys.size ? nil : self else to_enum end end # Returns a new options object consisting of pairs for which the block returns true. # # @example Get a new options object with pairs for which the block returns true. # ssl_options = options.select { |k, v| k =~ /ssl/ } # # @yieldparam [ String, Object ] The key as a string and its value. # # @return [ Options::Redacted ] A new options object. # # @since 2.1.0 def select(&block) new_options = dup new_options.select!(&block) || new_options end # Only keeps pairs for which the block returns true. # # @example Remove pairs from this object for which the block does not return true. # options.select! { |k, v| k =~ /ssl/ } # # @yieldparam [ String, Object ] The key as a string and its value. # # @return [ Options::Redacted, nil ] This object or nil if no changes were made. # # @since 2.1.0 def select! if block_given? n_keys = keys.size keys.each do |key| delete(key) unless yield(key, self[key]) end n_keys == keys.size ? nil : self else to_enum end end private def redacted_string(method) '{' + reduce([]) do |list, (k, v)| list << "#{k.send(method)}=>#{redact(k, v, method)}" end.join(', ') + '}' end def redact(k, v, method) return STRING_REPLACEMENT if SENSITIVE_OPTIONS.include?(k.to_sym) v.send(method) end end end end mongo-2.5.1/lib/mongo/index.rb0000644000004100000410000000277313257253113016222 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/index/view' module Mongo # Contains constants for indexing purposes. # # @since 2.0.0 module Index # Wildcard constant for all. # # @since 2.1.0 ALL = '*'.freeze # Specify ascending order for an index. # # @since 2.0.0 ASCENDING = 1 # Specify descending order for an index. # # @since 2.0.0 DESCENDING = -1 # Specify a 2d Geo index. # # @since 2.0.0 GEO2D = '2d'.freeze # Specify a 2d sphere Geo index. # # @since 2.0.0 GEO2DSPHERE = '2dsphere'.freeze # Specify a geoHaystack index. # # @since 2.0.0 GEOHAYSTACK = 'geoHaystack'.freeze # Encodes a text index. # # @since 2.0.0 TEXT = 'text'.freeze # Specify a hashed index. # # @since 2.0.0 HASHED = 'hashed'.freeze # Constant for the indexes collection. # # @since 2.0.0 COLLECTION = 'system.indexes'.freeze end end mongo-2.5.1/lib/mongo/dbref.rb0000644000004100000410000000607113257253113016170 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. module Mongo # Represents a DBRef document in the database. # # @since 2.1.0 class DBRef include BSON::JSON # The constant for the collection reference field. # # @since 2.1.0 COLLECTION = '$ref'.freeze # The constant for the id field. # # @since 2.1.0 ID = '$id'.freeze # The constant for the database field. # # @since 2.1.0 DATABASE = '$db'.freeze # @return [ String ] collection The collection name. attr_reader :collection # @return [ BSON::ObjectId ] id The referenced document id. attr_reader :id # @return [ String ] database The database name. attr_reader :database # Get the DBRef as a JSON document # # @example Get the DBRef as a JSON hash. # dbref.as_json # # @return [ Hash ] The max key as a JSON hash. # # @since 2.1.0 def as_json(*args) document = { COLLECTION => collection, ID => id } document.merge!(DATABASE => database) if database document end # Instantiate a new DBRef. # # @example Create the DBRef. # Mongo::DBRef.new('users', id, 'database') # # @param [ String ] collection The collection name. # @param [ BSON::ObjectId ] id The object id. # @param [ String ] database The database name. # # @since 2.1.0 def initialize(collection, id, database = nil) @collection = collection @id = id @database = database end # Converts the DBRef to raw BSON. # # @example Convert the DBRef to raw BSON. # dbref.to_bson # # @param [ BSON::ByteBuffer ] buffer The encoded BSON buffer to append to. # @param [ true, false ] validating_keys Whether keys should be validated when serializing. # # @return [ String ] The raw BSON. # # @since 2.1.0 def to_bson(buffer = BSON::ByteBuffer.new, validating_keys = BSON::Config.validating_keys?) as_json.to_bson(buffer) end module ClassMethods # Deserialize the hash from BSON, converting to a DBRef if appropriate. # # @param [ String ] buffer The bson representing a hash. # # @return [ Hash, DBRef ] The decoded hash or DBRef. # # @see http://bsonspec.org/#/specification # # @since 2.0.0 def from_bson(buffer) decoded = super if ref = decoded[COLLECTION] decoded = DBRef.new(ref, decoded[ID], decoded[DATABASE]) end decoded end end end ::Hash.send(:extend, DBRef::ClassMethods) end mongo-2.5.1/lib/mongo/bson.rb0000644000004100000410000000164713257253113016053 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Patch for allowing deprecated symbols to be used. # # @since 2.2.1 class Symbol # Overrides the default BSON type to use the symbol type instead of a # string type. # # @example Get the bson type. # :test.bson_type # # @return [ String ] The character 14. # # @since 2.2.1 def bson_type BSON::Symbol::BSON_TYPE end end mongo-2.5.1/lib/mongo/collection/0000755000004100000410000000000013257253113016710 5ustar www-datawww-datamongo-2.5.1/lib/mongo/collection/view.rb0000644000004100000410000001624013257253113020212 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/collection/view/builder' require 'mongo/collection/view/immutable' require 'mongo/collection/view/iterable' require 'mongo/collection/view/explainable' require 'mongo/collection/view/aggregation' require 'mongo/collection/view/change_stream' require 'mongo/collection/view/map_reduce' require 'mongo/collection/view/readable' require 'mongo/collection/view/writable' module Mongo class Collection # Representation of a query and options producing a result set of documents. # # A +View+ can be modified using helpers. Helpers can be chained, # as each one returns a +View+ if arguments are provided. # # The query message is sent to the server when a "terminator" is called. # For example, when #each is called on a +View+, a Cursor object is # created, which then sends the query to the server. # # A +View+ is not created directly by a user. Rather, +View+ # creates a +View+ when a CRUD operation is called and returns it to # the user to interact with. # # @note The +View+ API is semipublic. # @api semipublic class View extend Forwardable include Enumerable include Immutable include Iterable include Readable include Retryable include Explainable include Writable # @return [ Collection ] The +Collection+ to query. attr_reader :collection # @return [ Hash ] The query filter. attr_reader :filter # Delegate necessary operations to the collection. def_delegators :collection, :client, :cluster, :database # Delegate to the cluster for the next primary. def_delegators :cluster, :next_primary alias :selector :filter # Compare two +View+ objects. # # @example Compare the view with another object. # view == other # # @return [ true, false ] Equal if collection, filter, and options of two # +View+ match. # # @since 2.0.0 def ==(other) return false unless other.is_a?(View) collection == other.collection && filter == other.filter && options == other.options end alias_method :eql?, :== # A hash value for the +View+ composed of the collection namespace, # hash of the options and hash of the filter. # # @example Get the hash value. # view.hash # # @return [ Integer ] A hash value of the +View+ object. # # @since 2.0.0 def hash [ collection.namespace, options.hash, filter.hash ].hash end # Creates a new +View+. # # @example Find all users named Emily. # View.new(collection, {:name => 'Emily'}) # # @example Find all users named Emily skipping 5 and returning 10. # View.new(collection, {:name => 'Emily'}, :skip => 5, :limit => 10) # # @example Find all users named Emily using a specific read preference. # View.new(collection, {:name => 'Emily'}, :read => :secondary_preferred) # # @param [ Collection ] collection The +Collection+ to query. # @param [ Hash ] filter The query filter. # @param [ Hash ] options The additional query options. # # @option options :comment [ String ] Associate a comment with the query. # @option options :batch_size [ Integer ] The number of docs to return in # each response from MongoDB. 
# @option options :fields [ Hash ] The fields to include or exclude in # returned docs. # @option options :hint [ Hash ] Override default index selection and force # MongoDB to use a specific index for the query. # @option options :limit [ Integer ] Max number of docs to return. # @option options :max_scan [ Integer ] Constrain the query to only scan the # specified number of docs. Use to prevent queries from running too long. # @option options :read [ Symbol ] The read preference to use for the query. # If none is provided, the collection's default read preference is used. # @option options :show_disk_loc [ true, false ] Return disk location info as # a field in each doc. # @option options :skip [ Integer ] The number of documents to skip. # @option options :snapshot [ true, false ] Prevents returning a doc more than # once. # @option options :sort [ Hash ] The key and direction pairs used to sort the # results. # @option options [ Hash ] :collation The collation to use. # # @since 2.0.0 def initialize(collection, filter = {}, options = {}) validate_doc!(filter) @collection = collection parse_parameters!(BSON::Document.new(filter), BSON::Document.new(options)) end # Get a human-readable string representation of +View+. # # @example Get the inspection. # view.inspect # # @return [ String ] A string representation of a +View+ instance. # # @since 2.0.0 def inspect "#" end # Get the write concern on this +View+. # # @example Get the write concern. # view.write_concern # # @return [ Mongo::WriteConcern ] The write concern. # # @since 2.0.0 def write_concern WriteConcern.get(options[:write] || options[:write_concern] || collection.write_concern) end private def initialize_copy(other) @collection = other.collection @options = other.options.dup @filter = other.filter.dup end def parse_parameters!(filter, options) query = filter.delete(QUERY) modifiers = (filter || {}).merge(options.delete(MODIFIERS) || {}) @filter = (query || filter).freeze @options = Builder::Modifiers.map_driver_options(modifiers).merge!(options).freeze end def new(options) View.new(collection, filter, options) end def apply_collation!(doc, server, opts = {}) if coll = doc[:collation] || opts[:collation] || opts['collation'] || collation validate_collation!(server, coll) doc[:collation] = coll end end def validate_collation!(server, coll) if coll &&!server.features.collation_enabled? raise Error::UnsupportedCollation.new end end def view; self; end def with_session(opts = {}, &block) client.send(:with_session, @options.merge(opts), &block) end end end end mongo-2.5.1/lib/mongo/collection/view/0000755000004100000410000000000013257253113017662 5ustar www-datawww-datamongo-2.5.1/lib/mongo/collection/view/builder/0000755000004100000410000000000013257253113021310 5ustar www-datawww-datamongo-2.5.1/lib/mongo/collection/view/builder/op_query.rb0000644000004100000410000000473113257253113023505 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
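# A minimal sketch of the specification this builder produces, assuming `view`
# is something like client[:users].find(name: 'Emily') on an already-connected
# client (the client, database and collection names are illustrative only):
#
#   Mongo::Collection::View::Builder::OpQuery.new(view).specification
#   #=> { :selector => { 'name' => 'Emily' }, :read => (the view's read preference),
#   #     :options => (skip/limit/projection/flags), :db_name => (database name),
#   #     :coll_name => 'users' }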
module Mongo class Collection class View module Builder # Builds a legacy OP_QUERY specification from options. # # @since 2.2.0 class OpQuery extend Forwardable def_delegators :@view, :cluster, :collection, :database, :filter, :options, :read # @return [ BSON::Document ] modifiers The server modifiers. attr_reader :modifiers # Create the new legacy query builder. # # @example Create the query builder. # QueryBuilder.new(view) # # @param [ Collection::View ] view The collection view. # # @since 2.2.2 def initialize(view) @view = view @modifiers = Modifiers.map_server_modifiers(options) end def specification { :selector => requires_special_filter? ? special_filter : filter, :read => read, :options => query_options, :db_name => database.name, :coll_name => collection.name } end private def query_options BSON::Document.new( project: options[:projection], skip: options[:skip], limit: options[:limit], flags: Flags.map_flags(options), batch_size: options[:batch_size] ) end def requires_special_filter? !modifiers.empty? || cluster.sharded? end def read_pref_formatted @read_formatted ||= ServerSelector.get(read).to_mongos if read end def special_filter sel = BSON::Document.new(:$query => filter).merge!(modifiers) sel[:$readPreference] = read_pref_formatted unless read_pref_formatted.nil? sel end end end end end end mongo-2.5.1/lib/mongo/collection/view/builder/aggregation.rb0000644000004100000410000000736613257253113024140 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View module Builder # Builds an aggregation command specification from the view and options. # # @since 2.2.0 class Aggregation extend Forwardable # The mappings from ruby options to the aggregation options. # # @since 2.2.0 MAPPINGS = BSON::Document.new( :allow_disk_use => 'allowDiskUse', :max_time_ms => 'maxTimeMS', # This is intentional; max_await_time_ms is an alias for maxTimeMS used on getmore # commands for change streams. :max_await_time_ms => 'maxTimeMS', :explain => 'explain', :bypass_document_validation => 'bypassDocumentValidation', :collation => 'collation', :hint => 'hint', :comment => 'comment' ).freeze def_delegators :@view, :collection, :database, :read, :write_concern # @return [ Array ] pipeline The pipeline. attr_reader :pipeline # @return [ Collection::View ] view The collection view. attr_reader :view # @return [ Hash ] options The map/reduce specific options. attr_reader :options # Initialize the builder. # # @example Initialize the builder. # Aggregation.new(map, reduce, view, options) # # @param [ Array ] pipeline The aggregation pipeline. # @param [ Collection::View ] view The collection view. # @param [ Hash ] options The map/reduce options. # # @since 2.2.0 def initialize(pipeline, view, options) @pipeline = pipeline @view = view @options = options end # Get the specification to pass to the aggregation operation. # # @example Get the specification. # builder.specification # # @return [ Hash ] The specification. 
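          # @example An illustrative sketch of the selector produced for a
          #   $match pipeline on a collection named 'users', with no extra
          #   options supplied:
          #   # { 'aggregate' => 'users',
          #   #   'pipeline' => [{ '$match' => { 'name' => 'Emily' } }],
          #   #   'cursor' => {} }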
# # @since 2.2.0 def specification spec = { selector: aggregation_command, db_name: database.name, read: read, session: @options[:session] } write? ? spec.merge!(write_concern: write_concern) : spec end private def write? pipeline.any? { |operator| operator[:$out] || operator['$out'] } end def aggregation_command command = BSON::Document.new(:aggregate => collection.name, :pipeline => pipeline) command[:cursor] = cursor if cursor command[:readConcern] = collection.read_concern if collection.read_concern command.merge!(Options::Mapper.transform_documents(options, MAPPINGS)) command end def cursor if options[:use_cursor] == true || options[:use_cursor].nil? batch_size_doc end end def batch_size_doc (value = options[:batch_size] || view.batch_size) ? { :batchSize => value } : {} end end end end end end mongo-2.5.1/lib/mongo/collection/view/builder/map_reduce.rb0000644000004100000410000001233213257253113023742 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View module Builder # Builds a map/reduce specification from the view and options. # # @since 2.2.0 class MapReduce extend Forwardable # The mappings from ruby options to the map/reduce options. # # @since 2.2.0 MAPPINGS = BSON::Document.new( finalize: 'finalize', js_mode: 'jsMode', out: 'out', scope: 'scope', verbose: 'verbose', bypass_document_validation: 'bypassDocumentValidation', collation: 'collation' ).freeze def_delegators :@view, :collection, :database, :filter, :read, :write_concern # @return [ String ] map The map function. attr_reader :map # @return [ String ] reduce The reduce function. attr_reader :reduce # @return [ Collection::View ] view The collection view. attr_reader :view # @return [ Hash ] options The map/reduce specific options. attr_reader :options # Initialize the builder. # # @example Initialize the builder. # MapReduce.new(map, reduce, view, options) # # @param [ String ] map The map function. # @param [ String ] reduce The reduce function. # @param [ Collection::View ] view The collection view. # @param [ Hash ] options The map/reduce options. # # @since 2.2.0 def initialize(map, reduce, view, options) @map = map @reduce = reduce @view = view @options = options end # Get the specification for issuing a find command on the map/reduce # results. # # @example Get the command specification. # builder.command_specification # # @return [ Hash ] The specification. # # @since 2.2.0 def command_specification { selector: find_command, db_name: query_database, read: read, session: options[:session] } end # Get the specification for the document query after a map/reduce. # # @example Get the query specification. # builder.query_specification # # @return [ Hash ] The specification. # # @since 2.2.0 def query_specification { selector: {}, options: {}, db_name: query_database, coll_name: query_collection } end # Get the specification to pass to the map/reduce operation. # # @example Get the specification. 
# builder.specification # # @return [ Hash ] The specification. # # @since 2.2.0 def specification spec = { selector: map_reduce_command, db_name: database.name, read: read, session: options[:session] } write?(spec) ? spec.merge!(write_concern: write_concern) : spec end private OUT_ACTIONS = [ :replace, :merge, :reduce ].freeze def write?(spec) if out = spec[:selector][:out] out.is_a?(String) || (out.respond_to?(:keys) && out.keys.first.to_s.downcase != View::MapReduce::INLINE) end end def find_command BSON::Document.new('find' => query_collection, 'filter' => {}) end def map_reduce_command command = BSON::Document.new( :mapreduce => collection.name, :map => map, :reduce => reduce, :query => filter, :out => { inline: 1 } ) command[:readConcern] = collection.read_concern if collection.read_concern command.merge!(view_options) command.merge!(Options::Mapper.transform_documents(options, MAPPINGS)) command end def query_database options[:out].respond_to?(:keys) && options[:out][:db] ? options[:out][:db] : database.name end def query_collection if options[:out].respond_to?(:keys) options[:out][OUT_ACTIONS.find { |action| options[:out][action] }] end || options[:out] end def view_options @view_options ||= (opts = view.options.dup opts.delete(:session) opts) end end end end end end mongo-2.5.1/lib/mongo/collection/view/builder/flags.rb0000644000004100000410000000364313257253113022737 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View module Builder # Provides behaviour for mapping flags. # # @since 2.2.0 module Flags extend self # Options to cursor flags mapping. # # @since 2.2.0 MAPPINGS = { :allow_partial_results => [ :partial ], :oplog_replay => [ :oplog_replay ], :no_cursor_timeout => [ :no_cursor_timeout ], :tailable => [ :tailable_cursor ], :tailable_await => [ :await_data, :tailable_cursor], :await_data => [ :await_data ], :exhaust => [ :exhaust ] }.freeze # Maps an array of flags from the provided options. # # @example Map the flags. # Flags.map_flags(options) # # @param [ Hash, BSON::Document ] options The options. # # @return [ Array ] The flags. # # @since 2.2.0 def map_flags(options) MAPPINGS.each.reduce(options[:flags] || []) do |flags, (key, value)| cursor_type = options[:cursor_type] if options[key] || (cursor_type && cursor_type == key) flags.push(*value) end flags end end end end end end end mongo-2.5.1/lib/mongo/collection/view/builder/modifiers.rb0000644000004100000410000000501513257253113023617 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View module Builder # Provides behaviour for mapping modifiers. # # @since 2.2.0 module Modifiers extend self # Mappings from driver options to legacy server values. # # @since 2.2.0 DRIVER_MAPPINGS = BSON::Document.new( sort: '$orderby', hint: '$hint', comment: '$comment', snapshot: '$snapshot', max_scan: '$maxScan', max_value: '$max', min_value: '$min', max_time_ms: '$maxTimeMS', return_key: '$returnKey', show_disk_loc: '$showDiskLoc', explain: '$explain' ).freeze # Mappings from server values to driver options. # # @since 2.2.0 SERVER_MAPPINGS = BSON::Document.new(DRIVER_MAPPINGS.invert).freeze # Transform the provided server modifiers to driver options. # # @example Transform to driver options. # Modifiers.map_driver_options(modifiers) # # @param [ Hash ] modifiers The modifiers. # # @return [ BSON::Document ] The driver options. # # @since 2.2.0 def self.map_driver_options(modifiers) Options::Mapper.transform_documents(modifiers, SERVER_MAPPINGS) end # Transform the provided options into a document of only server # modifiers. # # @example Map the server modifiers. # Modifiers.map_server_modifiers(options) # # @param [ Hash, BSON::Document ] options The options. # # @return [ BSON::Document ] The modifiers. # # @since 2.2.0 def self.map_server_modifiers(options) Options::Mapper.transform_documents(options, DRIVER_MAPPINGS) end end end end end end mongo-2.5.1/lib/mongo/collection/view/builder/find_command.rb0000644000004100000410000001075613257253113024264 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View module Builder # Builds a find command specification from options. # # @since 2.2.0 class FindCommand extend Forwardable # The mappings from ruby options to the find command. # # @since 2.2.0 MAPPINGS = BSON::Document.new( sort: 'sort', projection: 'projection', hint: 'hint', skip: 'skip', limit: 'limit', batch_size: 'batchSize', single_batch: 'singleBatch', comment: 'comment', max_scan: 'maxScan', max_time_ms: 'maxTimeMS', max_value: 'max', min_value: 'min', return_key: 'returnKey', show_disk_loc: 'showRecordId', snapshot: 'snapshot', tailable: 'tailable', tailable_cursor: 'tailable', oplog_replay: 'oplogReplay', no_cursor_timeout: 'noCursorTimeout', await_data: 'awaitData', allow_partial_results: 'allowPartialResults', collation: 'collation' ).freeze def_delegators :@view, :collection, :database, :filter, :options, :read # Get the specification for an explain command that wraps the find # command. # # @example Get the explain spec. # builder.explain_specification # # @return [ Hash ] The specification. # # @since 2.2.0 def explain_specification { selector: { explain: find_command }, db_name: database.name, read: read, session: @session } end # Create the find command builder. # # @example Create the find command builder. 
# FindCommandBuilder.new(view) # # @param [ Collection::View ] view The collection view. # @param [ Session ] session The session. # # @since 2.2.2 def initialize(view, session) @view = view @session = session end # Get the specification to pass to the find command operation. # # @example Get the specification. # builder.specification # # @return [ Hash ] The specification. # # @since 2.2.0 def specification { selector: find_command, db_name: database.name, read: read, session: @session } end private def find_command document = BSON::Document.new('find' => collection.name, 'filter' => filter) document[:readConcern] = collection.read_concern if collection.read_concern command = Options::Mapper.transform_documents(convert_flags(options), MAPPINGS, document) convert_limit_and_batch_size(command) command end def convert_limit_and_batch_size(command) if command[:limit] && command[:limit] < 0 && command[:batchSize] && command[:batchSize] < 0 command[:limit] = command[:limit].abs command[:batchSize] = command[:limit].abs command[:singleBatch] = true else [:limit, :batchSize].each do |opt| if command[opt] if command[opt] < 0 command[opt] = command[opt].abs command[:singleBatch] = true elsif command[opt] == 0 command.delete(opt) end end end end end def convert_flags(options) return options if options.empty? opts = options.dup opts.delete(:cursor_type) Flags.map_flags(options).reduce(opts) do |o, key| o.merge!(key => true) end end end end end end end mongo-2.5.1/lib/mongo/collection/view/aggregation.rb0000644000004100000410000001012513257253113022475 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View # Provides behaviour around an aggregation pipeline on a collection view. # # @since 2.0.0 class Aggregation extend Forwardable include Enumerable include Immutable include Iterable include Explainable include Loggable include Retryable # @return [ View ] view The collection view. attr_reader :view # @return [ Array ] pipeline The aggregation pipeline. attr_reader :pipeline # Delegate necessary operations to the view. def_delegators :view, :collection, :read, :cluster # Delegate necessary operations to the collection. def_delegators :collection, :database, :client # The reroute message. # # @since 2.1.0 REROUTE = 'Rerouting the Aggregation operation to the primary server.'.freeze # Set to true if disk usage is allowed during the aggregation. # # @example Set disk usage flag. # aggregation.allow_disk_use(true) # # @param [ true, false ] value The flag value. # # @return [ true, false, Aggregation ] The aggregation if a value was # set or the value if used as a getter. # # @since 2.0.0 def allow_disk_use(value = nil) configure(:allow_disk_use, value) end # Initialize the aggregation for the provided collection view, pipeline # and options. # # @example Create the new aggregation view. # Aggregation.view.new(view, pipeline) # # @param [ Collection::View ] view The collection view. 
# @param [ Array ] pipeline The pipeline of operations. # @param [ Hash ] options The aggregation options. # # @since 2.0.0 def initialize(view, pipeline, options = {}) @view = view @pipeline = pipeline.dup @options = BSON::Document.new(options).freeze end # Get the explain plan for the aggregation. # # @example Get the explain plan for the aggregation. # aggregation.explain # # @return [ Hash ] The explain plan. # # @since 2.0.0 def explain self.class.new(view, pipeline, options.merge(explain: true)).first end private def server_selector @view.send(:server_selector) end def aggregate_spec(session) Builder::Aggregation.new(pipeline, view, options.merge(session: session)).specification end def new(options) Aggregation.new(view, pipeline, options) end def initial_query_op(session) Operation::Commands::Aggregate.new(aggregate_spec(session)) end def valid_server?(server) server.standalone? || server.mongos? || server.primary? || secondary_ok? end def secondary_ok? pipeline.none? { |op| op.key?('$out') || op.key?(:$out) } end def send_initial_query(server, session) unless valid_server?(server) log_warn(REROUTE) server = cluster.next_primary(false) end validate_collation!(server) initial_query_op(session).execute(server) end def validate_collation!(server) if options[:collation] && !server.features.collation_enabled? raise Error::UnsupportedCollation.new end end end end end end mongo-2.5.1/lib/mongo/collection/view/change_stream/0000755000004100000410000000000013257253113022462 5ustar www-datawww-datamongo-2.5.1/lib/mongo/collection/view/change_stream/retryable.rb0000644000004100000410000000305413257253113025002 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View class ChangeStream < Aggregation # Behavior around resuming a change stream. # # @since 2.5.0 module Retryable private RETRY_MESSAGES = [ 'not master', '(43)' # cursor not found error code ].freeze def read_with_one_retry yield rescue => e if retryable?(e) yield else raise(e) end end def retryable?(error) network_error?(error) || retryable_operation_failure?(error) end def network_error?(error) [ Error::SocketError, Error::SocketTimeoutError].include?(error.class) end def retryable_operation_failure?(error) error.is_a?(Error::OperationFailure) && RETRY_MESSAGES.any? { |m| error.message.include?(m) } end end end end end end mongo-2.5.1/lib/mongo/collection/view/map_reduce.rb0000644000004100000410000002044013257253113022313 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
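      # NOTE: a minimal usage sketch for the Aggregation view defined in
      # aggregation.rb above, assuming `client` is an already connected
      # Mongo::Client and that the :restaurants collection exists (both names
      # are placeholders). Aggregations are normally built from a collection
      # view rather than by instantiating the class directly; each chained
      # option returns a new, immutable Aggregation.
      #
      #   view = client[:restaurants].find
      #   agg  = view.aggregate([
      #     { '$group' => { '_id' => '$city', 'count' => { '$sum' => 1 } } }
      #   ])
      #   agg.allow_disk_use(true).each { |doc| p doc }
      #
      #   # A pipeline containing a '$out' stage is not secondary_ok?, so the
      #   # driver reroutes the operation to the primary (see valid_server? and
      #   # send_initial_query above).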
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View # Provides behaviour around a map/reduce operation on the collection # view. # # @since 2.0.0 class MapReduce extend Forwardable include Enumerable include Immutable include Loggable include Retryable # The inline option. # # @since 2.1.0 INLINE = 'inline'.freeze # Reroute message. # # @since 2.1.0 REROUTE = 'Rerouting the MapReduce operation to the primary server.'.freeze # @return [ View ] view The collection view. attr_reader :view # @return [ String ] map The map function. attr_reader :map_function # @return [ String ] reduce The reduce function. attr_reader :reduce_function # Delegate necessary operations to the view. def_delegators :view, :collection, :read, :cluster # Delegate necessary operations to the collection. def_delegators :collection, :database, :client # Iterate through documents returned by the map/reduce. # # @example Iterate through the result of the map/reduce. # map_reduce.each do |document| # p document # end # # @return [ Enumerator ] The enumerator. # # @since 2.0.0 # # @yieldparam [ Hash ] Each matching document. def each @cursor = nil session = client.send(:get_session, @options) legacy_write_with_retry do |server| result = send_initial_query(server, session) result = send_fetch_query(server, session) unless inline? @cursor = Cursor.new(view, result, server, session: session) end @cursor.each do |doc| yield doc end if block_given? @cursor.to_enum end # Set or get the finalize function for the operation. # # @example Set the finalize function. # map_reduce.finalize(function) # # @param [ String ] function The finalize js function. # # @return [ MapReduce, String ] The new MapReduce operation or the # value of the function. # # @since 2.0.0 def finalize(function = nil) configure(:finalize, function) end # Initialize the map/reduce for the provided collection view, functions # and options. # # @example Create the new map/reduce view. # # @param [ Collection::View ] view The collection view. # @param [ String ] map The map function. # @param [ String ] reduce The reduce function. # @param [ Hash ] options The map/reduce options. # # @since 2.0.0 def initialize(view, map, reduce, options = {}) @view = view @map_function = map.freeze @reduce_function = reduce.freeze @options = BSON::Document.new(options).freeze end # Set or get the jsMode flag for the operation. # # @example Set js mode for the operation. # map_reduce.js_mode(true) # # @param [ true, false ] value The jsMode value. # # @return [ MapReduce, true, false ] The new MapReduce operation or the # value of the jsMode flag. # # @since 2.0.0 def js_mode(value = nil) configure(:js_mode, value) end # Set or get the output location for the operation. # # @example Set the output to inline. # map_reduce.out(inline: 1) # # @example Set the output collection to merge. # map_reduce.out(merge: 'users') # # @example Set the output collection to replace. # map_reduce.out(replace: 'users') # # @example Set the output collection to reduce. # map_reduce.out(reduce: 'users') # # @param [ Hash ] location The output location details. # # @return [ MapReduce, Hash ] The new MapReduce operation or the value # of the output location. # # @since 2.0.0 def out(location = nil) configure(:out, location) end # Set or get a scope on the operation. # # @example Set the scope value. # map_reduce.scope(value: 'test') # # @param [ Hash ] object The scope object. 
# # @return [ MapReduce, Hash ] The new MapReduce operation or the value # of the scope. # # @since 2.0.0 def scope(object = nil) configure(:scope, object) end # Whether to include the timing information in the result. # # @example Set the verbose value. # map_reduce.verbose(false) # # @param [ true, false ] value Whether to include timing information # in the result. # # @return [ MapReduce, Hash ] The new MapReduce operation or the value # of the verbose option. # # @since 2.0.5 def verbose(value = nil) configure(:verbose, value) end # Execute the map reduce, without doing a fetch query to retrieve the results # if outputted to a collection. # # @example Execute the map reduce and get the raw result. # map_reduce.execute # # @return [ Mongo::Operation::Result ] The raw map reduce result # # @since 2.5.0 def execute view.send(:with_session, @options) do |session| legacy_write_with_retry do |server| send_initial_query(server, session) end end end private def server_selector @view.send(:server_selector) end def inline? out.nil? || out == { inline: 1 } || out == { INLINE => 1 } end def map_reduce_spec(session = nil) Builder::MapReduce.new(map_function, reduce_function, view, options.merge(session: session)).specification end def new(options) MapReduce.new(view, map_function, reduce_function, options) end def initial_query_op(session) Operation::Commands::MapReduce.new(map_reduce_spec(session)) end def valid_server?(server) server.standalone? || server.mongos? || server.primary? || secondary_ok? end def secondary_ok? out.respond_to?(:keys) && out.keys.first.to_s.downcase == INLINE end def send_initial_query(server, session) unless valid_server?(server) log_warn(REROUTE) server = cluster.next_primary(false) end validate_collation!(server) initial_query_op(session).execute(server) end def fetch_query_spec Builder::MapReduce.new(map_function, reduce_function, view, options).query_specification end def find_command_spec(session) Builder::MapReduce.new(map_function, reduce_function, view, options.merge(session: session)).command_specification end def fetch_query_op(server, session) if server.features.find_command_enabled? Operation::Commands::Find.new(find_command_spec(session)) else Operation::Read::Query.new(fetch_query_spec) end end def send_fetch_query(server, session) fetch_query_op(server, session).execute(server) end def validate_collation!(server) if (view.options[:collation] || options[:collation]) && !server.features.collation_enabled? raise Error::UnsupportedCollation.new end end end end end end mongo-2.5.1/lib/mongo/collection/view/immutable.rb0000644000004100000410000000201613257253113022165 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View # Defines behaviour around views being configurable and immutable. # # @since 2.0.0 module Immutable # @return [ Hash ] options The additional query options. attr_reader :options private def configure(field, value) return options[field] if value.nil? 
new(options.merge(field => value)) end end end end end mongo-2.5.1/lib/mongo/collection/view/writable.rb0000644000004100000410000003171513257253113022027 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View # Defines write related behaviour for collection view. # # @since 2.0.0 module Writable # The array filters field constant. # # @since 2.5.0 ARRAY_FILTERS = 'array_filters'.freeze # Finds a single document in the database via findAndModify and deletes # it, returning the original document. # # @example Find one document and delete it. # view.find_one_and_delete # # @param [ Hash ] opts The options. # # @option opts [ Hash ] :collation The collation to use. # # @return [ BSON::Document, nil ] The document, if found. # # @since 2.0.0 def find_one_and_delete(opts = {}) cmd = { :findandmodify => collection.name, :query => filter, :remove => true } cmd[:fields] = projection if projection cmd[:sort] = sort if sort cmd[:maxTimeMS] = max_time_ms if max_time_ms cmd[:writeConcern] = write_concern.options if write_concern with_session(opts) do |session| write_with_retry(session, write_concern) do |server, txn_num| apply_collation!(cmd, server, opts) Operation::Commands::Command.new( :selector => cmd, :db_name => database.name, :session => session, :txn_num => txn_num ).execute(server) end end.first['value'] end # Finds a single document and replaces it. # # @example Find a document and replace it, returning the original. # view.find_one_and_replace({ name: 'test' }, :return_document => :before) # # @example Find a document and replace it, returning the new document. # view.find_one_and_replace({ name: 'test' }, :return_document => :after) # # @param [ BSON::Document ] replacement The replacement. # @param [ Hash ] opts The options. # # @option opts [ Symbol ] :return_document Either :before or :after. # @option opts [ true, false ] :upsert Whether to upsert if the document doesn't exist. # @option opts [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option opts [ Hash ] :write_concern The write concern options. # Defaults to the collection's write concern. # @option opts [ Hash ] :collation The collation to use. # # @return [ BSON::Document ] The document. # # @since 2.0.0 def find_one_and_replace(replacement, opts = {}) find_one_and_update(replacement, opts) end # Finds a single document and updates it. # # @example Find a document and update it, returning the original. # view.find_one_and_update({ "$set" => { name: 'test' }}, :return_document => :before) # # @param [ BSON::Document ] document The updates. # @param [ Hash ] opts The options. # # @option opts [ Symbol ] :return_document Either :before or :after. # @option opts [ true, false ] :upsert Whether to upsert if the document doesn't exist. # @option opts [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. 
# @option opts [ Hash ] :write_concern The write concern options. # Defaults to the collection's write concern. # @option opts [ Hash ] :collation The collation to use. # @option opts [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # # @return [ BSON::Document ] The document. # # @since 2.0.0 def find_one_and_update(document, opts = {}) cmd = { :findandmodify => collection.name, :query => filter } cmd[:update] = document cmd[:fields] = projection if projection cmd[:sort] = sort if sort cmd[:new] = !!(opts[:return_document] && opts[:return_document] == :after) cmd[:upsert] = opts[:upsert] if opts[:upsert] cmd[:maxTimeMS] = max_time_ms if max_time_ms cmd[:bypassDocumentValidation] = !!opts[:bypass_document_validation] cmd[:writeConcern] = write_concern.options if write_concern value = with_session(opts) do |session| write_with_retry(session, write_concern) do |server, txn_num| apply_collation!(cmd, server, opts) apply_array_filters!(cmd, server, opts) Operation::Commands::Command.new( :selector => cmd, :db_name => database.name, :session => session, :txn_num => txn_num ).execute(server) end end.first['value'] value unless value.nil? || value.empty? end # Remove documents from the collection. # # @example Remove multiple documents from the collection. # collection_view.delete_many # # @param [ Hash ] opts The options. # # @option opts [ Hash ] :collation The collation to use. # # @return [ Result ] The response from the database. # # @since 2.0.0 def delete_many(opts = {}) delete_doc = { Operation::Q => filter, Operation::LIMIT => 0 } with_session(opts) do |session| legacy_write_with_retry do |server| apply_collation!(delete_doc, server, opts) Operation::Write::Delete.new( :delete => delete_doc, :db_name => collection.database.name, :coll_name => collection.name, :write_concern => collection.write_concern, :session => session ).execute(server) end end end # Remove a document from the collection. # # @example Remove a single document from the collection. # collection_view.delete_one # # @param [ Hash ] opts The options. # # @option opts [ Hash ] :collation The collation to use. # # @return [ Result ] The response from the database. # # @since 2.0.0 def delete_one(opts = {}) delete_doc = { Operation::Q => filter, Operation::LIMIT => 1 } write_concern = collection.write_concern with_session(opts) do |session| write_with_retry(session, write_concern) do |server, txn_num| apply_collation!(delete_doc, server, opts) Operation::Write::Delete.new( :delete => delete_doc, :db_name => collection.database.name, :coll_name => collection.name, :write_concern => write_concern, :session => session, :txn_num => txn_num ).execute(server) end end end # Replaces a single document in the database with the new document. # # @example Replace a single document. # collection_view.replace_one({ name: 'test' }) # # @param [ Hash ] replacement The replacement document. # @param [ Hash ] opts The options. # # @option opts [ true, false ] :upsert Whether to upsert if the # document doesn't exist. # @option opts [ Hash ] :collation The collation to use. # # @return [ Result ] The response from the database. 
# # @since 2.0.0 def replace_one(replacement, opts = {}) update_doc = { Operation::Q => filter, Operation::U => replacement, Operation::MULTI => false, Operation::UPSERT => !!opts[:upsert] } write_concern = collection.write_concern with_session(opts) do |session| write_with_retry(session, write_concern) do |server, txn_num| apply_collation!(update_doc, server, opts) apply_array_filters!(update_doc, server, opts) Operation::Write::Update.new( :update => update_doc, :db_name => collection.database.name, :coll_name => collection.name, :write_concern => write_concern, :bypass_document_validation => !!opts[:bypass_document_validation], :session => session, :txn_num => txn_num ).execute(server) end end end # Update documents in the collection. # # @example Update multiple documents in the collection. # collection_view.update_many('$set' => { name: 'test' }) # # @param [ Hash ] spec The update statement. # @param [ Hash ] opts The options. # # @option opts [ true, false ] :upsert Whether to upsert if the # document doesn't exist. # @option opts [ Hash ] :collation The collation to use. # @option opts [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # # @return [ Result ] The response from the database. # # @since 2.0.0 def update_many(spec, opts = {}) update_doc = { Operation::Q => filter, Operation::U => spec, Operation::MULTI => true, Operation::UPSERT => !!opts[:upsert] } with_session(opts) do |session| legacy_write_with_retry do |server| apply_collation!(update_doc, server, opts) apply_array_filters!(update_doc, server, opts) Operation::Write::Update.new( :update => update_doc, :db_name => collection.database.name, :coll_name => collection.name, :write_concern => collection.write_concern, :bypass_document_validation => !!opts[:bypass_document_validation], :session => session ).execute(server) end end end # Update a single document in the collection. # # @example Update a single document in the collection. # collection_view.update_one('$set' => { name: 'test' }) # # @param [ Hash ] spec The update statement. # @param [ Hash ] opts The options. # # @option opts [ true, false ] :upsert Whether to upsert if the # document doesn't exist. # @option opts [ Hash ] :collation The collation to use. # @option opts [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # # @return [ Result ] The response from the database. # # @since 2.0.0 def update_one(spec, opts = {}) update_doc = { Operation::Q => filter, Operation::U => spec, Operation::MULTI => false, Operation::UPSERT => !!opts[:upsert] } write_concern = collection.write_concern with_session(opts) do |session| write_with_retry(session, write_concern) do |server, txn_num| apply_collation!(update_doc, server, opts) apply_array_filters!(update_doc, server, opts) Operation::Write::Update.new( :update => update_doc, :db_name => collection.database.name, :coll_name => collection.name, :write_concern => write_concern, :bypass_document_validation => !!opts[:bypass_document_validation], :session => session, :txn_num => txn_num ).execute(server) end end end private def apply_array_filters!(doc, server, opts = {}) if filters = opts[:array_filters] || opts[ARRAY_FILTERS] validate_array_filters!(server, filters) doc[:arrayFilters] = filters end end def validate_array_filters!(server, filters) if filters && !server.features.array_filters_enabled? 
raise Error::UnsupportedArrayFilters.new end end end end end end mongo-2.5.1/lib/mongo/collection/view/iterable.rb0000644000004100000410000000530213257253113021776 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View # Defines iteration related behaviour for collection views, including # cursor instantiation. # # @since 2.0.0 module Iterable # Iterate through documents returned by a query with this +View+. # # @example Iterate through the result of the view. # view.each do |document| # p document # end # # @return [ Enumerator ] The enumerator. # # @since 2.0.0 # # @yieldparam [ Hash ] Each matching document. def each @cursor = nil session = client.send(:get_session, @options) read_with_retry do server = server_selector.select_server(cluster, false) result = send_initial_query(server, session) @cursor = Cursor.new(view, result, server, session: session) end @cursor.each do |doc| yield doc end if block_given? @cursor.to_enum end # Stop the iteration by sending a KillCursors command to the server. # # @example Stop the iteration. # view.close_query # # @since 2.1.0 def close_query @cursor.send(:kill_cursors) if @cursor && !@cursor.closed? end alias :kill_cursors :close_query private def initial_query_op(server, session) if server.features.find_command_enabled? initial_command_op(session) else Operation::Read::Query.new(Builder::OpQuery.new(self).specification) end end def initial_command_op(session) if explained? Operation::Commands::Explain.new(Builder::FindCommand.new(self, session).explain_specification) else Operation::Commands::Find.new(Builder::FindCommand.new(self, session).specification) end end def send_initial_query(server, session = nil) validate_collation!(server, collation) initial_query_op(server, session).execute(server) end end end end end mongo-2.5.1/lib/mongo/collection/view/readable.rb0000644000004100000410000004252213257253113021753 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View # Defines read related behaviour for collection view. # # @since 2.0.0 module Readable # The query modifier constant. # # @since 2.2.0 QUERY = '$query'.freeze # The modifiers option constant. # # @since 2.2.0 MODIFIERS = 'modifiers'.freeze # Execute an aggregation on the collection view. # # @example Aggregate documents. 
# view.aggregate([ # { "$group" => { "_id" => "$city", "tpop" => { "$sum" => "$pop" }}} # ]) # # @param [ Array ] pipeline The aggregation pipeline. # @param [ Hash ] options The aggregation options. # # @return [ Aggregation ] The aggregation object. # # @since 2.0.0 def aggregate(pipeline, options = {}) Aggregation.new(self, pipeline, options) end # Allows the query to get partial results if some shards are down. # # @example Allow partial results. # view.allow_partial_results # # @return [ View ] The new view. # # @since 2.0.0 def allow_partial_results configure(:allow_partial_results, true) end # Tell the query's cursor to stay open and wait for data. # # @example Await data on the cursor. # view.await_data # # @return [ View ] The new view. # # @since 2.0.0 def await_data configure(:await_data, true) end # The number of documents returned in each batch of results from MongoDB. # # @example Set the batch size. # view.batch_size(5) # # @note Specifying 1 or a negative number is analogous to setting a limit. # # @param [ Integer ] batch_size The size of each batch of results. # # @return [ Integer, View ] Either the batch_size value or a # new +View+. # # @since 2.0.0 def batch_size(batch_size = nil) configure(:batch_size, batch_size) end # Associate a comment with the query. # # @example Add a comment. # view.comment('slow query') # # @note Set profilingLevel to 2 and the comment will be logged in the profile # collection along with the query. # # @param [ String ] comment The comment to be associated with the query. # # @return [ String, View ] Either the comment or a # new +View+. # # @since 2.0.0 def comment(comment = nil) configure(:comment, comment) end # Get a count of matching documents in the collection. # # @example Get the number of documents in the collection. # collection_view.count # # @param [ Hash ] opts Options for the count command. # # @option opts :skip [ Integer ] The number of documents to skip. # @option opts :hint [ Hash ] Override default index selection and force # MongoDB to use a specific index for the query. # @option opts :limit [ Integer ] Max number of docs to return. # @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the # command to run. # @option opts [ Hash ] :read The read preference options. # @option opts [ Hash ] :collation The collation to use. # # @return [ Integer ] The document count. # # @since 2.0.0 def count(opts = {}) cmd = { :count => collection.name, :query => filter } cmd[:skip] = opts[:skip] if opts[:skip] cmd[:hint] = opts[:hint] if opts[:hint] cmd[:limit] = opts[:limit] if opts[:limit] cmd[:maxTimeMS] = opts[:max_time_ms] if opts[:max_time_ms] cmd[:readConcern] = collection.read_concern if collection.read_concern read_pref = opts[:read] || read_preference selector = ServerSelector.get(read_pref || server_selector) read_with_retry do server = selector.select_server(cluster, false) apply_collation!(cmd, server, opts) with_session(opts) do |session| Operation::Commands::Count.new({ :selector => cmd, :db_name => database.name, :options => {:limit => -1}, :read => read_pref, :session => session }).execute(server) end.n.to_i end end # Get a list of distinct values for a specific field. # # @example Get the distinct values. # collection_view.distinct('name') # # @param [ String, Symbol ] field_name The name of the field. # @param [ Hash ] opts Options for the distinct command. # # @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the # command to run. 
# @option opts [ Hash ] :read The read preference options. # @option opts [ Hash ] :collation The collation to use. # # @return [ Array ] The list of distinct values. # # @since 2.0.0 def distinct(field_name, opts = {}) cmd = { :distinct => collection.name, :key => field_name.to_s, :query => filter } cmd[:maxTimeMS] = opts[:max_time_ms] if opts[:max_time_ms] cmd[:readConcern] = collection.read_concern if collection.read_concern read_pref = opts[:read] || read_preference selector = ServerSelector.get(read_pref || server_selector) read_with_retry do server = selector.select_server(cluster, false) apply_collation!(cmd, server, opts) with_session(opts) do |session| Operation::Commands::Distinct.new({ :selector => cmd, :db_name => database.name, :options => {:limit => -1}, :read => read_pref, :session => session }).execute(server) end.first['values'] end end # The index that MongoDB will be forced to use for the query. # # @example Set the index hint. # view.hint(name: 1) # # @param [ Hash ] hint The index to use for the query. # # @return [ Hash, View ] Either the hint or a new +View+. # # @since 2.0.0 def hint(hint = nil) configure(:hint, hint) end # The max number of docs to return from the query. # # @example Set the limit. # view.limit(5) # # @param [ Integer ] limit The number of docs to return. # # @return [ Integer, View ] Either the limit or a new +View+. # # @since 2.0.0 def limit(limit = nil) configure(:limit, limit) end # Execute a map/reduce operation on the collection view. # # @example Execute a map/reduce. # view.map_reduce(map, reduce) # # @param [ String ] map The map js function. # @param [ String ] reduce The reduce js function. # @param [ Hash ] options The map/reduce options. # # @return [ MapReduce ] The map reduce wrapper. # # @since 2.0.0 def map_reduce(map, reduce, options = {}) MapReduce.new(self, map, reduce, @options.merge(options)) end # Set the max number of documents to scan. # # @example Set the max scan value. # view.max_scan(1000) # # @param [ Integer ] value The max number to scan. # # @return [ Integer, View ] The value or a new +View+. # # @since 2.0.0 def max_scan(value = nil) configure(:max_scan, value) end # Set the maximum value to search. # # @example Set the max value. # view.max_value(_id: 1) # # @param [ Hash ] value The max field and value. # # @return [ Hash, View ] The value or a new +View+. # # @since 2.1.0 def max_value(value = nil) configure(:max_value, value) end # Set the minimum value to search. # # @example Set the min value. # view.min_value(_id: 1) # # @param [ Hash ] value The min field and value. # # @return [ Hash, View ] The value or a new +View+. # # @since 2.1.0 def min_value(value = nil) configure(:min_value, value) end # The server normally times out idle cursors after an inactivity period # (10 minutes) to prevent excess memory use. Set this option to prevent that. # # @example Set the cursor to not timeout. # view.no_cursor_timeout # # @return [ View ] The new view. # # @since 2.0.0 def no_cursor_timeout configure(:no_cursor_timeout, true) end # The fields to include or exclude from each doc in the result set. # # @example Set the fields to include or exclude. # view.projection(name: 1) # # @note A value of 0 excludes a field from the doc. A value of 1 includes it. # Values must all be 0 or all be 1, with the exception of the _id value. # The _id field is included by default. It must be excluded explicitly. # # @param [ Hash ] document The field and 1 or 0, to include or exclude it. 
# # @return [ Hash, View ] Either the fields or a new +View+. # # @since 2.0.0 def projection(document = nil) validate_doc!(document) if document configure(:projection, document) end # The read preference to use for the query. # # @note If none is specified for the query, the read preference of the # collection will be used. # # @param [ Hash ] value The read preference mode to use for the query. # # @return [ Symbol, View ] Either the read preference or a # new +View+. # # @since 2.0.0 def read(value = nil) return read_preference if value.nil? configure(:read, value) end # Set whether to return only the indexed field or fields. # # @example Set the return key value. # view.return_key(true) # # @param [ true, false ] value The return key value. # # @return [ true, false, View ] The value or a new +View+. # # @since 2.1.0 def return_key(value = nil) configure(:return_key, value) end # Set whether the disk location should be shown for each document. # # @example Set show disk location option. # view.show_disk_loc(true) # # @param [ true, false ] value The value for the field. # # @return [ true, false, View ] Either the value or a new # +View+. # # @since 2.0.0 def show_disk_loc(value = nil) configure(:show_disk_loc, value) end alias :show_record_id :show_disk_loc # The number of docs to skip before returning results. # # @example Set the number to skip. # view.skip(10) # # @param [ Integer ] number Number of docs to skip. # # @return [ Integer, View ] Either the skip value or a # new +View+. # # @since 2.0.0 def skip(number = nil) configure(:skip, number) end # Set the snapshot value for the view. # # @note When set to true, prevents documents from returning more than # once. # # @example Set the snapshot value. # view.snapshot(true) # # @param [ true, false ] value The snapshot value. # # @since 2.0.0 def snapshot(value = nil) configure(:snapshot, value) end # The key and direction pairs by which the result set will be sorted. # # @example Set the sort criteria # view.sort(name: -1) # # @param [ Hash ] spec The attributes and directions to sort by. # # @return [ Hash, View ] Either the sort setting or a # new +View+. # # @since 2.0.0 def sort(spec = nil) configure(:sort, spec) end # “meta” operators that let you modify the output or behavior of a query. # # @example Set the modifiers document. # view.modifiers(:$orderby => Mongo::Index::ASCENDING) # # @param [ Hash ] doc The modifiers document. # # @return [ Hash, View ] Either the modifiers document or a new +View+. # # @since 2.1.0 def modifiers(doc = nil) return Builder::Modifiers.map_server_modifiers(options) if doc.nil? new(options.merge(Builder::Modifiers.map_driver_options(doc))) end # A cumulative time limit in milliseconds for processing get more operations # on a cursor. # # @example Set the max await time ms value. # view.max_await_time_ms(500) # # @param [ Integer ] max The max time in milliseconds. # # @return [ Integer, View ] Either the max await time ms value or a new +View+. # # @since 2.1.0 def max_await_time_ms(max = nil) configure(:max_await_time_ms, max) end # A cumulative time limit in milliseconds for processing operations on a cursor. # # @example Set the max time ms value. # view.max_time_ms(500) # # @param [ Integer ] max The max time in milliseconds. # # @return [ Integer, View ] Either the max time ms value or a new +View+. # # @since 2.1.0 def max_time_ms(max = nil) configure(:max_time_ms, max) end # The type of cursor to use. Can be :tailable or :tailable_await. # # @example Set the cursor type. 
# view.cursor_type(:tailable) # # @param [ :tailable, :tailable_await ] type The cursor type. # # @return [ :tailable, :tailable_await, View ] Either the cursor type setting or a new +View+. # # @since 2.3.0 def cursor_type(type = nil) configure(:cursor_type, type) end private def collation(doc = nil) configure(:collation, doc) end def read_preference @read_preference ||= (options[:read] || collection.read_preference) end def server_selector @server_selector ||= ServerSelector.get(read_preference || collection.server_selector) end def parallel_scan(cursor_count, options = {}) session = client.send(:get_session, @options) server = server_selector.select_server(cluster, false) cmd = Operation::Commands::ParallelScan.new({ :coll_name => collection.name, :db_name => database.name, :cursor_count => cursor_count, :read_concern => collection.read_concern, :session => session }.merge!(options)) cmd.execute(server).cursor_ids.map do |cursor_id| result = if server.features.find_command_enabled? Operation::Commands::GetMore.new({ :selector => {:getMore => cursor_id, :collection => collection.name}, :db_name => database.name, :session => session }).execute(server) else Operation::Read::GetMore.new({ :to_return => 0, :cursor_id => cursor_id, :db_name => database.name, :coll_name => collection.name }).execute(server) end Cursor.new(self, result, server, session: session) end end def validate_doc!(doc) raise Error::InvalidDocument.new unless doc.respond_to?(:keys) end end end end end mongo-2.5.1/lib/mongo/collection/view/change_stream.rb0000644000004100000410000001414313257253113023012 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/collection/view/change_stream/retryable' module Mongo class Collection class View # Provides behaviour around a `$changeStream` pipeline stage in the # aggregation framework. Specifying this stage allows users to request that # notifications are sent for all changes to a particular collection or database. # # @note Only available in server versions 3.6 and higher. # @note ChangeStreams do not work properly with JRuby because of the issue documented # here: https://github.com/jruby/jruby/issues/4212 # Namely, JRuby eagerly evaluates #next on an Enumerator in a background green thread. # So calling #next on the change stream will cause getmores to be called in a loop in the background. # # # @since 2.5.0 class ChangeStream < Aggregation include Retryable # @return [ String ] The fullDocument option default value. # # @since 2.5.0 FULL_DOCUMENT_DEFAULT = 'default'.freeze # @return [ BSON::Document ] The change stream options. # # @since 2.5.0 attr_reader :options # Initialize the change stream for the provided collection view, pipeline # and options. # # @example Create the new change stream view. # ChangeStream.new(view, pipeline, options) # # @param [ Collection::View ] view The collection view. # @param [ Array ] pipeline The pipeline of operators to filter the change notifications. 
# @param [ Hash ] options The change stream options. # # @option options [ String ] :full_document Allowed values: ‘default’, ‘updateLookup’. Defaults to ‘default’. # When set to ‘updateLookup’, the change notification for partial updates will include both a delta # describing the changes to the document, as well as a copy of the entire document that was changed # from some time after the change occurred. # @option options [ BSON::Document, Hash ] :resume_after Specifies the logical starting point for the # new change stream. # @option options [ Integer ] :max_await_time_ms The maximum amount of time for the server to wait # on new documents to satisfy a change stream query. # @option options [ Integer ] :batch_size The number of documents to return per batch. # @option options [ BSON::Document, Hash ] :collation The collation to use. # # @since 2.5.0 def initialize(view, pipeline, options = {}) @view = view @change_stream_filters = pipeline && pipeline.dup @options = options && options.dup.freeze @resume_token = @options[:resume_after] read_with_one_retry { create_cursor! } end # Iterate through documents returned by the change stream. # # @example Iterate through the stream of documents. # stream.each do |document| # p document # end # # @return [ Enumerator ] The enumerator. # # @since 2.5.0 # # @yieldparam [ BSON::Document ] Each change stream document. def each raise StopIteration.new if closed? begin @cursor.each do |doc| cache_resume_token(doc) yield doc end if block_given? @cursor.to_enum rescue => e close if retryable?(e) create_cursor! retry end raise end end # Close the change stream. # # @example Close the change stream. # stream.close # # @return [ nil ] nil. # # @since 2.5.0 def close unless closed? begin; @cursor.send(:kill_cursors); rescue; end @cursor = nil end end # Is the change stream closed? # # @example Determine whether the change stream is closed. # stream.closed? # # @return [ true, false ] If the change stream is closed. # # @since 2.5.0 def closed? @cursor.nil? end # Get a formatted string for use in inspection. # # @example Inspect the change stream object. # stream.inspect # # @return [ String ] The change stream inspection. # # @since 2.5.0 def inspect "#" end private def cache_resume_token(doc) unless @resume_token = (doc[:_id] && doc[:_id].dup) raise Error::MissingResumeToken.new end end def create_cursor! session = client.send(:get_session, @options) server = server_selector.select_server(cluster, false) result = send_initial_query(server, session) @cursor = Cursor.new(view, result, server, disable_retry: true, session: session) end def pipeline change_doc = { fullDocument: ( @options[:full_document] || FULL_DOCUMENT_DEFAULT ) } change_doc[:resumeAfter] = @resume_token if @resume_token [{ '$changeStream' => change_doc }] + @change_stream_filters end def send_initial_query(server, session) initial_query_op(session).execute(server) end end end end end mongo-2.5.1/lib/mongo/collection/view/explainable.rb0000644000004100000410000000335413257253113022500 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
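      # NOTE: a minimal usage sketch for the ChangeStream view defined in
      # change_stream.rb above, assuming `client` is an already connected
      # Mongo::Client pointed at a MongoDB 3.6+ replica set and that the
      # :inventory collection exists (both names are placeholders). Change
      # streams are normally opened through Collection#watch; the resume token
      # is cached from each returned document so the cursor can be re-created
      # after a retryable error.
      #
      #   stream = client[:inventory].watch(
      #     [{ '$match' => { 'operationType' => 'insert' } }]
      #   )
      #   stream.each do |change|
      #     p change['fullDocument']
      #   end
      #   stream.close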
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class Collection class View # Defines explain related behaviour for collection view. # # @since 2.0.0 module Explainable # The query planner verbosity constant. # # @since 2.2.0 QUERY_PLANNER = 'queryPlanner'.freeze # The execution stats verbosity constant. # # @since 2.2.0 EXECUTION_STATS = 'executionStats'.freeze # The all plans execution verbosity constant. # # @since 2.2.0 ALL_PLANS_EXECUTION = 'allPlansExecution'.freeze # Get the explain plan for the query. # # @example Get the explain plan for the query. # view.explain # # @return [ Hash ] A single document with the explain plan. # # @since 2.0.0 def explain self.class.new(collection, selector, options.merge(explain_options)).first end private def explained? !!options[:explain] end def explain_options explain_limit = limit || 0 { :limit => -explain_limit.abs, :explain => true } end end end end end mongo-2.5.1/lib/mongo/collection/view/builder.rb0000644000004100000410000000156613257253113021645 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/collection/view/builder/aggregation' require 'mongo/collection/view/builder/map_reduce' require 'mongo/collection/view/builder/op_query' require 'mongo/collection/view/builder/find_command' require 'mongo/collection/view/builder/flags' require 'mongo/collection/view/builder/modifiers' mongo-2.5.1/lib/mongo/cluster.rb0000644000004100000410000003740413257253113016573 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/cluster/topology' require 'mongo/cluster/reapers/socket_reaper' require 'mongo/cluster/reapers/cursor_reaper' require 'mongo/cluster/periodic_executor' require 'mongo/cluster/app_metadata' module Mongo # Represents a group of servers on the server side, either as a single server, a # replica set, or a single or multiple mongos. # # @since 2.0.0 class Cluster extend Forwardable include Monitoring::Publishable include Event::Subscriber include Loggable # The default number of mongos read retries. # # @since 2.1.1 MAX_READ_RETRIES = 1 # The default number of mongos write retries. # # @since 2.4.2 MAX_WRITE_RETRIES = 1 # The default mongos read retry interval, in seconds. # # @since 2.1.1 READ_RETRY_INTERVAL = 5 # How often an idle primary writes a no-op to the oplog. 
# # @since 2.4.0 IDLE_WRITE_PERIOD_SECONDS = 10 # The cluster time key in responses from mongos servers. # # @since 2.5.0 CLUSTER_TIME = 'clusterTime'.freeze # @return [ Hash ] The options hash. attr_reader :options # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # @return [ Object ] The cluster topology. attr_reader :topology # @return [ Mongo::Cluster::AppMetadata ] The application metadata, used for connection # handshakes. # # @since 2.4.0 attr_reader :app_metadata # @return [ BSON::Document ] The latest cluster time seen. # # @since 2.5.0 attr_reader :cluster_time # @private # # @since 2.5.1 attr_reader :session_pool def_delegators :topology, :replica_set?, :replica_set_name, :sharded?, :single?, :unknown?, :member_discovered def_delegators :@cursor_reaper, :register_cursor, :schedule_kill_cursor, :unregister_cursor # Determine if this cluster of servers is equal to another object. Checks the # servers currently in the cluster, not what was configured. # # @example Is the cluster equal to the object? # cluster == other # # @param [ Object ] other The object to compare to. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Cluster) addresses == other.addresses && options == other.options end # Add a server to the cluster with the provided address. Useful in # auto-discovery of new servers when an existing server executes an ismaster # and potentially non-configured servers were included. # # @example Add the server for the address to the cluster. # cluster.add('127.0.0.1:27018') # # @param [ String ] host The address of the server to add. # # @return [ Server ] The newly added server, if not present already. # # @since 2.0.0 def add(host) address = Address.new(host, options) if !addresses.include?(address) if addition_allowed?(address) @update_lock.synchronize { @addresses.push(address) } server = Server.new(address, self, @monitoring, event_listeners, options) @update_lock.synchronize { @servers.push(server) } server end end end # Determine if the cluster would select a readable server for the # provided read preference. # # @example Is a readable server present? # topology.has_readable_server?(server_selector) # # @param [ ServerSelector ] server_selector The server # selector. # # @return [ true, false ] If a readable server is present. # # @since 2.4.0 def has_readable_server?(server_selector = nil) topology.has_readable_server?(self, server_selector) end # Determine if the cluster would select a writable server. # # @example Is a writable server present? # topology.has_writable_server? # # @return [ true, false ] If a writable server is present. # # @since 2.4.0 def has_writable_server? topology.has_writable_server?(self) end # Instantiate the new cluster. # # @api private # # @example Instantiate the cluster. # Mongo::Cluster.new(["127.0.0.1:27017"], monitoring) # # @note Cluster should never be directly instantiated outside of a Client. # # @param [ Array ] seeds The addresses of the configured servers. # @param [ Monitoring ] monitoring The monitoring. # @param [ Hash ] options The options. 
# # @since 2.0.0 def initialize(seeds, monitoring, options = Options::Redacted.new) @addresses = [] @servers = [] @monitoring = monitoring @event_listeners = Event::Listeners.new @options = options.freeze @app_metadata = AppMetadata.new(self) @update_lock = Mutex.new @pool_lock = Mutex.new @cluster_time = nil @cluster_time_lock = Mutex.new @topology = Topology.initial(seeds, monitoring, options) Session::SessionPool.create(self) publish_sdam_event( Monitoring::TOPOLOGY_OPENING, Monitoring::Event::TopologyOpening.new(@topology) ) subscribe_to(Event::STANDALONE_DISCOVERED, Event::StandaloneDiscovered.new(self)) subscribe_to(Event::DESCRIPTION_CHANGED, Event::DescriptionChanged.new(self)) subscribe_to(Event::MEMBER_DISCOVERED, Event::MemberDiscovered.new(self)) seeds.each{ |seed| add(seed) } publish_sdam_event( Monitoring::TOPOLOGY_CHANGED, Monitoring::Event::TopologyChanged.new(@topology, @topology) ) if @servers.size > 1 @cursor_reaper = CursorReaper.new @socket_reaper = SocketReaper.new(self) @periodic_executor = PeriodicExecutor.new(@cursor_reaper, @socket_reaper) @periodic_executor.run! ObjectSpace.define_finalizer(self, self.class.finalize(pools, @periodic_executor, @session_pool)) end # Finalize the cluster for garbage collection. Disconnects all the scoped # connection pools. # # @example Finalize the cluster. # Cluster.finalize(pools) # # @param [ Hash ] pools The connection pools. # @param [ PeriodicExecutor ] periodic_executor The periodic executor. # @param [ SessionPool ] session_pool The session pool. # # @return [ Proc ] The Finalizer. # # @since 2.2.0 def self.finalize(pools, periodic_executor, session_pool) proc do session_pool.end_sessions periodic_executor.stop! pools.values.each do |pool| pool.disconnect! end end end # Get the nicer formatted string for use in inspection. # # @example Inspect the cluster. # cluster.inspect # # @return [ String ] The cluster inspection. # # @since 2.0.0 def inspect "#" end # Get the next primary server we can send an operation to. # # @example Get the next primary server. # cluster.next_primary # # @param [ true, false ] ping Whether to ping the server before selection. # # @return [ Mongo::Server ] A primary server. # # @since 2.0.0 def next_primary(ping = true) @primary_selector ||= ServerSelector.get(ServerSelector::PRIMARY) @primary_selector.select_server(self, ping) end # Elect a primary server from the description that has just changed to a # primary. # # @example Elect a primary server. # cluster.elect_primary!(description) # # @param [ Server::Description ] description The newly elected primary. # # @return [ Topology ] The cluster topology. # # @since 2.0.0 def elect_primary!(description) @topology = topology.elect_primary(description, servers_list) end # Get the maximum number of times the cluster can retry a read operation on # a mongos. # # @example Get the max read retries. # cluster.max_read_retries # # @return [ Integer ] The maximum retries. # # @since 2.1.1 def max_read_retries options[:max_read_retries] || MAX_READ_RETRIES end # Get the scoped connection pool for the server. # # @example Get the connection pool. # cluster.pool(server) # # @param [ Server ] server The server. # # @return [ Server::ConnectionPool ] The connection pool. # # @since 2.2.0 def pool(server) @pool_lock.synchronize do pools[server.address] ||= Server::ConnectionPool.get(server) end end # Get the interval, in seconds, in which a mongos read operation is # retried. # # @example Get the read retry interval. 
# cluster.read_retry_interval # # @return [ Float ] The interval. # # @since 2.1.1 def read_retry_interval options[:read_retry_interval] || READ_RETRY_INTERVAL end # Notify the cluster that a standalone server was discovered so that the # topology can be updated accordingly. # # @example Notify the cluster that a standalone server was discovered. # cluster.standalone_discovered # # @return [ Topology ] The cluster topology. # # @since 2.0.6 def standalone_discovered @topology = topology.standalone_discovered end # Remove the server from the cluster for the provided address, if it # exists. # # @example Remove the server from the cluster. # server.remove('127.0.0.1:27017') # # @param [ String ] host The host/port or socket address. # # @since 2.0.0 def remove(host) address = Address.new(host) removed_servers = @servers.select { |s| s.address == address } @update_lock.synchronize { @servers = @servers - removed_servers } removed_servers.each{ |server| server.disconnect! } if removed_servers publish_sdam_event( Monitoring::SERVER_CLOSED, Monitoring::Event::ServerClosed.new(address, topology) ) @update_lock.synchronize { @addresses.reject! { |addr| addr == address } } end # Force a scan of all known servers in the cluster. # # @example Force a full cluster scan. # cluster.scan! # # @note This operation is done synchronously. If servers in the cluster are # down or slow to respond this can potentially be a slow operation. # # @return [ true ] Always true. # # @since 2.0.0 def scan! servers_list.each{ |server| server.scan! } and true end # Get a list of server candidates from the cluster that can have operations # executed on them. # # @example Get the server candidates for an operation. # cluster.servers # # @return [ Array ] The candidate servers. # # @since 2.0.0 def servers topology.servers(servers_list.compact).compact end # Disconnect all servers. # # @example Disconnect the cluster's servers. # cluster.disconnect! # # @return [ true ] Always true. # # @since 2.1.0 def disconnect! @periodic_executor.stop! @servers.each { |server| server.disconnect! } and true end # Reconnect all servers. # # @example Reconnect the cluster's servers. # cluster.reconnect! # # @return [ true ] Always true. # # @since 2.1.0 def reconnect! scan! servers.each { |server| server.reconnect! } @periodic_executor.restart! and true end # Add hosts in a description to the cluster. # # @example Add hosts in a description to the cluster. # cluster.add_hosts(description) # # @param [ Mongo::Server::Description ] description The description. # # @since 2.0.6 def add_hosts(description) if topology.add_hosts?(description, servers_list) description.servers.each { |s| add(s) } end end # Remove hosts in a description from the cluster. # # @example Remove hosts in a description from the cluster. # cluster.remove_hosts(description) # # @param [ Mongo::Server::Description ] description The description. # # @since 2.0.6 def remove_hosts(description) if topology.remove_hosts?(description) servers_list.each do |s| remove(s.address.to_s) if topology.remove_server?(description, s) end end end # Create a cluster for the provided client, for use when we don't want the # client's original cluster instance to be the same. # # @api private # # @example Create a cluster for the client. # Cluster.create(client) # # @param [ Client ] client The client to create on. # # @return [ Cluster ] The cluster. 
# # @since 2.0.0 def self.create(client) cluster = Cluster.new( client.cluster.addresses.map(&:to_s), client.instance_variable_get(:@monitoring).dup, client.options ) client.instance_variable_set(:@cluster, cluster) end # The addresses in the cluster. # # @example Get the addresses in the cluster. # cluster.addresses # # @return [ Array ] The addresses. # # @since 2.0.6 def addresses addresses_list end # The logical session timeout value in minutes. # # @example Get the logical session timeout in minutes. # cluster.logical_session_timeout # # @return [ Integer, nil ] The logical session timeout. # # @since 2.5.0 def logical_session_timeout servers.inject(nil) do |min, server| break unless timeout = server.logical_session_timeout [timeout, (min || timeout)].min end end # Update the max cluster time seen in a response. # # @example Update the cluster time. # cluster.update_cluster_time(result) # # @param [ Operation::Result ] result The operation result containing the cluster time. # # @return [ Object ] The cluster time. # # @since 2.5.0 def update_cluster_time(result) if cluster_time_doc = result.cluster_time @cluster_time_lock.synchronize do if @cluster_time.nil? @cluster_time = cluster_time_doc elsif cluster_time_doc[CLUSTER_TIME] > @cluster_time[CLUSTER_TIME] @cluster_time = cluster_time_doc end end end end private def get_session(options = {}) return options[:session].validate!(self) if options[:session] if sessions_supported? Session.new(@session_pool.checkout, self, { implicit: true }.merge(options)) end end def with_session(options = {}) session = get_session(options) yield(session) ensure session.end_session if (session && session.implicit?) end def sessions_supported? if servers.empty? && !topology.single? ServerSelector.get(mode: :primary_preferred).select_server(self) end !!logical_session_timeout rescue Error::NoServerAvailable end def direct_connection?(address) address.seed == @topology.seed end def addition_allowed?(address) !@topology.single? || direct_connection?(address) end def pools @pools ||= {} end def servers_list @update_lock.synchronize { @servers.dup } end def addresses_list @update_lock.synchronize { @addresses.dup } end end end mongo-2.5.1/lib/mongo/protocol/0000755000004100000410000000000013257253113016416 5ustar www-datawww-datamongo-2.5.1/lib/mongo/protocol/serializers.rb0000644000004100000410000003523513257253113021307 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
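# Illustrative usage sketch (assumed, not part of the public API): every
# serializer strategy in this file writes to and reads from a BSON::ByteBuffer,
# roughly like so:
#
#   buffer = BSON::ByteBuffer.new
#   Mongo::Protocol::Serializers::Int32.serialize(buffer, 42)
#   Mongo::Protocol::Serializers::Int32.deserialize(
#     BSON::ByteBuffer.new(buffer.to_s)
#   ) # => 42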
module Mongo module Protocol # Container for various serialization strategies # # Each strategy must have a serialization method named +serailize+ # and a deserialization method named +deserialize+ # # Serialize methods must take buffer and value arguements and # serialize the value into the buffer # # Deserialize methods must take an IO stream argument and # deserialize the value from the stream of bytes # # @api private module Serializers private ZERO = 0.freeze NULL = 0.chr.freeze INT32_PACK = 'l<'.freeze INT64_PACK = 'q<'.freeze HEADER_PACK = 'l ] Array consisting of the deserialized # length, request id, response id, and op code. def self.deserialize(buffer) buffer.get_bytes(16).unpack(HEADER_PACK) end end # MongoDB wire protocol serialization strategy for C style strings. # # Serializes and de-serializes C style strings (null terminated). module CString # Serializes a C style string into the buffer # # @param buffer [ String ] Buffer to receive the serialized CString. # @param value [ String ] The string to be serialized. # # @return [ String ] Buffer with serialized value. def self.serialize(buffer, value, validating_keys = BSON::Config.validating_keys?) buffer.put_cstring(value) end end # MongoDB wire protocol serialization strategy for 32-bit Zero. # # Serializes and de-serializes one 32-bit Zero. module Zero # Serializes a 32-bit Zero into the buffer # # @param buffer [ String ] Buffer to receive the serialized Zero. # @param value [ Fixnum ] Ignored value. # # @return [ String ] Buffer with serialized value. def self.serialize(buffer, value, validating_keys = BSON::Config.validating_keys?) buffer.put_int32(ZERO) end end # MongoDB wire protocol serialization strategy for 32-bit integers. # # Serializes and de-serializes one 32-bit integer. module Int32 # Serializes a fixnum to a 4-byte 32-bit integer # # @param buffer [ String ] Buffer to receive the serialized Int32. # @param value [ Fixnum ] 32-bit integer to be serialized. # # @return [String] Buffer with serialized value. def self.serialize(buffer, value, validating_keys = BSON::Config.validating_keys?) buffer.put_int32(value) end # Deserializes a 32-bit Fixnum from the IO stream # # @param [ String ] buffer Buffer containing the 32-bit integer # # @return [ Fixnum ] Deserialized Int32 def self.deserialize(buffer) buffer.get_int32 end end # MongoDB wire protocol serialization strategy for 64-bit integers. # # Serializes and de-serializes one 64-bit integer. module Int64 # Serializes a fixnum to an 8-byte 64-bit integer # # @param buffer [ String ] Buffer to receive the serialized Int64. # @param value [ Fixnum ] 64-bit integer to be serialized. # # @return [ String ] Buffer with serialized value. def self.serialize(buffer, value, validating_keys = BSON::Config.validating_keys?) buffer.put_int64(value) end # Deserializes a 64-bit Fixnum from the IO stream # # @param [ String ] buffer Buffer containing the 64-bit integer. # # @return [Fixnum] Deserialized Int64. def self.deserialize(buffer) buffer.get_int64 end end # MongoDB wire protocol serialization strategy for a Section of OP_MSG. # # Serializes and de-serializes a list of Sections. # # @since 2.5.0 module Sections # Serializes the sections of an OP_MSG, payload type 0 or 1. # # @param [ BSON::ByteBuffer ] buffer Buffer to receive the serialized Sections. # @param [ Array ] value The sections to be serialized. # @param [ Fixnum ] max_bson_size The max bson size of documents in the sections. # @param [ true, false ] validating_keys Whether to validate document keys. 
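        # Each element of +value+ is expected to be a hash shaped roughly like
        # +{ type: 0, payload: { ... } }+ for payload type 0, or
        # +{ type: 1, payload: { identifier: 'documents', sequence: [ ... ] } }+
        # for payload type 1, mirroring the case statement below.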
# # @return [ BSON::ByteBuffer ] Buffer with serialized value. # # @since 2.5.0 def self.serialize(buffer, value, max_bson_size = nil, validating_keys = BSON::Config.validating_keys?) value.each do |section| case section[:type] when PayloadZero::TYPE PayloadZero.serialize(buffer, section[:payload], max_bson_size, false) when nil PayloadZero.serialize(buffer, section[:payload], max_bson_size, false) when PayloadOne::TYPE PayloadOne.serialize(buffer, section[:payload], max_bson_size, validating_keys) else raise Error::UnknownPayloadType.new(section[:type]) end end end # Deserializes a section of an OP_MSG from the IO stream. # # @param [ BSON::ByteBuffer ] buffer Buffer containing the sections. # # @return [ Array ] Deserialized sections. # # @since 2.5.0 def self.deserialize(buffer) end_length = (@flag_bits & Msg::FLAGS.index(:checksum_present)) == 1 ? 32 : 0 sections = [] until buffer.length == end_length case byte = buffer.get_byte when PayloadZero::TYPE_BYTE sections << PayloadZero.deserialize(buffer) when PayloadOne::TYPE_BYTE sections += PayloadOne.deserialize(buffer) else raise Error::UnknownPayloadType.new(byte) end end sections end # Whether there can be a size limit on this type after serialization. # # @return [ true ] Documents can be size limited upon serialization. # # @since 2.5.0 def self.size_limited? true end # MongoDB wire protocol serialization strategy for a payload 0 type Section of OP_MSG. # # @since 2.5.0 module PayloadZero # The byte identifier for this payload type. # # @since 2.5.0 TYPE = 0x0 # The byte corresponding to this payload type. # # @since 2.5.0 TYPE_BYTE = TYPE.chr.force_encoding(BSON::BINARY).freeze # Serializes a section of an OP_MSG, payload type 0. # # @param [ BSON::ByteBuffer ] buffer Buffer to receive the serialized Sections. # @param [ BSON::Document, Hash ] value The object to serialize. # @param [ Fixnum ] max_bson_size The max bson size of documents in the section. # @param [ true, false ] validating_keys Whether to validate document keys. # # @return [ BSON::ByteBuffer ] Buffer with serialized value. # # @since 2.5.0 def self.serialize(buffer, value, max_bson_size = nil, validating_keys = BSON::Config.validating_keys?) buffer.put_byte(TYPE_BYTE) Serializers::Document.serialize(buffer, value, max_bson_size, validating_keys) end # Deserializes a section of payload type 0 of an OP_MSG from the IO stream. # # @param [ BSON::ByteBuffer ] buffer Buffer containing the sections. # # @return [ Array ] Deserialized section. # # @since 2.5.0 def self.deserialize(buffer) BSON::Document.from_bson(buffer) end end # MongoDB wire protocol serialization strategy for a payload 1 type Section of OP_MSG. # # @since 2.5.0 module PayloadOne # The byte identifier for this payload type. # # @since 2.5.0 TYPE = 0x1 # The byte corresponding to this payload type. # # @since 2.5.0 TYPE_BYTE = TYPE.chr.force_encoding(BSON::BINARY).freeze # Serializes a section of an OP_MSG, payload type 1. # # @param [ BSON::ByteBuffer ] buffer Buffer to receive the serialized Sections. # @param [ BSON::Document, Hash ] value The object to serialize. # @param [ Fixnum ] max_bson_size The max bson size of documents in the section. # @param [ true, false ] validating_keys Whether to validate document keys. # # @return [ BSON::ByteBuffer ] Buffer with serialized value. # # @since 2.5.0 def self.serialize(buffer, value, max_bson_size = nil, validating_keys = BSON::Config.validating_keys?) 
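          # Wire layout written below: the payload type byte, an int32 size
          # placeholder that is back-filled once the section is complete, the
          # C-string identifier, then the BSON documents in sequence.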
buffer.put_byte(TYPE_BYTE) start = buffer.length buffer.put_int32(0) # hold for size buffer.put_cstring(value[:identifier]) value[:sequence].each do |document| Document.serialize(buffer, document, max_bson_size, validating_keys) end buffer.replace_int32(start, buffer.length - start) end # Deserializes a section of payload type 1 of an OP_MSG from the IO stream. # # @param [ BSON::ByteBuffer ] buffer Buffer containing the sections. # # @return [ Array ] Deserialized section. # # @since 2.5.0 def self.deserialize(buffer) start_size = buffer.length section_size = buffer.get_int32 # get the size end_size = start_size - section_size buffer.get_cstring # get the identifier documents = [] until buffer.length == end_size documents << BSON::Document.from_bson(buffer) end documents end end end # MongoDB wire protocol serialization strategy for a BSON Document. # # Serializes and de-serializes a single document. module Document # Serializes a document into the buffer # # @param buffer [ String ] Buffer to receive the BSON encoded document. # @param value [ Hash ] Document to serialize as BSON. # # @return [ String ] Buffer with serialized value. def self.serialize(buffer, value, max_bson_size = nil, validating_keys = BSON::Config.validating_keys?) start_size = buffer.length value.to_bson(buffer, validating_keys) if max_bson_size && buffer.length - start_size > max_bson_size raise Error::MaxBSONSize.new(max_bson_size) end end # Deserializes a document from the IO stream # # @param [ String ] buffer Buffer containing the BSON encoded document. # # @return [ Hash ] The decoded BSON document. def self.deserialize(buffer) BSON::Document.from_bson(buffer) end # Whether there can be a size limit on this type after serialization. # # @return [ true ] Documents can be size limited upon serialization. # # @since 2.0.0 def self.size_limited? true end end # MongoDB wire protocol serialization strategy for a single byte. # # Writes and fetches a single byte from the byte buffer. module Byte # Writes a byte into the buffer. # # @param [ BSON::ByteBuffer ] buffer Buffer to receive the single byte. # @param [ String ] value The byte to write to the buffer. # @param [ true, false ] validating_keys Whether to validate keys. # # @return [ BSON::ByteBuffer ] Buffer with serialized value. # # @since 2.5.0 def self.serialize(buffer, value, validating_keys = BSON::Config.validating_keys?) buffer.put_byte(value) end # Deserializes a byte from the byte buffer. # # @param [ BSON::ByteBuffer ] buffer Buffer containing the value to read. # # @return [ String ] The byte. # # @since 2.5.0 def self.deserialize(buffer) buffer.get_byte end end # MongoDB wire protocol serialization strategy for n bytes. # # Writes and fetches bytes from the byte buffer. module Bytes # Writes bytes into the buffer. # # @param [ BSON::ByteBuffer ] buffer Buffer to receive the bytes. # @param [ String ] value The bytes to write to the buffer. # @param [ true, false ] validating_keys Whether to validate keys. # # @return [ BSON::ByteBuffer ] Buffer with serialized value. # # @since 2.5.0 def self.serialize(buffer, value, validating_keys = BSON::Config.validating_keys?) buffer.put_bytes(value) end # Deserializes bytes from the byte buffer. # # @param [ BSON::ByteBuffer ] buffer Buffer containing the value to read. # @param [ Integer ] num_bytes Number of bytes to read. # # @return [ String ] The bytes. 
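      # @note When +num_bytes+ is nil, the remainder of the buffer is read
      #   (see the implementation below).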
# # @since 2.5.0 def self.deserialize(buffer, num_bytes = nil) buffer.get_bytes(num_bytes || buffer.length) end end end end end mongo-2.5.1/lib/mongo/protocol/get_more.rb0000644000004100000410000001146413257253113020552 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # MongoDB Wire protocol GetMore message. # # This is a client request message that is sent to the server in order # to retrieve additional documents from a cursor that has already been # instantiated. # # The operation requires that you specify the database and collection # name as well as the cursor id because cursors are scoped to a namespace. # # @api semipublic class GetMore < Message # Creates a new GetMore message # # @example Get 15 additional documents from cursor 123 in 'xgen.users'. # GetMore.new('xgen', 'users', 15, 123) # # @param database [String, Symbol] The database to query. # @param collection [String, Symbol] The collection to query. # @param number_to_return [Integer] The number of documents to return. # @param cursor_id [Integer] The cursor id returned in a reply. def initialize(database, collection, number_to_return, cursor_id) @database = database @namespace = "#{database}.#{collection}" @number_to_return = number_to_return @cursor_id = cursor_id @upconverter = Upconverter.new(collection, cursor_id, number_to_return) super end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.1.0 def payload BSON::Document.new( command_name: 'getMore', database_name: @database, command: upconverter.command, request_id: request_id ) end # Get more messages require replies from the database. # # @example Does the message require a reply? # message.replyable? # # @return [ true ] Always true for get more. # # @since 2.0.0 def replyable? true end protected attr_reader :upconverter private # The operation code required to specify a GetMore message. # @return [Fixnum] the operation code. # # @since 2.5.0 OP_CODE = 2005 # Field representing Zero encoded as an Int32 field :zero, Zero # @!attribute # @return [String] The namespace for this GetMore message. field :namespace, CString # @!attribute # @return [Fixnum] The number to return for this GetMore message. field :number_to_return, Int32 # @!attribute # @return [Fixnum] The cursor id to get more documents from. field :cursor_id, Int64 # Converts legacy getmore messages to the appropriare OP_COMMAND style # message. # # @since 2.1.0 class Upconverter # The get more constant. # # @since 2.2.0 GET_MORE = 'getMore'.freeze # @return [ String ] collection The name of the collection. attr_reader :collection # @return [ Integer ] cursor_id The cursor id. attr_reader :cursor_id # @return [ Integer ] number_to_return The number of docs to return. attr_reader :number_to_return # Instantiate the upconverter. # # @example Instantiate the upconverter. 
# Upconverter.new('users', 1, 1) # # @param [ String ] collection The name of the collection. # @param [ Integer ] cursor_id The cursor id. # @param [ Integer ] number_to_return The number of documents to # return. # # @since 2.1.0 def initialize(collection, cursor_id, number_to_return) @collection = collection @cursor_id = cursor_id @number_to_return = number_to_return end # Get the upconverted command. # # @example Get the command. # upconverter.command # # @return [ BSON::Document ] The upconverted command. # # @since 2.1.0 def command document = BSON::Document.new document.store(GET_MORE, cursor_id) document.store(Message::BATCH_SIZE, number_to_return) document.store(Message::COLLECTION, collection) document end end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/bit_vector.rb0000644000004100000410000000416013257253113021104 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol module Serializers # Class used to define a bitvector for a MongoDB wire protocol message. # # Defines serialization strategy upon initialization. # # @api private class BitVector # Initializes a BitVector with a layout # # @param layout [ Array ] the array of fields in the bit vector def initialize(layout) @masks = {} layout.each_with_index do |field, index| @masks[field] = 2**index if field end end # Serializes vector by encoding each symbol according to its mask # # @param buffer [ String ] Buffer to receive the serialized vector # @param value [ Array ] Array of flags to encode # # @return [ String ] Buffer that received the serialized vector def serialize(buffer, value, validating_keys = BSON::Config.validating_keys?) bits = 0 value.each { |flag| bits |= (@masks[flag] || 0) } buffer.put_int32(bits) end # Deserializes vector by decoding the symbol according to its mask # # @param [ String ] buffer Buffer containing the vector to be deserialized. # # @return [ Array ] Flags contained in the vector def deserialize(buffer) vector = buffer.get_int32 flags = [] @masks.each do |flag, mask| flags << flag if mask & vector != 0 end flags end end end end end mongo-2.5.1/lib/mongo/protocol/update.rb0000644000004100000410000001412313257253113020226 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # MongoDB Wire protocol Update message. 
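    # (OP_UPDATE, opcode 2001; per the field declarations further below, the
    # wire layout is a reserved zero Int32, the namespace CString, a flags bit
    # vector, then the selector and update documents.)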
# # This is a client request message that is sent to the server in order # to update documents matching the provided query. # # The default is to update a single document. In order to update many at # a time users should set the +:multi_update+ flag for the update. # # If an upsert (update or insert) is desired, users can set the +:upsert+ # flag in order to indicate they would like to insert the merged selector # and update if no document matching the update query currently exists. # # @api semipublic class Update < Message # Creates a new Update message # # @example Update single document # Update.new('xgen', 'users', {:name => 'Tyler'}, {:name => 'Bob'}) # # @example Perform a multi update # Update.new('xgen', 'users', # {:age => 20}, {:age => 21}, :flags => [:multi_update]) # # @example Perform an upsert # Update.new('xgen', 'users', {:name => 'Tyler'}, :flags => [:upsert]) # # @param database [String, Symbol] The database to update. # @param collection [String, Symbol] The collection to update. # @param selector [Hash] The update selector. # @param update [Hash] The update to perform. # @param options [Hash] The additional query options. # # @option options :flags [Array] The flags for the update message. # # Supported flags: +:upsert+, +:multi_update+ def initialize(database, collection, selector, update, options = {}) @database = database @collection = collection @namespace = "#{database}.#{collection}" @selector = selector @update = update @flags = options[:flags] || [] @upconverter = Upconverter.new(collection, selector, update, flags) super end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.1.0 def payload BSON::Document.new( command_name: 'update', database_name: @database, command: upconverter.command, request_id: request_id ) end protected attr_reader :upconverter private # The operation code required to specify an Update message. # @return [Fixnum] the operation code. # # @since 2.5.0 OP_CODE = 2001 # Available flags for an Update message. FLAGS = [:upsert, :multi_update] # Field representing Zero encoded as an Int32. field :zero, Zero # @!attribute # @return [String] The namespace for this Update message. field :namespace, CString # @!attribute # @return [Array] The flags for this Update message. field :flags, BitVector.new(FLAGS) # @!attribute # @return [Hash] The selector for this Update message. field :selector, Document # @!attribute # @return [Hash] The update for this Delete message. field :update, Document # Converts legacy update messages to the appropriare OP_COMMAND style # message. # # @since 2.1.0 class Upconverter # The multi constant. # # @since 2.2.0 MULTI = 'multi'.freeze # The u constant. # # @since 2.2.0 U = 'u'.freeze # The update constant. # # @since 2.2.0 UPDATE = 'update'.freeze # The updates constant. # # @since 2.2.0 UPDATES = 'updates'.freeze # The upsert constant. # # @since 2.2.0 UPSERT = 'upsert'.freeze # @return [ String ] collection The name of the collection. attr_reader :collection # @return [ Hash ] filter The filter. attr_reader :filter # @return [ Hash ] update The update. attr_reader :update # @return [ Array ] flags The flags. attr_reader :flags # Instantiate the upconverter. # # @example Instantiate the upconverter. # Upconverter.new( # 'users', # { name: 'test' }, # { '$set' => { 'name' => 't' }}, # [] # ) # # @param [ String ] collection The name of the collection. # @param [ Hash ] filter The filter. 
# @param [ Hash ] update The update. # @param [ Array ] flags The flags. # # @since 2.1.0 def initialize(collection, filter, update, flags) @collection = collection @filter = filter @update = update @flags = flags end # Get the upconverted command. # # @example Get the command. # upconverter.command # # @return [ BSON::Document ] The upconverted command. # # @since 2.1.0 def command document = BSON::Document.new updates = BSON::Document.new updates.store(Message::Q, filter) updates.store(U, update) updates.store(MULTI, flags.include?(:multi_update)) updates.store(UPSERT, flags.include?(:upsert)) document.store(UPDATE, collection) document.store(Message::ORDERED, true) document.store(UPDATES, [ updates ]) document end end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/reply.rb0000644000004100000410000001272413257253113020104 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # The MongoDB wire protocol message representing a reply # # @example # socket = TCPSocket.new('localhost', 27017) # query = Protocol::Query.new('xgen', 'users', {:name => 'Tyler'}) # socket.write(query) # reply = Protocol::Reply::deserialize(socket) # # @api semipublic class Reply < Message # Determine if the reply had a query failure flag. # # @example Did the reply have a query failure. # reply.query_failure? # # @return [ true, false ] If the query failed. # # @since 2.0.5 def query_failure? flags.include?(:query_failure) end # Determine if the reply had a cursor not found flag. # # @example Did the reply have a cursor not found flag. # reply.cursor_not_found? # # @return [ true, false ] If the query cursor was not found. # # @since 2.2.3 def cursor_not_found? flags.include?(:cursor_not_found) end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.1.0 def payload BSON::Document.new( reply: upconverter.command, request_id: request_id ) end private def upconverter @upconverter ||= Upconverter.new(documents, cursor_id, starting_from) end # The operation code required to specify a Reply message. # @return [Fixnum] the operation code. # # @since 2.5.0 OP_CODE = 1 # Available flags for a Reply message. FLAGS = [ :cursor_not_found, :query_failure, :shard_config_stale, :await_capable ] public # @!attribute # @return [Array] The flags for this reply. # # Supported flags: +:cursor_not_found+, +:query_failure+, # +:shard_config_stale+, +:await_capable+ field :flags, BitVector.new(FLAGS) # @!attribute # @return [Fixnum] The cursor id for this response. Will be zero # if there are no additional results. field :cursor_id, Int64 # @!attribute # @return [Fixnum] The starting position of the cursor for this Reply. field :starting_from, Int32 # @!attribute # @return [Fixnum] Number of documents in this Reply. field :number_returned, Int32 # @!attribute # @return [Array] The documents in this Reply. 
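      # (The +:@number_returned+ argument below tells the deserializer to read
      # as many documents as the previously deserialized +number_returned+
      # field indicates; see Message.deserialize_array.)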
field :documents, Document, :@number_returned # Upconverts legacy replies to new op command replies. # # @since 2.1.0 class Upconverter # Next batch constant. # # @since 2.1.0 NEXT_BATCH = 'nextBatch'.freeze # First batch constant. # # @since 2.1.0 FIRST_BATCH = 'firstBatch'.freeze # Cursor field constant. # # @since 2.1.0 CURSOR = 'cursor'.freeze # Id field constant. # # @since 2.1.0 ID = 'id'.freeze # @return [ Array ] documents The documents. attr_reader :documents # @return [ Integer ] cursor_id The cursor id. attr_reader :cursor_id # @return [ Integer ] starting_from The starting point in the cursor. attr_reader :starting_from # Initialize the new upconverter. # # @example Create the upconverter. # Upconverter.new(docs, 1, 3) # # @param [ Array ] documents The documents. # @param [ Integer ] cursor_id The cursor id. # @param [ Integer ] starting_from The starting position. # # @since 2.1.0 def initialize(documents, cursor_id, starting_from) @documents = documents @cursor_id = cursor_id @starting_from = starting_from end # Get the upconverted command. # # @example Get the command. # upconverter.command # # @return [ BSON::Document ] The command. # # @since 2.1.0 def command command? ? op_command : find_command end private def batch_field starting_from > 0 ? NEXT_BATCH : FIRST_BATCH end def command? !documents.empty? && documents.first.key?(Operation::Result::OK) end def find_command document = BSON::Document.new cursor_document = BSON::Document.new cursor_document.store(ID, cursor_id) cursor_document.store(batch_field, documents) document.store(Operation::Result::OK, 1) document.store(CURSOR, cursor_document) document end def op_command documents.first end end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/insert.rb0000644000004100000410000001264013257253113020252 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # MongoDB Wire protocol Insert message. # # This is a client request message that is sent to the server in order # to insert documents within a namespace. # # The operation only has one flag +:continue_on_error+ which the user # can use to instruct the database server to continue processing a bulk # insertion if one happens to fail (e.g. due to duplicate IDs). This makes # builk insert behave similarly to a seires of single inserts, except # lastError will be set if any insert fails, not just the last one. # # If multiple errors occur, only the most recent will be reported by the # getLastError mechanism. # # @api semipublic class Insert < Message # Creates a new Insert message # # @example Insert a user document # Insert.new('xgen', 'users', [{:name => 'Tyler'}]) # # @example Insert serveral user documents and continue on errors # Insert.new('xgen', 'users', users, :flags => [:continue_on_error]) # # @param database [String, Symbol] The database to insert into. # @param collection [String, Symbol] The collection to insert into. 
# @param documents [Array] The documents to insert. # @param options [Hash] Additional options for the insertion. # # @option options :flags [Array] The flags for the insertion message. # # Supported flags: +:continue_on_error+ def initialize(database, collection, documents, options = {}) @database = database @namespace = "#{database}.#{collection}" @documents = documents @flags = options[:flags] || [] @upconverter = Upconverter.new(collection, documents, options) @options = options super end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.1.0 def payload BSON::Document.new( command_name: 'insert', database_name: @database, command: upconverter.command, request_id: request_id ) end protected attr_reader :upconverter private def validating_keys? @options.fetch(:validating_keys, true) end # The operation code required to specify an Insert message. # @return [Fixnum] the operation code. # # @since 2.5.0 OP_CODE = 2002 # Available flags for an Insert message. FLAGS = [:continue_on_error] # @!attribute # @return [Array] The flags for this Insert message. field :flags, BitVector.new(FLAGS) # @!attribute # @return [String] The namespace for this Insert message. field :namespace, CString # @!attribute # @return [Array] The documents to insert. field :documents, Document, true # Converts legacy insert messages to the appropriare OP_COMMAND style # message. # # @since 2.1.0 class Upconverter # Insert field constant. # # @since 2.1.0 INSERT = 'insert'.freeze # Documents field constant. # # @since 2.1.0 DOCUMENTS = 'documents'.freeze # Write concern field constant. # # @since 2.1.0 WRITE_CONCERN = 'writeConcern'.freeze # @return [ String ] collection The name of the collection. attr_reader :collection # @return [ Array ] documents The documents to insert. attr_reader :documents # @return [ Hash ] options The options. attr_reader :options # Instantiate the upconverter. # # @example Instantiate the upconverter. # Upconverter.new('users', documents) # # @param [ String ] collection The name of the collection. # @param [ Array ] documents The documents. # @param [ Hash ] options The options. # # @since 2.1.0 def initialize(collection, documents, options) @collection = collection @documents = documents @options = options end # Get the upconverted command. # # @example Get the command. # upconverter.command # # @return [ BSON::Document ] The upconverted command. # # @since 2.1.0 def command document = BSON::Document.new document.store(INSERT, collection) document.store(DOCUMENTS, documents) document.store(Message::ORDERED, options.fetch(:ordered, true)) document.merge!(WRITE_CONCERN => options[:write_concern].options) if options[:write_concern] document end end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/query.rb0000644000004100000410000002312213257253113020110 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
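# Illustrative sketch (assumed usage, not an official example): a Query message
# is constructed per namespace and serialized into a BSON::ByteBuffer before
# being written to the socket, roughly:
#
#   query = Mongo::Protocol::Query.new('xgen', 'users', { name: 'Tyler' }, limit: 10)
#   buffer = BSON::ByteBuffer.new
#   query.serialize(buffer) # buffer now holds the OP_QUERY bytes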
module Mongo module Protocol # MongoDB Wire protocol Query message. # # This is a client request message that is sent to the server in order # to retrieve documents matching provided query. # # Users may also provide additional options such as a projection, to # select a subset of the fields, a number to skip or a limit on the # number of returned documents. # # There are a variety of flags that can be used to adjust cursor # parameters or the desired consistency and integrity the results. # # @api semipublic class Query < Message include Monitoring::Event::Secure # Creates a new Query message # # @example Find all users named Tyler. # Query.new('xgen', 'users', {:name => 'Tyler'}) # # @example Find all users named Tyler skipping 5 and returning 10. # Query.new('xgen', 'users', {:name => 'Tyler'}, :skip => 5, # :limit => 10) # # @example Find all users with slave ok bit set # Query.new('xgen', 'users', {:name => 'Tyler'}, :flags => [:slave_ok]) # # @example Find all user ids. # Query.new('xgen', 'users', {}, :fields => {:id => 1}) # # @param database [String, Symbol] The database to query. # @param collection [String, Symbol] The collection to query. # @param selector [Hash] The query selector. # @param options [Hash] The additional query options. # # @option options :project [Hash] The projection. # @option options :skip [Integer] The number of documents to skip. # @option options :limit [Integer] The number of documents to return. # @option options :flags [Array] The flags for the query message. # # Supported flags: +:tailable_cursor+, +:slave_ok+, +:oplog_replay+, # +:no_cursor_timeout+, +:await_data+, +:exhaust+, +:partial+ def initialize(database, collection, selector, options = {}) @database = database @namespace = "#{database}.#{collection}" @selector = selector @options = options @project = options[:project] @limit = determine_limit @skip = options[:skip] || 0 @flags = options[:flags] || [] @upconverter = Upconverter.new(collection, selector, options, flags) super end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.1.0 def payload BSON::Document.new( command_name: upconverter.command_name, database_name: @database, command: upconverter.command, request_id: request_id ) end # Query messages require replies from the database. # # @example Does the message require a reply? # message.replyable? # # @return [ true ] Always true for queries. # # @since 2.0.0 def replyable? true end # Compress this message. # # @param [ String, Symbol ] compressor The compressor to use. # @param [ Integer ] zlib_compression_level The zlib compression level to use. # # @return [ Compressed, self ] A Protocol::Compressed message or self, depending on whether # this message can be compressed. # # @since 2.5.0 def compress!(compressor, zlib_compression_level = nil) if compressor && compression_allowed?(selector.keys.first) Compressed.new(self, compressor, zlib_compression_level) else self end end protected attr_reader :upconverter private # The operation code required to specify a Query message. # @return [Fixnum] the operation code. # # @since 2.5.0 OP_CODE = 2004 def determine_limit [ @options[:limit] || @options[:batch_size], @options[:batch_size] || @options[:limit] ].min || 0 end # Available flags for a Query message. 
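      # Each symbol's bit position comes from its index in this array (handled
      # by BitVector), so, for example, a query built with flags +[:slave_ok]+
      # serializes the flag field as 4 (bit 2 set).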
FLAGS = [ :reserved, :tailable_cursor, :slave_ok, :oplog_replay, :no_cursor_timeout, :await_data, :exhaust, :partial ] # @!attribute # @return [Array] The flags for this query message. field :flags, BitVector.new(FLAGS) # @!attribute # @return [String] The namespace for this query message. field :namespace, CString # @!attribute # @return [Integer] The number of documents to skip. field :skip, Int32 # @!attribute # @return [Integer] The number of documents to return. field :limit, Int32 # @!attribute # @return [Hash] The query selector. field :selector, Document # @!attribute # @return [Hash] The projection. field :project, Document # Converts legacy query messages to the appropriare OP_COMMAND style # message. # # @since 2.1.0 class Upconverter # Mappings of the options to the find command options. # # @since 2.1.0 OPTION_MAPPINGS = { :project => 'projection', :skip => 'skip', :limit => 'limit', :batch_size => 'batchSize' }.freeze SPECIAL_FIELD_MAPPINGS = { :$readPreference => 'readPreference', :$orderby => 'sort', :$hint => 'hint', :$comment => 'comment', :$returnKey => 'returnKey', :$snapshot => 'snapshot', :$maxScan => 'maxScan', :$max => 'max', :$min => 'min', :$maxTimeMS => 'maxTimeMS', :$showDiskLoc => 'showRecordId', :$explain => 'explain' }.freeze # Mapping of flags to find command options. # # @since 2.1.0 FLAG_MAPPINGS = { :tailable_cursor => 'tailable', :oplog_replay => 'oplogReplay', :no_cursor_timeout => 'noCursorTimeout', :await_data => 'awaitData', :partial => 'allowPartialResults' }.freeze # Find command constant. # # @since 2.1.0 FIND = 'find'.freeze # Filter attribute constant. # # @since 2.1.0 FILTER = 'filter'.freeze # @return [ String ] collection The name of the collection. attr_reader :collection # @return [ BSON::Document, Hash ] filter The query filter or command. attr_reader :filter # @return [ BSON::Document, Hash ] options The options. attr_reader :options # @return [ Array ] flags The flags. attr_reader :flags # Instantiate the upconverter. # # @example Instantiate the upconverter. # Upconverter.new('users', { name: 'test' }, { skip: 10 }) # # @param [ String ] collection The name of the collection. # @param [ BSON::Document, Hash ] filter The filter or command. # @param [ BSON::Document, Hash ] options The options. # @param [ Array ] flags The flags. # # @since 2.1.0 def initialize(collection, filter, options, flags) @collection = collection @filter = filter @options = options @flags = flags end # Get the upconverted command. # # @example Get the command. # upconverter.command # # @return [ BSON::Document ] The upconverted command. # # @since 2.1.0 def command command? ? op_command : find_command end # Get the name of the command. If the collection is $cmd then it's the # first key in the filter, otherwise it's a find. # # @example Get the command name. # upconverter.command_name # # @return [ String, Symbol ] The command name. # # @since 2.1.0 def command_name (filter[:$query] || !command?) ? FIND : filter.keys.first end private def command? collection == Database::COMMAND end def query_filter filter[:$query] || filter end def op_command document = BSON::Document.new query_filter.each do |field, value| document.store(field.to_s, value) end document end def find_command document = BSON::Document.new document.store(FIND, collection) document.store(FILTER, query_filter) OPTION_MAPPINGS.each do |legacy, option| document.store(option, options[legacy]) unless options[legacy].nil? 
end SPECIAL_FIELD_MAPPINGS.each do |special, normal| document.store(normal, filter[special]) unless filter[special].nil? end FLAG_MAPPINGS.each do |legacy, flag| document.store(flag, true) if flags.include?(legacy) end document end end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/kill_cursors.rb0000644000004100000410000000743413257253113021466 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # MongoDB Wire protocol KillCursors message. # # This is a client request message that is sent to the server in order # to kill a number of cursors. # # @api semipublic class KillCursors < Message # Creates a new KillCursors message # # @example Kill the cursor on the server with id 1. # KillCursors.new([1]) # # @param [ Mongo::Database ] collection The collection. # @param [ Mongo::Database ] database The database. # @param [ Array ] cursor_ids The cursor ids to kill. def initialize(collection, database, cursor_ids) @database = database @cursor_ids = cursor_ids @id_count = @cursor_ids.size @upconverter = Upconverter.new(collection, cursor_ids) super end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.1.0 def payload BSON::Document.new( command_name: 'killCursors', database_name: @database, command: upconverter.command, request_id: request_id ) end protected attr_reader :upconverter private # The operation code required to specify +KillCursors+ message. # @return [Fixnum] the operation code. # # @since 2.5.0 OP_CODE = 2007 # Field representing Zero encoded as an Int32. field :zero, Zero # @!attribute # @return [Fixnum] Count of the number of cursor ids. field :id_count, Int32 # @!attribute # @return [Array] Cursors to kill. field :cursor_ids, Int64, true # Converts legacy insert messages to the appropriare OP_COMMAND style # message. # # @since 2.1.0 class Upconverter # The kill cursors constant. # # @since 2.2.0 KILL_CURSORS = 'killCursors'.freeze # The cursors constant. # # @since 2.2.0 CURSORS = 'cursors'.freeze # @return [ String ] collection The name of the collection. attr_reader :collection # @return [ Array ] cursor_ids The cursor ids. attr_reader :cursor_ids # Instantiate the upconverter. # # @example Instantiate the upconverter. # Upconverter.new('users', [ 1, 2, 3 ]) # # @param [ String ] collection The name of the collection. # @param [ Array ] cursor_ids The cursor ids. # # @since 2.1.0 def initialize(collection, cursor_ids) @collection = collection @cursor_ids = cursor_ids end # Get the upconverted command. # # @example Get the command. # upconverter.command # # @return [ BSON::Document ] The upconverted command. 
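        # For example, +Upconverter.new('users', [ 1, 2, 3 ]).command+ yields a
        # document along the lines of
        # +{ 'killCursors' => 'users', 'cursors' => [ 1, 2, 3 ] }+.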
# # @since 2.1.0 def command document = BSON::Document.new document.store(KILL_CURSORS, collection) document.store(CURSORS, cursor_ids) document end end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/compressed.rb0000644000004100000410000001025513257253113021112 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'zlib' module Mongo module Protocol # MongoDB Wire protocol Compressed message. # # This is a bi-directional message that compresses another opcode. # # @api semipublic # # @since 2.5.0 class Compressed < Message # The byte signaling that the message has been compressed with Zlib. # # @since 2.5.0 ZLIB_BYTE = 2.chr.force_encoding(BSON::BINARY).freeze # The Zlib compressor identifier. # # @since 2.5.0 ZLIB = 'zlib'.freeze # The compressor identifier to byte map. # # @since 2.5.0 COMPRESSOR_ID_MAP = { ZLIB => ZLIB_BYTE }.freeze # Creates a new OP_COMPRESSED message. # # @example Create an OP_COMPRESSED message. # Compressed.new(original_message, 'zlib') # # @param [ Mongo::Protocol::Message ] message The original message. # @param [ String, Symbol ] compressor The compression algorithm to use. # @param [ Integer ] zlib_compression_level The zlib compression level to use. # -1 and nil imply default. # # @since 2.5.0 def initialize(message, compressor, zlib_compression_level = nil) @original_message = message @original_op_code = message.op_code @uncompressed_size = 0 @compressor_id = COMPRESSOR_ID_MAP[compressor] @compressed_message = '' @zlib_compression_level = zlib_compression_level if zlib_compression_level && zlib_compression_level != -1 @request_id = message.set_request_id end # Inflate an OP_COMRESSED message and return the original message. # # @example Inflate a compressed message. # message.inflate! # # @return [ Protocol::Message ] The inflated message. # # @since 2.5.0 def inflate! message = Registry.get(@original_op_code).allocate uncompressed_message = Zlib::Inflate.inflate(@compressed_message) buf = BSON::ByteBuffer.new(uncompressed_message) message.send(:fields).each do |field| if field[:multi] Message.deserialize_array(message, buf, field) else Message.deserialize_field(message, buf, field) end end message end # Whether the message expects a reply from the database. # # @example Does the message require a reply? # message.replyable? # # @return [ true, false ] If the message expects a reply. # # @since 2.5.0 def replyable? @original_message.replyable? end private # The operation code for a +Compressed+ message. # @return [ Fixnum ] the operation code. # # @since 2.5.0 OP_CODE = 2012 # @!attribute # Field representing the original message's op code as an Int32. field :original_op_code, Int32 # @!attribute # @return [ Fixnum ] The size of the original message, excluding header as an Int32. field :uncompressed_size, Int32 # @!attribute # @return [ String ] The id of the compressor as a single byte. field :compressor_id, Byte # @!attribute # @return [ String ] The actual compressed message bytes. 
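      # (Populated in #serialize_fields below: the original message's fields
      # are written to a scratch buffer, deflated with Zlib, and the resulting
      # bytes are stored here along with the uncompressed size.)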
field :compressed_message, Bytes def serialize_fields(buffer, max_bson_size) buf = BSON::ByteBuffer.new @original_message.send(:serialize_fields, buf, max_bson_size) @uncompressed_size = buf.length @compressed_message = Zlib::Deflate.deflate(buf.to_s, @zlib_compression_level).force_encoding(BSON::BINARY) super end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/message.rb0000644000004100000410000002500413257253113020370 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # A base class providing functionality required by all messages in # the MongoDB wire protocol. It provides a minimal DSL for defining typed # fields to enable serialization and deserialization over the wire. # # @example # class WireProtocolMessage < Message # # private # # def op_code # 1234 # end # # FLAGS = [:first_bit, :bit_two] # # # payload # field :flags, BitVector.new(FLAGS) # field :namespace, CString # field :document, Document # field :documents, Document, true # end # # @abstract # @api semiprivate class Message include Serializers # The batch size constant. # # @since 2.2.0 BATCH_SIZE = 'batchSize'.freeze # The collection constant. # # @since 2.2.0 COLLECTION = 'collection'.freeze # The limit constant. # # @since 2.2.0 LIMIT = 'limit'.freeze # The ordered constant. # # @since 2.2.0 ORDERED = 'ordered'.freeze # The q constant. # # @since 2.2.0 Q = 'q'.freeze # Default max message size of 48MB. # # @since 2.2.1 MAX_MESSAGE_SIZE = 50331648.freeze # Returns the request id for the message # # @return [Fixnum] The request id for this message attr_reader :request_id def initialize(*args) # :nodoc: @request_id = nil end # The default for messages is not to require a reply after sending a # message to the server. # # @example Does the message require a reply? # message.replyable? # # @return [ false ] The default is to not require a reply. # # @since 2.0.0 def replyable? false end # Compress a message. # # @param [ String, Symbol ] compressor The compressor to use. # @param [ Integer ] zlib_compression_level The zlib compression level to use. # # @return [ self ] Always returns self. Other message types should override this method. # # @since 2.5.0 def compress!(compressor, zlib_compression_level = nil) self end # Inflate a message. # # @return [ self ] Always returns self. Other message types should override this method. # # @since 2.5.0 def inflate! self end # Serializes message into bytes that can be sent on the wire # # @param buffer [String] buffer where the message should be inserted # @return [String] buffer containing the serialized message def serialize(buffer = BSON::ByteBuffer.new, max_bson_size = nil) start = buffer.length serialize_header(buffer) serialize_fields(buffer, max_bson_size) buffer.replace_int32(start, buffer.length - start) end alias_method :to_s, :serialize # Deserializes messages from an IO stream # # @param [ Integer ] max_message_size The max message size. 
# @param [ IO ] io Stream containing a message # # @return [ Message ] Instance of a Message class def self.deserialize(io, max_message_size = MAX_MESSAGE_SIZE, expected_response_to = nil) length, _request_id, response_to, _op_code = deserialize_header(BSON::ByteBuffer.new(io.read(16))) # Protection from potential DOS man-in-the-middle attacks. See # DRIVERS-276. if length > (max_message_size || MAX_MESSAGE_SIZE) raise Error::MaxMessageSize.new(max_message_size) end # Protection against returning the response to a previous request. See # RUBY-1117 if expected_response_to && response_to != expected_response_to raise Error::UnexpectedResponse.new(expected_response_to, response_to) end message = Registry.get(_op_code).allocate buffer = BSON::ByteBuffer.new(io.read(length - 16)) message.send(:fields).each do |field| if field[:multi] deserialize_array(message, buffer, field) else deserialize_field(message, buffer, field) end end message.inflate! end # Tests for equality between two wire protocol messages # by comparing class and field values. # # @param other [Mongo::Protocol::Message] The wire protocol message. # @return [true, false] The equality of the messages. def ==(other) return false if self.class != other.class fields.all? do |field| name = field[:name] instance_variable_get(name) == other.instance_variable_get(name) end end alias_method :eql?, :== # Creates a hash from the values of the fields of a message. # # @return [ Fixnum ] The hash code for the message. def hash fields.map { |field| instance_variable_get(field[:name]) }.hash end # Generates a request id for a message # # @return [Fixnum] a request id used for sending a message to the # server. The server will put this id in the response_to field of # a reply. def set_request_id @@id_lock.synchronize do @request_id = @@request_id += 1 end end # Default number returned value for protocol messages. # # @return [ 0 ] This method must be overridden, otherwise, always returns 0. # # @since 2.5.0 def number_returned; 0; end private @@request_id = 0 @@id_lock = Mutex.new # A method for getting the fields for a message class # # @return [Integer] the fields for the message class def fields self.class.fields end # A class method for getting the fields for a message class # # @return [Integer] the fields for the message class def self.fields @fields ||= [] end # Serializes message fields into a buffer # # @param buffer [String] buffer to receive the field # @return [String] buffer with serialized field def serialize_fields(buffer, max_bson_size = nil) fields.each do |field| value = instance_variable_get(field[:name]) if field[:multi] value.each do |item| if field[:type].respond_to?(:size_limited?) field[:type].serialize(buffer, item, max_bson_size, validating_keys?) else field[:type].serialize(buffer, item, validating_keys?) end end else if field[:type].respond_to?(:size_limited?) field[:type].serialize(buffer, value, max_bson_size, validating_keys?) else field[:type].serialize(buffer, value, validating_keys?) 
end end end end # Serializes the header of the message consisting of 4 32bit integers # # The integers represent a message length placeholder (calculation of # the actual length is deferred) the request id, the response to id, # and the op code for the message # # Currently uses hardcoded 0 for request id and response to as their # values are irrelevent to the server # # @param buffer [String] Buffer to receive the header # @return [String] Serialized header def serialize_header(buffer) set_request_id unless @request_id Header.serialize(buffer, [0, request_id, 0, op_code]) end # Deserializes the header of the message # # @param io [IO] Stream containing the header. # @return [Array] Deserialized header. def self.deserialize_header(io) Header.deserialize(io) end # A method for declaring a message field # # @param name [String] Name of the field # @param type [Module] Type specific serialization strategies # @param multi [true, false, Symbol] Specify as +true+ to # serialize the field's value as an array of type +:type+ or as a # symbol describing the field having the number of items in the # array (used upon deserialization) # # Note: In fields where multi is a symbol representing the field # containing number items in the repetition, the field containing # that information *must* be deserialized prior to deserializing # fields that use the number. # # @return [NilClass] def self.field(name, type, multi = false) fields << { :name => "@#{name}".intern, :type => type, :multi => multi } attr_reader name end # Deserializes an array of fields in a message # # The number of items in the array must be described by a previously # deserialized field specified in the class by the field dsl under # the key +:multi+ # # @param message [Message] Message to contain the deserialized array. # @param io [IO] Stream containing the array to deserialize. # @param field [Hash] Hash representing a field. # @return [Message] Message with deserialized array. def self.deserialize_array(message, io, field) elements = [] count = message.instance_variable_get(field[:multi]) count.times { elements << field[:type].deserialize(io) } message.instance_variable_set(field[:name], elements) end # Deserializes a single field in a message # # @param message [Message] Message to contain the deserialized field. # @param io [IO] Stream containing the field to deserialize. # @param field [Hash] Hash representing a field. # @return [Message] Message with deserialized field. def self.deserialize_field(message, io, field) message.instance_variable_set( field[:name], field[:type].deserialize(io) ) end def validating_keys? @options[:validating_keys] if @options end end end end mongo-2.5.1/lib/mongo/protocol/delete.rb0000644000004100000410000001156113257253113020211 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # MongoDB Wire protocol Delete message. 
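    # (OP_DELETE, opcode 2006; on the wire it is a reserved zero Int32, the
    # namespace CString, a flags bit vector in which +:single_remove+ limits
    # the operation to one document, and finally the selector document.)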
# # This is a client request message that is sent to the server in order # to delete selected documents in the specified namespace. # # The operation, by default, operates on many documents. Setting # the +:single_remove+ flag allows for a single matching document # to be removed. # # @api semipublic class Delete < Message # Creates a new Delete message # # @example Remove all users named Tyler. # Delete.new('xgen', 'users', {:name => 'Tyler'}) # # @param database [String, Symbol] The database to remove from. # @param collection [String, Symbol] The collection to remove from. # @param selector [Hash] The query used to select doc(s) to remove. # @param options [Hash] The additional delete options. # # @option options :flags [Array] The flags for the delete message. # # Supported flags: +:single_remove+ def initialize(database, collection, selector, options = {}) @database = database @namespace = "#{database}.#{collection}" @selector = selector @flags = options[:flags] || [] @upconverter = Upconverter.new(collection, selector, options) super end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.1.0 def payload BSON::Document.new( command_name: 'delete', database_name: @database, command: upconverter.command, request_id: request_id ) end protected attr_reader :upconverter private # The operation code required to specify a Delete message. # @return [Fixnum] the operation code. # # @since 2.5.0 OP_CODE = 2006 # Available flags for a Delete message. FLAGS = [:single_remove] # Field representing Zero encoded as an Int32. field :zero, Zero # @!attribute # @return [String] The namespace for this Delete message. field :namespace, CString # @!attribute # @return [Array] The flags for this Delete message. field :flags, BitVector.new(FLAGS) # @!attribute # @return [Hash] The selector for this Delete message. field :selector, Document # Converts legacy delete messages to the appropriate OP_COMMAND style # message. # # @since 2.1.0 class Upconverter # The delete command constant. # # @since 2.2.0 DELETE = 'delete'.freeze # The deletes command constant. # # @since 2.2.0 DELETES = 'deletes'.freeze # @return [ String ] collection The name of the collection. attr_reader :collection # @return [ BSON::Document, Hash ] filter The query filter or command. attr_reader :filter # @return [ Hash ] options The options. attr_reader :options # Instantiate the upconverter. # # @example Instantiate the upconverter. # Upconverter.new('users', { name: 'test' }, {}) # # @param [ String ] collection The name of the collection. # @param [ BSON::Document, Hash ] filter The filter or command. # @param [ Hash ] options The options. # # @since 2.1.0 def initialize(collection, filter, options) @collection = collection @filter = filter @options = options end # Get the upconverted command. # # @example Get the command. # upconverter.command # # @return [ BSON::Document ] The upconverted command. # # @since 2.1.0 def command document = BSON::Document.new document.store(DELETE, collection) document.store(DELETES, [ BSON::Document.new(Message::Q => filter, Message::LIMIT => limit) ]) document.store(Message::ORDERED, true) document end private def limit if options.key?(:flags) options[:flags].include?(:single_remove) ? 1 : 0 else 0 end end end Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/protocol/registry.rb0000644000004100000410000000416613257253113020620 0ustar www-datawww-data# Copyright (C) 2009-2017 MongoDB Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # Provides a registry for looking up a message class based on op code. # # @since 2.5.0 module Registry extend self # A Mapping of all the op codes to their corresponding Ruby classes. # # @since 2.5.0 MAPPINGS = {} # Get the class for the given op code and raise an error if it's not found. # # @example Get the type for the op code. # Mongo::Protocol::Registry.get(1) # # @return [ Class ] The corresponding Ruby class for the message type. # # @since 2.5.0 def get(op_code, message = nil) if type = MAPPINGS[op_code] type else handle_unsupported_op_code!(op_code) end end # Register the Ruby type for the corresponding op code. # # @example Register the op code. # Mongo::Protocol::Registry.register(1, Reply) # # @param [ Fixnum ] op_code The op code. # @param [ Class ] type The class the op code maps to. # # @return [ Class ] The class. # # @since 2.5.0 def register(op_code, type) MAPPINGS.store(op_code, type) define_type_reader(type) end private def define_type_reader(type) type.module_eval <<-MOD def op_code; OP_CODE; end MOD end def handle_unsupported_op_code!(op_code) message = "Detected unknown message type with op code: #{op_code}." raise Error::UnsupportedMessageType.new(message) end end end end mongo-2.5.1/lib/mongo/protocol/msg.rb0000644000004100000410000001264713257253113017543 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Protocol # MongoDB Wire protocol Msg message (OP_MSG), a bi-directional wire protocol opcode. # # OP_MSG is only available in MongoDB 3.6 (maxWireVersion >= 6) and later. # # @api private # # @since 2.5.0 class Msg < Message include Monitoring::Event::Secure # The identifier for the database name to execute the command on. # # @since 2.5.0 DATABASE_IDENTIFIER = '$db'.freeze # Creates a new OP_MSG protocol message # # @example Create a OP_MSG wire protocol message # Msg.new([:more_to_come], {}, { ismaster: 1 }, # { type: 1, payload: { identifier: 'documents', sequence: [..] } }) # # @param [ Array ] flags The flag bits. Current supported values are # :more_to_come and :checksum_present. # @param [ Hash ] options The options. There are currently no supported options, this is a # place-holder for the future. # @param [ BSON::Document, Hash ] global_args The global arguments, becomes a section of payload type 0. 
# @param [ BSON::Document, Hash ] sections Zero or more sections, in the format # { type: 1, payload: { identifier: <String>, sequence: <Array<BSON::Document>> } } or # { type: 0, payload: <BSON::Document> } # # @option options [ true, false ] validating_keys Whether keys should be validated. # # @api private # # @since 2.5.0 def initialize(flags, options, global_args, *sections) @flags = flags || [ :none ] @options = options @global_args = global_args @sections = [ { type: 0, payload: global_args } ] + sections @request_id = nil super end # Whether the message expects a reply from the database. # # @example Does the message require a reply? # message.replyable? # # @return [ true, false ] If the message expects a reply. # # @since 2.5.0 def replyable? @replyable ||= !flags.include?(:more_to_come) end # Return the event payload for monitoring. # # @example Return the event payload. # message.payload # # @return [ BSON::Document ] The event payload. # # @since 2.5.0 def payload BSON::Document.new( command_name: command.keys.first, database_name: global_args[DATABASE_IDENTIFIER], command: command, request_id: request_id, reply: sections[0] ) end # Serializes message into bytes that can be sent on the wire. # # @param [ BSON::ByteBuffer ] buffer where the message should be inserted. # @param [ Integer ] max_bson_size The maximum bson object size. # # @return [ BSON::ByteBuffer ] buffer containing the serialized message. # # @since 2.5.0 def serialize(buffer = BSON::ByteBuffer.new, max_bson_size = nil) super add_check_sum(buffer) buffer end # Compress this message. # # @param [ String, Symbol ] compressor The compressor to use. # @param [ Integer ] zlib_compression_level The zlib compression level to use. # # @return [ Compressed, self ] A Protocol::Compressed message or self, depending on whether # this message can be compressed. # # @since 2.5.0 def compress!(compressor, zlib_compression_level = nil) if compressor && compression_allowed?(command.keys.first) Compressed.new(self, compressor, zlib_compression_level) else self end end private def command @command ||= global_args.dup.tap do |cmd| cmd.delete(DATABASE_IDENTIFIER) sections.each do |section| if section[:type] == 1 identifier = section[:payload][:identifier] cmd[identifier] ||= [] cmd[identifier] += section[:payload][:sequence] end end end end def add_check_sum(buffer) if flags.include?(:checksum_present) #buffer.put_int32(checksum) end end def global_args @global_args ||= (sections[0] || {}) end # The operation code required to specify an OP_MSG message. # @return [ Fixnum ] the operation code. # # @since 2.5.0 OP_CODE = 2013 # Available flags for an OP_MSG message. FLAGS = Array.new(16).tap { |arr| arr[0] = :checksum_present arr[1] = :more_to_come } # @!attribute # @return [Array] The flags for this message. field :flags, BitVector.new(FLAGS) # @!attribute # @return [Hash] The sections of payload type 1 or 0. field :sections, Sections alias :documents :sections Registry.register(OP_CODE, self) end end end mongo-2.5.1/lib/mongo/cluster/0000755000004100000410000000000013257253113016236 5ustar www-datawww-datamongo-2.5.1/lib/mongo/cluster/topology/0000755000004100000410000000000013257253113020112 5ustar www-datawww-datamongo-2.5.1/lib/mongo/cluster/topology/replica_set.rb0000644000004100000410000002454213257253113022740 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cluster module Topology # Defines behaviour when a cluster is in replica set topology. # # @since 2.0.0 class ReplicaSet include Loggable include Monitoring::Publishable # Constant for the replica set name configuration option. # # @since 2.0.0 REPLICA_SET_NAME = :replica_set.freeze # @return [ Hash ] options The options. attr_reader :options # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # The display name for the topology. # # @since 2.0.0 NAME = 'Replica Set'.freeze # Get the display name. # # @example Get the display name. # ReplicaSet.display_name # # @return [ String ] The display name. # # @since 2.0.0 def display_name NAME end # Elect a primary server within this topology. # # @example Elect a primary server. # topology.elect_primary(description, servers) # # @param [ Server::Description ] description The description of the # elected primary. # @param [ Array ] servers The list of known servers to the # cluster. # # @return [ ReplicaSet ] The topology. def elect_primary(description, servers) if description.replica_set_name == replica_set_name unless detect_stale_primary!(description) servers.each do |server| if server.primary? && server.address != description.address server.description.unknown! end end update_max_election_id(description) update_max_set_version(description) end else log_warn( "Server #{description.address.to_s} has incorrect replica set name: " + "'#{description.replica_set_name}'. The current replica set name is '#{replica_set_name}'." ) end self end # Determine if the topology would select a readable server for the # provided candidates and read preference. # # @example Is a readable server present? # topology.has_readable_server?(cluster, server_selector) # # @param [ Cluster ] cluster The cluster. # @param [ ServerSelector ] server_selector The server # selector. # # @return [ true, false ] If a readable server is present. # # @since 2.4.0 def has_readable_server?(cluster, server_selector = nil) (server_selector || ServerSelector.get(mode: :primary)).candidates(cluster).any? end # Determine if the topology would select a writable server for the # provided candidates. # # @example Is a writable server present? # topology.has_writable_server?(servers) # # @param [ Cluster ] cluster The cluster. # # @return [ true, false ] If a writable server is present. # # @since 2.4.0 def has_writable_server?(cluster) cluster.servers.any?{ |server| server.primary? } end # Initialize the topology with the options. # # @example Initialize the topology. # ReplicaSet.new(options) # # @param [ Hash ] options The options. # @param [ Monitoring ] monitoring The monitoring. # @param [ Array ] seeds The seeds. # # @since 2.0.0 def initialize(options, monitoring, seeds = []) @options = options @monitoring = monitoring @max_election_id = nil @max_set_version = nil end # A replica set topology is a replica set. # # @example Is the topology a replica set? # ReplicaSet.replica_set? # # @return [ true ] Always true. # # @since 2.0.0 def replica_set?; true; end # Get the replica set name configured for this topology. 
# # @example Get the replica set name. # topology.replica_set_name # # @return [ String ] The name of the configured replica set. # # @since 2.0.0 def replica_set_name @replica_set_name ||= options[REPLICA_SET_NAME] end # Select appropriate servers for this topology. # # @example Select the servers. # ReplicaSet.servers(servers) # # @param [ Array ] servers The known servers. # # @return [ Array ] The servers in the replica set. # # @since 2.0.0 def servers(servers) servers.select do |server| (replica_set_name.nil? || server.replica_set_name == replica_set_name) && server.primary? || server.secondary? end end # Whether a server description's hosts may be added to the cluster. # # @example Check if a description's hosts may be added to the cluster. # topology.add_hosts?(description, servers) # # @param [ Mongo::Server::Description ] description The description. # @param [ Array ] servers The cluster servers. # # @return [ true, false ] Whether a description's hosts may be added. # # @since 2.0.6 def add_hosts?(description, servers) !!(member_of_this_set?(description) && (!has_primary?(servers) || description.primary?)) end # Whether a description can be used to remove hosts from the cluster. # # @example Check if a description can be used to remove hosts from the cluster. # topology.remove_hosts?(description) # # @param [ Mongo::Server::Description ] description The description. # # @return [ true, false ] Whether hosts may be removed from the cluster. # # @since 2.0.6 def remove_hosts?(description) !description.config.empty? && (description.primary? || description.me_mismatch? || description.hosts.empty? || !member_of_this_set?(description)) end # Whether a specific server in the cluster can be removed, given a description. # # @example Check if a specific server can be removed from the cluster. # topology.remove_server?(description, server) # # @param [ Mongo::Server::Description ] description The description. # @param [ Mongo::Serve ] server The server in question. # # @return [ true, false ] Whether the server can be removed from the cluster. # # @since 2.0.6 def remove_server?(description, server) remove_self?(description, server) || (member_of_this_set?(description) && !description.servers.empty? && !description.lists_server?(server)) end # A replica set topology is not sharded. # # @example Is the topology sharded? # ReplicaSet.sharded? # # @return [ false ] Always false. # # @since 2.0.0 def sharded?; false; end # A replica set topology is not single. # # @example Is the topology single? # ReplicaSet.single? # # @return [ false ] Always false. # # @since 2.0.0 def single?; false; end # A replica set topology is not unknown. # # @example Is the topology unknown? # ReplicaSet.unknown? # # @return [ false ] Always false. # # @since 2.0.0 def unknown?; false; end # Notify the topology that a standalone was discovered. # # @example Notify the topology that a standalone was discovered. # topology.standalone_discovered # # @return [ Topology::ReplicaSet ] Always returns self. # # @since 2.0.6 def standalone_discovered; self; end # Notify the topology that a member was discovered. # # @example Notify the topology that a member was discovered. # topology.member_discovered # # @since 2.4.0 def member_discovered; end; private def update_max_election_id(description) if description.election_id && (@max_election_id.nil? 
|| description.election_id > @max_election_id) @max_election_id = description.election_id end end def update_max_set_version(description) if description.set_version && (@max_set_version.nil? || description.set_version > @max_set_version) @max_set_version = description.set_version end end def detect_stale_primary!(description) if description.election_id && description.set_version if @max_set_version && @max_election_id && (description.set_version < @max_set_version || (description.set_version == @max_set_version && description.election_id < @max_election_id)) description.unknown! end end end def has_primary?(servers) servers.find { |s| s.primary? } end def member_of_this_set?(description) description.replica_set_member? && description.replica_set_name == replica_set_name end def remove_self?(description, server) !member_of_this_set?(description) && description.is_server?(server) && !description.ghost? end end end end end mongo-2.5.1/lib/mongo/cluster/topology/unknown.rb0000644000004100000410000002110213257253113022132 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cluster module Topology # Defines behaviour for when a cluster is in an unknown state. # # @since 2.0.0 class Unknown include Loggable include Monitoring::Publishable # The display name for the topology. # # @since 2.0.0 NAME = 'Unknown'.freeze # @return [ Hash ] options The options. attr_reader :options # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # Get the display name. # # @example Get the display name. # Unknown.display_name # # @return [ String ] The display name. # # @since 2.0.0 def display_name NAME end # Elect a primary server within this topology. # # @example Elect a primary server. # topology.elect_primary(description, servers) # # @param [ Server::Description ] description The description of the # elected primary. # @param [ Array ] servers The list of known servers to the # cluster. # # @return [ Sharded, ReplicaSet ] The new topology. def elect_primary(description, servers) if description.mongos? sharded = Sharded.new(options, monitoring) topology_changed(sharded) sharded else initialize_replica_set(description, servers) end end # Determine if the topology would select a readable server for the # provided candidates and read preference. # # @example Is a readable server present? # topology.has_readable_server?(cluster, server_selector) # # @param [ Cluster ] cluster The cluster. # @param [ ServerSelector ] server_selector The server # selector. # # @return [ false ] An Unknown topology will never have a readable server. # # @since 2.4.0 def has_readable_server?(cluster, server_selector = nil); false; end # Determine if the topology would select a writable server for the # provided candidates. # # @example Is a writable server present? # topology.has_writable_server?(servers) # # @param [ Cluster ] cluster The cluster. # # @return [ false ] An Unknown topology will never have a writable server. 
# # @since 2.4.0 def has_writable_server?(cluster); false; end # Initialize the topology with the options. # # @example Initialize the topology. # Unknown.new(options) # # @param [ Hash ] options The options. # @param [ Monitoring ] monitoring The monitoring. # @param [ Array ] seeds The seeds. # # @since 2.0.0 def initialize(options, monitoring, seeds = []) @options = options @monitoring = monitoring @seeds = seeds end # An unknown topology is not a replica set. # # @example Is the topology a replica set? # Unknown.replica_set? # # @return [ false ] Always false. # # @since 2.0.0 def replica_set?; false; end # Unknown topologies have no replica set name. # # @example Get the replica set name. # unknown.replica_set_name # # @return [ nil ] Always nil. # # @since 2.0.0 def replica_set_name; nil; end # Select appropriate servers for this topology. # # @example Select the servers. # Unknown.servers(servers) # # @param [ Array ] servers The known servers. # # @return [ Array ] An empty array; servers cannot be selected when the topology is # unknown. # # @since 2.0.0 def servers(servers) [] end # An unknown topology is not sharded. # # @example Is the topology sharded? # Unknown.sharded? # # @return [ false ] Always false. # # @since 2.0.0 def sharded?; false; end # An unknown topology is not single. # # @example Is the topology single? # Unknown.single? # # @return [ false ] Always false. # # @since 2.0.0 def single?; false; end # An unknown topology is unknown. # # @example Is the topology unknown? # Unknown.unknown? # # @return [ true ] Always true. # # @since 2.0.0 def unknown?; true; end # Whether a server description's hosts may be added to the cluster. # # @example Check if a description's hosts may be added to the cluster. # topology.add_hosts?(description, servers) # # @param [ Mongo::Server::Description ] description The description. # @param [ Array ] servers The cluster servers. # # @return [ true, false ] Whether a description's hosts may be added. # # @since 2.0.6 def add_hosts?(description, servers) !(description.unknown? || description.ghost?) end # Whether a description can be used to remove hosts from the cluster. # # @example Check if a description can be used to remove hosts from the cluster. # topology.remove_hosts?(description) # # @param [ Mongo::Server::Description ] description The description. # # @return [ true, false ] Whether hosts may be removed from the cluster. # # @since 2.0.6 def remove_hosts?(description) description.standalone? end # Whether a specific server in the cluster can be removed, given a description. # # @example Check if a specific server can be removed from the cluster. # topology.remove_server?(description, server) # # @param [ Mongo::Server::Description ] description The description. # @param [ Mongo::Server ] server The server in question. # # @return [ true, false ] Whether the server can be removed from the cluster. # # @since 2.0.6 def remove_server?(description, server) description.standalone? && description.is_server?(server) end # Notify the topology that a standalone was discovered. # # @example Notify the topology that a standalone was discovered. # topology.standalone_discovered # # @return [ Topology::Unknown, Topology::Single ] Either self or a # new Single topology. # # @since 2.0.6 def standalone_discovered if @seeds.size == 1 single = Single.new(options, monitoring, @seeds) topology_changed(single) single else self end end # Notify the topology that a member was discovered. # # @example Notify the topology that a member was discovered.
# topology.member_discovered # # @since 2.4.0 def member_discovered publish_sdam_event( Monitoring::TOPOLOGY_CHANGED, Monitoring::Event::TopologyChanged.new(self, self) ) end private def initialize_replica_set(description, servers) servers.each do |server| if server.standalone? && server.address != description.address server.description.unknown! end end replica_set = ReplicaSet.new(options.merge(:replica_set => description.replica_set_name), monitoring) topology_changed(replica_set) replica_set end def topology_changed(new_topology) publish_sdam_event( Monitoring::TOPOLOGY_CHANGED, Monitoring::Event::TopologyChanged.new(self, new_topology) ) end end end end end mongo-2.5.1/lib/mongo/cluster/topology/single.rb0000644000004100000410000001664413257253113021733 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cluster module Topology # Defines behaviour for when a cluster is in single topology. # # @since 2.0.0 class Single include Monitoring::Publishable # The display name for the topology. # # @since 2.0.0 NAME = 'Single'.freeze # @return [ Hash ] options The options. attr_reader :options # @return [ String ] seed The seed address. attr_reader :seed # @return [ monitoring ] monitoring the monitoring. attr_reader :monitoring # Get the display name. # # @example Get the display name. # Single.display_name # # @return [ String ] The display name. # # @since 2.0.0 def display_name NAME end # Elect a primary server within this topology. # # @example Elect a primary server. # topology.elect_primary(description, servers) # # @param [ Server::Description ] description The description of the # elected primary. # @param [ Array ] servers The list of known servers to the # cluster. # # @return [ Single ] The topology. def elect_primary(description, servers); self; end # Determine if the topology would select a readable server for the # provided candidates and read preference. # # @example Is a readable server present? # topology.has_readable_server?(cluster, server_selector) # # @param [ Cluster ] cluster The cluster. # @param [ ServerSelector ] server_selector The server # selector. # # @return [ true ] A standalone always has a readable server. # # @since 2.4.0 def has_readable_server?(cluster, server_selector = nil); true; end # Determine if the topology would select a writable server for the # provided candidates. # # @example Is a writable server present? # topology.has_writable_server?(servers) # # @param [ Cluster ] cluster The cluster. # # @return [ true ] A standalone always has a writable server. # # @since 2.4.0 def has_writable_server?(cluster); true; end # Initialize the topology with the options. # # @example Initialize the topology. # Single.new(options) # # @param [ Hash ] options The options. # @param [ Monitoring ] monitoring The monitoring. # @param [ Array ] seeds The seeds. 
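# # Note that only the first of the provided seeds is retained below; a Single # topology always addresses exactly one server.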
# # @since 2.0.0 def initialize(options, monitoring, seeds = []) @options = options @monitoring = monitoring @seed = seeds.first end # A single topology is not a replica set. # # @example Is the topology a replica set? # Single.replica_set? # # @return [ false ] Always false. # # @since 2.0.0 def replica_set?; false; end # Single topologies have no replica set name. # # @example Get the replica set name. # single.replica_set_name # # @return [ nil ] Always nil. # # @since 2.0.0 def replica_set_name; nil; end # Select appropriate servers for this topology. # # @example Select the servers. # Single.servers(servers, 'test') # # @param [ Array ] servers The known servers. # # @return [ Array ] The single servers. # # @since 2.0.0 def servers(servers, name = nil) [ servers.detect { |server| !server.unknown? } ] end # Whether a server description's hosts may be added to the cluster. # # @example Check if a description's hosts may be added to the cluster. # topology.add_hosts?(description, servers) # # @param [ Mongo::Server::Description ] description The description. # @param [ Array ] servers The cluster servers. # # @return [ false ] A description's hosts are never added to a # cluster of Single topology. # # @since 2.0.6 def add_hosts?(description, servers); false; end # Whether a description can be used to remove hosts from the cluster. # # @example Check if a description can be used to remove hosts from # the cluster. # topology.remove_hosts?(description) # # @param [ Mongo::Server::Description ] description The description. # # @return [ true ] A description can never be used to remove hosts # from a cluster of Single topology. # # @since 2.0.6 def remove_hosts?(description); false; end # Whether a specific server in the cluster can be removed, given a description. # # @example Check if a specific server can be removed from the cluster. # topology.remove_server?(description, server) # # @param [ Mongo::Server::Description ] description The description. # @param [ Mongo::Serve ] server The server in question. # # @return [ false ] A server is never removed from a cluster of Single topology. # # @since 2.0.6 def remove_server?(description, server); false; end # A single topology is not sharded. # # @example Is the topology sharded? # Single.sharded? # # @return [ false ] Always false. # # @since 2.0.0 def sharded?; false; end # A single topology is single. # # @example Is the topology single? # Single.single? # # @return [ true ] Always true. # # @since 2.0.0 def single?; true; end # An single topology is not unknown. # # @example Is the topology unknown? # Single.unknown? # # @return [ false ] Always false. # # @since 2.0.0 def unknown?; false; end # Notify the topology that a standalone was discovered. # # @example Notify the topology that a standalone was discovered. # topology.standalone_discovered # # @return [ Topology::Single ] Always returns self. # # @since 2.0.6 def standalone_discovered; self; end # Publish that a member of this topology was discovered. # # @example Publish that a member was discovered. # topology.member_discovered # # @since 2.4.0 def member_discovered publish_sdam_event( Monitoring::TOPOLOGY_CHANGED, Monitoring::Event::TopologyChanged.new(self, self) ) end end end end end mongo-2.5.1/lib/mongo/cluster/topology/sharded.rb0000644000004100000410000001662213257253113022060 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cluster module Topology # Defines behaviour for when a cluster is in sharded topology. # # @since 2.0.0 class Sharded include Monitoring::Publishable # The display name for the topology. # # @since 2.0.0 NAME = 'Sharded'.freeze # @return [ Hash ] options The options. attr_reader :options # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # Get the display name. # # @example Get the display name. # Sharded.display_name # # @return [ String ] The display name. # # @since 2.0.0 def display_name NAME end # Elect a primary server within this topology. # # @example Elect a primary server. # topology.elect_primary(description, servers) # # @param [ Server::Description ] description The description of the # elected primary. # @param [ Array ] servers The list of known servers to the # cluster. # # @return [ Sharded ] The topology. def elect_primary(description, servers); self; end # Determine if the topology would select a readable server for the # provided candidates and read preference. # # @example Is a readable server present? # topology.has_readable_server?(cluster, server_selector) # # @param [ Cluster ] cluster The cluster. # @param [ ServerSelector ] server_selector The server # selector. # # @return [ true ] A Sharded cluster always has a readable server. # # @since 2.4.0 def has_readable_server?(cluster, server_selector = nil); true; end # Determine if the topology would select a writable server for the # provided candidates. # # @example Is a writable server present? # topology.has_writable_server?(servers) # # @param [ Cluster ] cluster The cluster. # # @return [ true ] A Sharded cluster always has a writable server. # # @since 2.4.0 def has_writable_server?(cluster); true; end # Initialize the topology with the options. # # @example Initialize the topology. # Sharded.new(options) # # @param [ Hash ] options The options. # @param [ Monitoring ] monitoring The monitoring. # @param [ Array ] seeds The seeds. # # @since 2.0.0 def initialize(options, monitoring, seeds = []) @options = options @monitoring = monitoring end # A sharded topology is not a replica set. # # @example Is the topology a replica set? # Sharded.replica_set? # # @return [ false ] Always false. # # @since 2.0.0 def replica_set?; false; end # Sharded topologies have no replica set name. # # @example Get the replica set name. # sharded.replica_set_name # # @return [ nil ] Always nil. # # @since 2.0.0 def replica_set_name; nil; end # Select appropriate servers for this topology. # # @example Select the servers. # Sharded.servers(servers) # # @param [ Array ] servers The known servers. # # @return [ Array ] The mongos servers. # # @since 2.0.0 def servers(servers) servers.select{ |server| server.mongos? } end # Whether a server description's hosts may be added to the cluster. # # @example Check if a description's hosts may be added to the cluster. 
# topology.add_hosts?(description, servers) # # @param [ Mongo::Server::Description ] description The description. # @param [ Array ] servers The cluster servers. # # @return [ false ] A description's hosts are never added to a # sharded cluster. # # @since 2.0.6 def add_hosts?(description, servers); false; end # Whether a description can be used to remove hosts from the cluster. # # @example Check if a description can be used to remove hosts from # the cluster. # topology.remove_hosts?(description) # # @param [ Mongo::Server::Description ] description The description. # # @return [ true ] A description can always be used to remove hosts # from a sharded cluster. # # @since 2.0.6 def remove_hosts?(description); true; end # Whether a specific server in the cluster can be removed, given a description. # # @example Check if a specific server can be removed from the cluster. # topology.remove_server?(description, server) # # @param [ Mongo::Server::Description ] description The description. # @param [ Mongo::Serve ] server The server in question. # # @return [ true, false ] Whether the server can be removed from the cluster. # # @since 2.0.6 def remove_server?(description, server) remove_self?(description, server) || !(server.mongos? || server.unknown?) end # A sharded topology is sharded. # # @example Is the topology sharded? # Sharded.sharded? # # @return [ true ] Always true. # # @since 2.0.0 def sharded?; true; end # A sharded topology is not single. # # @example Is the topology single? # Sharded.single? # # @return [ false ] Always false. # # @since 2.0.0 def single?; false; end # A sharded topology is not unknown. # # @example Is the topology unknown? # Sharded.unknown? # # @return [ false ] Always false. # # @since 2.0.0 def unknown?; false; end # Notify the topology that a standalone was discovered. # # @example Notify the topology that a standalone was discovered. # topology.standalone_discovered # # @return [ Topology::Sharded ] Always returns self. # # @since 2.0.6 def standalone_discovered; self; end # Notify the topology that a member was discovered. # # @example Notify the cluster that a member was discovered. # topology.member_discovered # # @since 2.4.0 def member_discovered; end; private def remove_self?(description, server) description.is_server?(server) && !(description.mongos? || description.unknown?) end end end end end mongo-2.5.1/lib/mongo/cluster/topology.rb0000644000004100000410000000361313257253113020442 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/cluster/topology/replica_set' require 'mongo/cluster/topology/sharded' require 'mongo/cluster/topology/single' require 'mongo/cluster/topology/unknown' module Mongo class Cluster # Defines behaviour for getting servers. # # @since 2.0.0 module Topology extend self # The various topologies for server selection. 
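# # For example (an illustrative note derived from the mapping and #initial below): # Topology.initial([ '127.0.0.1:27017' ], monitoring, connect: :direct) builds a Single # topology, connect: :sharded builds Sharded, and options with neither :connect nor # :replica_set fall through to Unknown until server discovery resolves the real topology.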
# # @since 2.0.0 OPTIONS = { replica_set: ReplicaSet, sharded: Sharded, direct: Single }.freeze # Get the initial cluster topology for the provided options. # # @example Get the initial cluster topology. # Topology.initial(topology: :replica_set) # # @param [ Array ] seeds The addresses of the configured servers. # @param [ Monitoring ] monitoring The monitoring. # @param [ Hash ] options The cluster options. # # @return [ ReplicaSet, Sharded, Single ] The topology. # # @since 2.0.0 def initial(seeds, monitoring, options) if options.has_key?(:connect) OPTIONS.fetch(options[:connect].to_sym).new(options, monitoring, seeds) elsif options.has_key?(:replica_set) ReplicaSet.new(options, monitoring, seeds) else Unknown.new(options, monitoring, seeds) end end end end end mongo-2.5.1/lib/mongo/cluster/reapers/0000755000004100000410000000000013257253113017677 5ustar www-datawww-datamongo-2.5.1/lib/mongo/cluster/reapers/socket_reaper.rb0000644000004100000410000000331113257253113023050 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cluster # A manager that calls a method on each of a cluster's pools to close stale # sockets. # # @api private # # @since 2.5.0 class SocketReaper # Initialize the SocketReaper object. # # @example Initialize the socket reaper. # SocketReaper.new(cluster) # # @param [ Mongo::Cluster ] cluster The cluster whose pools' stale sockets # need to be reaped at regular intervals. # # @since 2.5.0 def initialize(cluster) @cluster = cluster end # Execute the operation to close the pool's stale sockets. # # @example Close the stale sockets in each of the cluster's pools. # socket_reaper.execute # # @since 2.5.0 def execute @cluster.servers.each do |server| server.pool.close_stale_sockets! end and true end # When the socket reaper is garbage-collected, there's no need to close stale sockets; # sockets will be closed anyway when the pools are garbage-collected. # # @since 2.5.0 def flush; end end end end mongo-2.5.1/lib/mongo/cluster/reapers/cursor_reaper.rb0000644000004100000410000001015313257253113023077 0ustar www-datawww-data# Copyright (C) 2014-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'set' module Mongo class Cluster # A manager that sends kill cursors operations at regular intervals to close # cursors that have been garbage collected without being exhausted. 
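# # In short: cursor ids are registered via #register_cursor, a kill is queued with # #schedule_kill_cursor when a cursor is garbage collected without being exhausted, and the # queued operations are drained by #kill_cursors (aliased as #execute and #flush), which is # intended to be invoked at regular intervals, e.g. by the cluster's periodic executor.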
# # @api private # # @since 2.3.0 class CursorReaper include Retryable # The default time interval for the cursor reaper to send pending kill cursors operations. # # @since 2.3.0 FREQUENCY = 1.freeze # Create a cursor reaper. # # @example Create a CursorReaper. # Mongo::Cluster::CursorReaper.new(cluster) # # @api private # # @since 2.3.0 def initialize @to_kill = {} @active_cursors = Set.new @mutex = Mutex.new end # Schedule a kill cursors operation to be eventually executed. # # @example Schedule a kill cursors operation. # cursor_reaper.schedule_kill_cursor(id, op_spec, server) # # @param [ Integer ] id The id of the cursor to kill. # @param [ Hash ] op_spec The spec for the kill cursors op. # @param [ Mongo::Server ] server The server to send the kill cursors operation to. # # @api private # # @since 2.3.0 def schedule_kill_cursor(id, op_spec, server) @mutex.synchronize do if @active_cursors.include?(id) @to_kill[server] ||= Set.new @to_kill[server] << op_spec end end end # Register a cursor id as active. # # @example Register a cursor as active. # cursor_reaper.register_cursor(id) # # @param [ Integer ] id The id of the cursor to register as active. # # @api private # # @since 2.3.0 def register_cursor(id) if id && id > 0 @mutex.synchronize do @active_cursors << id end end end # Unregister a cursor id, indicating that it's no longer active. # # @example Unregister a cursor. # cursor_reaper.unregister_cursor(id) # # @param [ Integer ] id The id of the cursor to unregister. # # @api private # # @since 2.3.0 def unregister_cursor(id) @mutex.synchronize do @active_cursors.delete(id) end end # Execute all pending kill cursors operations. # # @example Execute pending kill cursors operations. # cursor_reaper.kill_cursors # # @api private # # @since 2.3.0 def kill_cursors to_kill_copy = {} active_cursors_copy = [] @mutex.synchronize do to_kill_copy = @to_kill.dup active_cursors_copy = @active_cursors.dup @to_kill = {} end to_kill_copy.each do |server, op_specs| op_specs.each do |op_spec| if server.features.find_command_enabled? Cursor::Builder::KillCursorsCommand.update_cursors(op_spec, active_cursors_copy.to_a) if Cursor::Builder::KillCursorsCommand.get_cursors_list(op_spec).size > 0 Operation::Commands::Command.new(op_spec).execute(server) end else Cursor::Builder::OpKillCursors.update_cursors(op_spec, active_cursors_copy.to_a) if Cursor::Builder::OpKillCursors.get_cursors_list(op_spec).size > 0 Operation::KillCursors.new(op_spec).execute(server) end end end end end alias :execute :kill_cursors alias :flush :kill_cursors end end end mongo-2.5.1/lib/mongo/cluster/app_metadata.rb0000644000004100000410000000767713257253113021224 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'rbconfig' module Mongo class Cluster # Application metadata that is sent to the server in an ismaster command, # when a new connection is established. # # @api private # # @since 2.4.0 class AppMetadata extend Forwardable # The max application metadata document byte size. 
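# # #document below trims the client metadata to stay under this limit by first dropping # os.name and os.architecture, then the platform string, and finally the whole client # document if it still does not fit.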
# # @since 2.4.0 MAX_DOCUMENT_SIZE = 512.freeze # The max application name byte size. # # @ since 2.4.0 MAX_APP_NAME_SIZE = 128.freeze # The driver name. # # @ since 2.4.0 DRIVER_NAME = 'mongo-ruby-driver' # Instantiate the new AppMetadata object. # # @api private # # @example Instantiate the app metadata. # Mongo::Cluster.AppMetadata.new(cluster) # # @param [ Mongo::Cluster ] cluster The cluster. # # @since 2.4.0 def initialize(cluster) @app_name = cluster.options[:app_name] @platform = cluster.options[:platform] @compressors = cluster.options[:compressors] || [] end # Get the bytes of the ismaster message including this metadata. # # @api private # # @example Get the ismaster message bytes. # metadata.ismaster_bytes # # @return [ String ] The raw bytes. # # @since 2.4.0 def ismaster_bytes @ismaster_bytes ||= validate! && serialize.to_s end private def validate! if @app_name && @app_name.bytesize > MAX_APP_NAME_SIZE raise Error::InvalidApplicationName.new(@app_name, MAX_APP_NAME_SIZE) end true end def full_client_document BSON::Document.new.tap do |doc| doc[:application] = { name: @app_name } if @app_name doc[:driver] = driver_doc doc[:os] = os_doc doc[:platform] = platform end end def serialize Protocol::Query.new(Database::ADMIN, Database::COMMAND, document, :limit => -1).serialize end def document client_document = full_client_document while client_document.to_bson.to_s.size > MAX_DOCUMENT_SIZE do if client_document[:os][:name] || client_document[:os][:architecture] client_document[:os].delete(:name) client_document[:os].delete(:architecture) elsif client_document[:platform] client_document.delete(:platform) else client_document = nil end end document = Server::Monitor::Connection::ISMASTER document = document.merge(compression: @compressors) document[:client] = client_document document end def driver_doc { name: DRIVER_NAME, version: Mongo::VERSION } end def os_doc { type: type, name: name, architecture: architecture } end def type (RbConfig::CONFIG && RbConfig::CONFIG['host_os']) ? RbConfig::CONFIG['host_os'].split('_').first[/[a-z]+/i].downcase : 'unknown' end def name RbConfig::CONFIG['host_os'] end def architecture RbConfig::CONFIG['target_cpu'] end def platform [ @platform, RUBY_VERSION, RUBY_PLATFORM, RbConfig::CONFIG['build'] ].compact.join(', ') end end end end mongo-2.5.1/lib/mongo/cluster/periodic_executor.rb0000644000004100000410000000465613257253113022312 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cluster # A manager that calls #execute on its executors at a regular interval. # # @api private # # @since 2.5.0 class PeriodicExecutor # The default time interval for the periodic executor to execute. # # @since 2.5.0 FREQUENCY = 5 # Create a periodic executor. # # @example Create a PeriodicExecutor. # Mongo::Cluster::PeriodicExecutor.new(reaper, reaper2) # # @api private # # @since 2.5.0 def initialize(*executors) @thread = nil @executors = executors end # Start the thread. 
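# # #run! delegates to the private #start!, which spawns a background thread that sleeps # FREQUENCY seconds between #execute calls, so the registered executors run roughly every # five seconds.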
# # @example Start the periodic executor's thread. # periodic_executor.run! # # @api private # # @since 2.5.0 def run! @thread && @thread.alive? ? @thread : start! end alias :restart! :run! # Stop the executor's thread. # # @example Stop the executors's thread. # periodic_executor.stop! # # @api private # # @since 2.5.0 def stop! begin; flush; rescue; end @thread.kill && @thread.stop? end # Trigger an execute call on each reaper. # # @example Trigger all reapers. # periodic_executor.execute # # @api private # # @since 2.5.0 def execute @executors.each(&:execute) and true end # Execute all pending operations. # # @example Execute all pending operations. # periodic_executor.flush # # @api private # # @since 2.5.0 def flush @executors.each(&:flush) and true end private def start! @thread = Thread.new(FREQUENCY) do |i| loop do sleep(i) execute end end end end end end mongo-2.5.1/lib/mongo/uri.rb0000644000004100000410000004572013257253113015711 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'uri' module Mongo # The URI class provides a way for users to parse the MongoDB uri as # defined in the connection string format spec. # # http://docs.mongodb.org/manual/reference/connection-string/ # # @example Use the uri string to make a client connection. # uri = Mongo::URI.new('mongodb://localhost:27017') # client = Mongo::Client.new(uri.servers, uri.options) # client.login(uri.credentials) # client[uri.database] # # @since 2.0.0 class URI include Loggable # The uri parser object options. # # @since 2.0.0 attr_reader :options # The options specified in the uri. # # @since 2.1.0 attr_reader :uri_options # The servers specified in the uri. # # @since 2.0.0 attr_reader :servers # The mongodb connection string scheme. # # @deprecated Will be removed in 3.0. # # @since 2.0.0 SCHEME = 'mongodb://'.freeze # The mongodb connection string scheme root. # # @since 2.5.0 MONGODB_SCHEME = 'mongodb'.freeze # The mongodb srv protocol connection string scheme root. # # @since 2.5.0 MONGODB_SRV_SCHEME = 'mongodb+srv'.freeze # Error details for an invalid scheme. # # @since 2.1.0 INVALID_SCHEME = "Invalid scheme. Scheme must be '#{MONGODB_SCHEME}' or '#{MONGODB_SRV_SCHEME}'".freeze # MongoDB URI format specification. # # @since 2.0.0 FORMAT = 'mongodb://[username:password@]host1[:port1][,host2[:port2]' + ',...[,hostN[:portN]]][/[database][?options]]'.freeze # MongoDB URI (connection string) documentation url # # @since 2.0.0 HELP = 'http://docs.mongodb.org/manual/reference/connection-string/'.freeze # Unsafe characters that must be urlencoded. # # @since 2.1.0 UNSAFE = /[\:\/\+\@]/ # Percent sign that must be encoded in user creds. # # @since 2.5.1 PERCENT_CHAR = /\%/ # Unix socket suffix. # # @since 2.1.0 UNIX_SOCKET = /.sock/ # The character delimiting hosts. # # @since 2.1.0 HOST_DELIM = ','.freeze # The character separating a host and port. # # @since 2.1.0 HOST_PORT_DELIM = ':'.freeze # The character delimiting a database. 
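# # For example (an illustrative URI, added for clarity), in # mongodb://user:pass@host1:27017,host2:27017/mydb?replicaSet=rs0&ssl=true # the '@' separates the credentials from the hosts, ',' separates the hosts, the '/' below # separates the hosts from the auth database, '?' introduces the options, '&' separates # individual options and '=' splits an option name from its value.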
# # @since 2.1.0 DATABASE_DELIM = '/'.freeze # The character delimiting options. # # @since 2.1.0 URI_OPTS_DELIM = '?'.freeze # The character delimiting multiple options. # # @since 2.1.0 INDIV_URI_OPTS_DELIM = '&'.freeze # The character delimiting an option and its value. # # @since 2.1.0 URI_OPTS_VALUE_DELIM = '='.freeze # The character separating a username from the password. # # @since 2.1.0 AUTH_USER_PWD_DELIM = ':'.freeze # The character delimiting auth credentials. # # @since 2.1.0 AUTH_DELIM = '@'.freeze # Scheme delimiter. # # @since 2.5.0 SCHEME_DELIM = '://'.freeze # Error details for an invalid options format. # # @since 2.1.0 INVALID_OPTS_VALUE_DELIM = "Options and their values must be delimited" + " by '#{URI_OPTS_VALUE_DELIM}'".freeze # Error details for a non-urlencoded user name or password. # # @since 2.1.0 UNESCAPED_USER_PWD = "User name and password must be urlencoded.".freeze # Error details for a non-urlencoded unix socket path. # # @since 2.1.0 UNESCAPED_UNIX_SOCKET = "UNIX domain sockets must be urlencoded.".freeze # Error details for a non-urlencoded auth database name. # # @since 2.1.0 UNESCAPED_DATABASE = "Auth database must be urlencoded.".freeze # Error details for providing options without a database delimiter. # # @since 2.1.0 INVALID_OPTS_DELIM = "Database delimiter '#{DATABASE_DELIM}' must be present if options are specified.".freeze # Error details for a missing host. # # @since 2.1.0 INVALID_HOST = "Missing host; at least one must be provided.".freeze # Error details for an invalid port. # # @since 2.1.0 INVALID_PORT = "Invalid port. Port must be an integer greater than 0 and less than 65536".freeze # Map of URI read preference modes to ruby driver read preference modes # # @since 2.0.0 READ_MODE_MAP = { 'primary' => :primary, 'primarypreferred' => :primary_preferred, 'secondary' => :secondary, 'secondarypreferred' => :secondary_preferred, 'nearest' => :nearest }.freeze # Map of URI authentication mechanisms to ruby driver mechanisms # # @since 2.0.0 AUTH_MECH_MAP = { 'PLAIN' => :plain, 'MONGODB-CR' => :mongodb_cr, 'GSSAPI' => :gssapi, 'MONGODB-X509' => :mongodb_x509, 'SCRAM-SHA-1' => :scram }.freeze # Options that are allowed to appear more than once in the uri. # # @since 2.1.0 REPEATABLE_OPTIONS = [ :tag_sets ] # Get either a URI object or a SRVProtocol URI object. # # @example Get the uri object. # URI.get(string) # # @return [URI, URI::SRVProtocol] The uri object. # # @since 2.5.0 def self.get(string, opts = {}) scheme, _, remaining = string.partition(SCHEME_DELIM) case scheme when MONGODB_SCHEME URI.new(string, opts) when MONGODB_SRV_SCHEME SRVProtocol.new(string, opts) else raise Error::InvalidURI.new(string, INVALID_SCHEME) end end # Gets the options hash that needs to be passed to a Mongo::Client on # instantiation, so we don't have to merge the credentials and database in # at that point - we only have a single point here. # # @example Get the client options. # uri.client_options # # @return [ Hash ] The options passed to the Mongo::Client # # @since 2.0.0 def client_options opts = uri_options.merge(:database => database) @user ? opts.merge(credentials) : opts end # Create the new uri from the provided string. # # @example Create the new URI. # URI.new('mongodb://localhost:27017') # # @param [ String ] string The uri string. # @param [ Hash ] options The options. # # @raise [ Error::InvalidURI ] If the uri does not match the spec.
# # @since 2.0.0 def initialize(string, options = {}) @string = string @options = options parsed_scheme, _, remaining = string.partition(SCHEME_DELIM) raise_invalid_error!(INVALID_SCHEME) unless parsed_scheme == scheme parse!(remaining) end # Get the credentials provided in the URI. # # @example Get the credentials. # uri.credentials # # @return [ Hash ] The credentials. # * :user [ String ] The user. # * :password [ String ] The provided password. # # @since 2.0.0 def credentials { :user => @user, :password => @password } end # Get the database provided in the URI. # # @example Get the database. # uri.database # # @return [String] The database. # # @since 2.0.0 def database @database ? @database : Database::ADMIN end private def scheme MONGODB_SCHEME end def parse_creds_hosts!(string) hosts, creds = split_creds_hosts(string) @servers = parse_servers!(hosts) @user = parse_user!(creds) @password = parse_password!(creds) end def parse!(remaining) creds_hosts, db_opts = extract_db_opts!(remaining) parse_creds_hosts!(creds_hosts) parse_db_opts!(db_opts) end def extract_db_opts!(string) db_opts, _, creds_hosts = string.reverse.partition(DATABASE_DELIM) db_opts, creds_hosts = creds_hosts, db_opts if creds_hosts.empty? if db_opts.empty? && creds_hosts.include?(URI_OPTS_DELIM) raise_invalid_error!(INVALID_OPTS_DELIM) end [ creds_hosts, db_opts ].map { |s| s.reverse } end def split_creds_hosts(string) hosts, _, creds = string.reverse.partition(AUTH_DELIM) hosts, creds = creds, hosts if hosts.empty? [ hosts, creds ].map { |s| s.reverse } end def parse_db_opts!(string) auth_db, _, uri_opts = string.partition(URI_OPTS_DELIM) @uri_options = Options::Redacted.new(parse_uri_options!(uri_opts)) @database = parse_database!(auth_db) end def parse_uri_options!(string) return {} unless string string.split(INDIV_URI_OPTS_DELIM).reduce({}) do |uri_options, opt| raise_invalid_error!(INVALID_OPTS_VALUE_DELIM) unless opt.index(URI_OPTS_VALUE_DELIM) key, value = opt.split(URI_OPTS_VALUE_DELIM) strategy = URI_OPTION_MAP[key.downcase] if strategy.nil? log_warn("Unsupported URI option '#{key}' on URI '#{@string}'. It will be ignored.") else add_uri_option(strategy, value, uri_options) end uri_options end end def parse_user!(string) if (string && user = string.partition(AUTH_USER_PWD_DELIM)[0]) if user.length > 0 raise_invalid_error!(UNESCAPED_USER_PWD) if user =~ UNSAFE user_decoded = decode(user) if user_decoded =~ PERCENT_CHAR && encode(user_decoded) != user raise_invalid_error!(UNESCAPED_USER_PWD) end user_decoded end end end def parse_password!(string) if (string && pwd = string.partition(AUTH_USER_PWD_DELIM)[2]) if pwd.length > 0 raise_invalid_error!(UNESCAPED_USER_PWD) if pwd =~ UNSAFE pwd_decoded = decode(pwd) if pwd_decoded =~ PERCENT_CHAR && encode(pwd_decoded) != pwd raise_invalid_error!(UNESCAPED_USER_PWD) end pwd_decoded end end end def parse_database!(string) raise_invalid_error!(UNESCAPED_DATABASE) if string =~ UNSAFE decode(string) if string.length > 0 end def validate_port_string!(port) unless port.nil? 
|| (port.length > 0 && port.to_i > 0 && port.to_i <= 65535) raise_invalid_error!(INVALID_PORT) end end def parse_servers!(string) raise_invalid_error!(INVALID_HOST) unless string.size > 0 string.split(HOST_DELIM).reduce([]) do |servers, host| if host[0] == '[' if host.index(']:') h, p = host.split(']:') validate_port_string!(p) end elsif host.index(HOST_PORT_DELIM) h, _, p = host.partition(HOST_PORT_DELIM) raise_invalid_error!(INVALID_HOST) unless h.size > 0 validate_port_string!(p) elsif host =~ UNIX_SOCKET raise_invalid_error!(UNESCAPED_UNIX_SOCKET) if host =~ UNSAFE host = decode(host) end servers << host end end def raise_invalid_error!(details) raise Error::InvalidURI.new(@string, details, FORMAT) end def decode(value) ::URI.decode(value) end def encode(value) ::URI.encode(value) end # Hash for storing map of URI option parameters to conversion strategies URI_OPTION_MAP = {} # Simple internal dsl to register a MongoDB URI option in the URI_OPTION_MAP. # # @param uri_key [String] The MongoDB URI option to register. # @param name [Symbol] The name of the option in the driver. # @param extra [Hash] Extra options. # * :group [Symbol] Nested hash where option will go. # * :type [Symbol] Name of function to transform value. def self.uri_option(uri_key, name, extra = {}) URI_OPTION_MAP[uri_key] = { :name => name }.merge(extra) end # Replica Set Options uri_option 'replicaset', :replica_set, :type => :replica_set # Timeout Options uri_option 'connecttimeoutms', :connect_timeout, :type => :ms_convert uri_option 'sockettimeoutms', :socket_timeout, :type => :ms_convert uri_option 'serverselectiontimeoutms', :server_selection_timeout, :type => :ms_convert uri_option 'localthresholdms', :local_threshold, :type => :ms_convert # Write Options uri_option 'w', :w, :group => :write uri_option 'journal', :j, :group => :write uri_option 'fsync', :fsync, :group => :write uri_option 'wtimeoutms', :timeout, :group => :write # Read Options uri_option 'readpreference', :mode, :group => :read, :type => :read_mode uri_option 'readpreferencetags', :tag_sets, :group => :read, :type => :read_tags uri_option 'maxstalenessseconds', :max_staleness, :group => :read # Pool options uri_option 'minpoolsize', :min_pool_size uri_option 'maxpoolsize', :max_pool_size uri_option 'waitqueuetimeoutms', :wait_queue_timeout, :type => :ms_convert # Security Options uri_option 'ssl', :ssl # Topology options uri_option 'connect', :connect # Auth Options uri_option 'authsource', :auth_source, :type => :auth_source uri_option 'authmechanism', :auth_mech, :type => :auth_mech uri_option 'authmechanismproperties', :auth_mech_properties, :type => :auth_mech_props # Client Options uri_option 'appname', :app_name uri_option 'compressors', :compressors, :type => :array uri_option 'zlibcompressionlevel', :zlib_compression_level # Casts option values that do not have a specifically provided # transformation to the appropriate type. # # @param value [String] The value to be cast. # # @return [true, false, Fixnum, Symbol] The cast value. def cast(value) if value == 'true' true elsif value == 'false' false elsif value =~ /[\d]/ value.to_i else decode(value).to_sym end end # Applies URI value transformation by either using the default cast # or a transformation appropriate for the given type. # # @param value [String] The value to be transformed. # @param type [Symbol] The transform method. def apply_transform(value, type = nil) if type send(type, value) else cast(value) end end # Selects the output destination for an option. 
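# A short sketch of how the option registrations above are consumed. The
# lookups use the real URI_OPTION_MAP constant; `send(:cast, ...)` is used
# here only to illustrate the private fallback conversion, and the sample
# values are arbitrary:
#
#   require 'mongo'
#
#   Mongo::URI::URI_OPTION_MAP['connecttimeoutms']
#   # => { :name => :connect_timeout, :type => :ms_convert }
#   Mongo::URI::URI_OPTION_MAP['readpreferencetags']
#   # => { :name => :tag_sets, :group => :read, :type => :read_tags }
#
#   uri = Mongo::URI.new('mongodb://localhost:27017')
#   uri.send(:cast, 'true')  # => true
#   uri.send(:cast, '300')   # => 300
#   uri.send(:cast, 'zlib')  # => :zlib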
# # @param [Hash] uri_options The base target. # @param [Symbol] group Group subtarget. # # @return [Hash] The target for the option. def select_target(uri_options, group = nil) if group uri_options[group] ||= {} else uri_options end end # Merges a new option into the target. # # If the option exists at the target destination the merge will # be an addition. # # Specifically required to append an additional tag set # to the array of tag sets without overwriting the original. # # @param target [Hash] The destination. # @param value [Object] The value to be merged. # @param name [Symbol] The name of the option. def merge_uri_option(target, value, name) if target.key?(name) if REPEATABLE_OPTIONS.include?(name) target[name] += value else log_warn("Repeated option key: #{name}.") end else target.merge!(name => value) end end # Adds an option to the uri options hash via the supplied strategy. # # Acquires a target for the option based on group. # Transforms the value. # Merges the option into the target. # # @param strategy [Symbol] The strategy for this option. # @param value [String] The value of the option. # @param uri_options [Hash] The base option target. def add_uri_option(strategy, value, uri_options) target = select_target(uri_options, strategy[:group]) value = apply_transform(value, strategy[:type]) merge_uri_option(target, value, strategy[:name]) end # Replica set transformation, avoid converting to Symbol. # # @param value [String] Replica set name. # # @return [String] Same value to avoid cast to Symbol. def replica_set(value) decode(value) end # Auth source transformation, either db string or :external. # # @param value [String] Authentication source. # # @return [String] If auth source is database name. # @return [:external] If auth source is external authentication. def auth_source(value) value == '$external' ? :external : decode(value) end # Authentication mechanism transformation. # # @param value [String] The authentication mechanism. # # @return [Symbol] The transformed authentication mechanism. def auth_mech(value) AUTH_MECH_MAP[value.upcase] end # Read preference mode transformation. # # @param value [String] The read mode string value. # # @return [Symbol] The read mode symbol. def read_mode(value) READ_MODE_MAP[value.downcase] end # Read preference tags transformation. # # @param value [String] The string representing tag set. # # @return [Array] Array with tag set. def read_tags(value) [read_set(value)] end # Read preference tag set extractor. # # @param value [String] The tag set string. # # @return [Hash] The tag set hash. def read_set(value) hash_extractor(value) end # Auth mechanism properties extractor. # # @param value [ String ] The auth mechanism properties string. # # @return [ Hash ] The auth mechanism properties hash. def auth_mech_props(value) properties = hash_extractor(value) if properties[:canonicalize_host_name] properties.merge!(canonicalize_host_name: properties[:canonicalize_host_name] == 'true') end properties end # Ruby's convention is to provide timeouts in seconds, not milliseconds and # to use fractions where more precision is necessary. The connection string # options are always in MS so we provide an easy conversion type. # # @param [ Integer ] value The millisecond value. # # @return [ Float ] The seconds value. # # @since 2.0.0 def ms_convert(value) value.to_f / 1000 end # Extract values from the string and put them into a nested hash. # # @param value [ String ] The string to build a hash from. 
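# Putting the transforms above together, an illustrative end-to-end sketch
# (localhost and the option values are arbitrary; the exact rendering of the
# parsed hash may differ slightly, but the grouping and typing are as shown):
#
#   require 'mongo'
#
#   uri = Mongo::URI.new(
#     'mongodb://localhost:27017/db' \
#     '?readPreference=secondaryPreferred' \
#     '&readPreferenceTags=dc:ny,rack:1' \
#     '&readPreferenceTags=dc:sf' \
#     '&connectTimeoutMS=500&w=majority')
#
#   uri.uri_options[:read]
#   # => { :mode => :secondary_preferred,
#   #      :tag_sets => [{ :dc => "ny", :rack => "1" }, { :dc => "sf" }] }
#   uri.uri_options[:connect_timeout]  # => 0.5
#   uri.uri_options[:write]            # => { :w => :majority }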
# # @return [ Hash ] The hash built from the string. def hash_extractor(value) value.split(',').reduce({}) do |set, tag| k, v = tag.split(':') set.merge(decode(k).downcase.to_sym => decode(v)) end end # Extract values from the string and put them into an array. # # @param [ String ] value The string to build an array from. # # @return [ Array ] The array built from the string. def array(value) value.split(',') end end end require 'mongo/uri/srv_protocol' mongo-2.5.1/lib/mongo/retryable.rb0000644000004100000410000001077413257253113017104 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo # Defines basic behaviour around retrying operations. # # @since 2.1.0 module Retryable # Execute a read operation with a retry. # # @api private # # @example Execute the read. # read_with_retry do # ... # end # # @note This only retries read operations on socket errors. # # @param [ Proc ] block The block to execute. # # @yieldparam [ Server ] server The server to which the write should be sent. # # @return [ Result ] The result of the operation. # # @since 2.1.0 def read_with_retry attempt = 0 begin attempt += 1 yield rescue Error::SocketError, Error::SocketTimeoutError => e raise(e) if attempt > cluster.max_read_retries log_retry(e) cluster.scan! retry rescue Error::OperationFailure => e if cluster.sharded? && e.retryable? raise(e) if attempt > cluster.max_read_retries log_retry(e) sleep(cluster.read_retry_interval) retry else raise e end end end # Execute a read operation with a single retry. # # @api private # # @example Execute the read. # read_with_one_retry do # ... # end # # @note This only retries read operations on socket errors. # # @param [ Proc ] block The block to execute. # # @return [ Result ] The result of the operation. # # @since 2.2.6 def read_with_one_retry yield rescue Error::SocketError, Error::SocketTimeoutError yield end # Execute a write operation with a retry. # # @api private # # @example Execute the write. # write_with_retry do # ... # end # # @note This only retries operations on not master failures, since it is # the only case we can be sure a partial write did not already occur. # # @param [ Proc ] block The block to execute. # # @return [ Result ] The result of the operation. # # @since 2.1.0 def write_with_retry(session, write_concern, &block) unless retry_write_allowed?(session, write_concern) return legacy_write_with_retry(&block) end server = cluster.next_primary unless server.retry_writes? return legacy_write_with_retry(server, &block) end begin txn_num = session.next_txn_num yield(server, txn_num) rescue Error::SocketError, Error::SocketTimeoutError => e retry_write(e, txn_num, &block) rescue Error::OperationFailure => e raise e unless e.write_retryable? retry_write(e, txn_num, &block) end end private def retry_write_allowed?(session, write_concern) session && session.retry_writes? && (write_concern.nil? || write_concern.acknowledged?) end def retry_write(original_error, txn_num, &block) cluster.scan! 
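# A rough sketch of an including class; OrderStore and its collaborators are
# hypothetical. The real contract is only that the includer exposes a #cluster
# responding to #next_primary, #scan!, #sharded?, #max_read_retries and
# #read_retry_interval, which the methods above call directly:
#
#   class OrderStore
#     include Mongo::Retryable
#
#     attr_reader :cluster
#
#     def initialize(cluster)
#       @cluster = cluster
#     end
#
#     # Reads are simply re-run after a cluster rescan on socket errors.
#     def read(operation, server_selector)
#       read_with_retry do
#         operation.execute(server_selector.select_server(cluster))
#       end
#     end
#
#     # Writes receive the selected server and, when retryable writes are
#     # enabled on the session, a transaction number tagging the command.
#     def write(operation, session, write_concern)
#       write_with_retry(session, write_concern) do |server, txn_num|
#         operation.execute(server)
#       end
#     end
#   end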
server = cluster.next_primary raise original_error unless (server.retry_writes? && txn_num) log_retry(original_error) yield(server, txn_num) rescue Error::SocketError, Error::SocketTimeoutError => e cluster.scan! raise e rescue Error::OperationFailure => e raise original_error unless e.write_retryable? cluster.scan! raise e rescue raise original_error end def legacy_write_with_retry(server = nil) attempt = 0 begin attempt += 1 yield(server || cluster.next_primary) rescue Error::OperationFailure => e server = nil raise(e) if attempt > Cluster::MAX_WRITE_RETRIES if e.write_retryable? log_retry(e) cluster.scan! retry else raise(e) end end end # Log a warning so that any application slow down is immediately obvious. def log_retry(e) Logger.logger.warn "Retry due to: #{e.class.name} #{e.message}" end end end mongo-2.5.1/lib/mongo/auth/0000755000004100000410000000000013257253113015516 5ustar www-datawww-datamongo-2.5.1/lib/mongo/auth/cr/0000755000004100000410000000000013257253113016122 5ustar www-datawww-datamongo-2.5.1/lib/mongo/auth/cr/conversation.rb0000644000004100000410000001106413257253113021163 0ustar www-datawww-data# Copyright (C) 2014 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Auth class CR # Defines behaviour around a single MONGODB-CR conversation between the # client and server. # # @since 2.0.0 class Conversation # The login message base. # # @since 2.0.0 LOGIN = { authenticate: 1 }.freeze # @return [ Protocol::Message ] reply The current reply in the # conversation. attr_reader :reply # @return [ String ] database The database to authenticate against. attr_reader :database # @return [ String ] nonce The initial auth nonce. attr_reader :nonce # @return [ User ] user The user for the conversation. attr_reader :user # Continue the CR conversation. This sends the client final message # to the server after setting the reply from the previous server # communication. # # @example Continue the conversation. # conversation.continue(reply) # # @param [ Protocol::Message ] reply The reply of the previous # message. # @param [ Mongo::Server::Connection ] connection The connection being authenticated. # # @return [ Protocol::Query ] The next message to send. # # @since 2.0.0 def continue(reply, connection = nil) validate!(reply) if connection && connection.features.op_msg_enabled? selector = LOGIN.merge(user: user.name, nonce: nonce, key: user.auth_key(nonce)) selector[Protocol::Msg::DATABASE_IDENTIFIER] = user.auth_source cluster_time = connection.mongos? && connection.cluster_time selector[Operation::CLUSTER_TIME] = cluster_time if cluster_time Protocol::Msg.new([:none], {}, selector) else Protocol::Query.new( user.auth_source, Database::COMMAND, LOGIN.merge(user: user.name, nonce: nonce, key: user.auth_key(nonce)), limit: -1 ) end end # Finalize the CR conversation. This is meant to be iterated until # the provided reply indicates the conversation is finished. # # @example Finalize the conversation. 
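# The CR conversation issues getnonce (`start`) and then an authenticate
# command (`continue`) whose key is derived from the server nonce. The
# derivation is the one exposed by Auth::User#auth_key; the nonce below is a
# placeholder rather than a real server response:
#
#   require 'mongo'
#   require 'digest'
#
#   user  = Mongo::Auth::User.new(user: 'alice', password: 's3cret', database: 'admin')
#   nonce = '2375531c32080ae8'
#
#   user.auth_key(nonce) ==
#     Digest::MD5.hexdigest("#{nonce}#{user.name}#{user.hashed_password}")
#   # => true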
# conversation.finalize(reply) # # @param [ Protocol::Message ] reply The reply of the previous # message. # # @return [ Protocol::Query ] The next message to send. # # @since 2.0.0 def finalize(reply, connection = nil) validate!(reply) end # Start the CR conversation. This returns the first message that # needs to be send to the server. # # @example Start the conversation. # conversation.start # # @return [ Protocol::Query ] The first CR conversation message. # # @since 2.0.0 def start(connection = nil) if connection && connection.features.op_msg_enabled? selector = Auth::GET_NONCE.merge(Protocol::Msg::DATABASE_IDENTIFIER => user.auth_source) cluster_time = connection.mongos? && connection.cluster_time selector[Operation::CLUSTER_TIME] = cluster_time if cluster_time Protocol::Msg.new([:none], {}, selector) else Protocol::Query.new( user.auth_source, Database::COMMAND, Auth::GET_NONCE, limit: -1) end end # Create the new conversation. # # @example Create the new conversation. # Conversation.new(user, "admin") # # @param [ Auth::User ] user The user to converse about. # # @since 2.0.0 def initialize(user) @user = user end private def validate!(reply) raise Unauthorized.new(user) if reply.documents[0][Operation::Result::OK] != 1 @nonce = reply.documents[0][Auth::NONCE] @reply = reply end end end end end mongo-2.5.1/lib/mongo/auth/x509.rb0000644000004100000410000000346713257253113016562 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/auth/x509/conversation' module Mongo module Auth # Defines behaviour for x.509 authentication. # # @since 2.0.0 class X509 # The authentication mechinism string. # # @since 2.0.0 MECHANISM = 'MONGODB-X509'.freeze # @return [ Mongo::Auth::User ] The user to authenticate. attr_reader :user # Instantiate a new authenticator. # # @example Create the authenticator. # Mongo::Auth::X509.new(user) # # @param [ Mongo::Auth::User ] user The user to authenticate. # # @since 2.0.0 def initialize(user) @user = user end # Log the user in on the given connection. # # @example Log the user in. # user.login(connection) # # @param [ Mongo::Connection ] connection The connection to log into. # on. # # @return [ Protocol::Message ] The authentication response. # # @since 2.0.0 def login(connection) conversation = Conversation.new(user) reply = connection.dispatch([ conversation.start(connection) ]) connection.update_cluster_time(Operation::Result.new(reply)) conversation.finalize(reply) end end end end mongo-2.5.1/lib/mongo/auth/roles.rb0000644000004100000410000000623613257253113017176 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Auth # Provides constants for the built in roles provided by MongoDB. # # @since 2.0.0 module Roles # @see http://docs.mongodb.org/manual/reference/built-in-roles/#backup # # @since 2.0.0 BACKUP = 'backup'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#clusterAdmin # # @since 2.0.0 CLUSTER_ADMIN = 'clusterAdmin'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#clusterManager # # @since 2.0.0 CLUSTER_MANAGER = 'clusterManager'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#clusterMonitor # # @since 2.0.0 CLUSTER_MONITOR = 'clusterMonitor'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#dbAdmin # # @since 2.0.0 DATABASE_ADMIN = 'dbAdmin'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#dbAdminAnyDatabase # # @since 2.0.0 DATABASE_ADMIN_ANY_DATABASE = 'dbAdminAnyDatabase'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#dbOwner # # @since 2.0.0 DATABASE_OWNER = 'dbOwner'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#hostManager # # @since 2.0.0 HOST_MANAGER = 'hostManager'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#read # # @since 2.0.0 READ = 'read'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#readAnyDatabase # # @since 2.0.0 READ_ANY_DATABASE = 'readAnyDatabase'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#readWriteAnyDatabase # # @since 2.0.0 READ_WRITE_ANY_DATABASE = 'readWriteAnyDatabase'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#readWrite # # @since 2.0.0 READ_WRITE = 'readWrite'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#restore # # @since 2.0.0 RESTORE = 'restore'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#root # # @since 2.0.0 ROOT = 'root'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#userAdmin # # @since 2.0.0 USER_ADMIN = 'userAdmin'.freeze # @see http://docs.mongodb.org/manual/reference/built-in-roles/#userAdminAnyDatabase # # @since 2.0.0 USER_ADMIN_ANY_DATABASE = 'userAdminAnyDatabase'.freeze end end end mongo-2.5.1/lib/mongo/auth/user/0000755000004100000410000000000013257253113016474 5ustar www-datawww-datamongo-2.5.1/lib/mongo/auth/user/view.rb0000644000004100000410000001142213257253113017773 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Auth class User # Defines behaviour for user related operation on databases. 
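# An illustrative usage sketch; the client address, user name and passwords
# are made up, and the same view is usually reachable through the
# client.database.users helper as well:
#
#   require 'mongo'
#
#   client = Mongo::Client.new(['127.0.0.1:27017'], database: 'admin')
#   view   = Mongo::Auth::User::View.new(client.database)
#
#   view.create('emily',
#               password: 'sekrit',
#               roles: [ Mongo::Auth::Roles::READ_WRITE ])
#   view.info('emily')    # => [ { "_id" => "admin.emily", ... } ]
#   view.update('emily', password: 'n3w', roles: [ Mongo::Auth::Roles::READ ])
#   view.remove('emily')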
# # @since 2.0.0 class View extend Forwardable # @return [ Database ] database The view's database. attr_reader :database def_delegators :database, :cluster, :read_preference, :client def_delegators :cluster, :next_primary # Create a new user in the database. # # @example Create a new read/write user. # view.create('user', password: 'password', roles: [ 'readWrite' ]) # # @param [ Auth::User, String ] user_or_name The user object or user name. # @param [ Hash ] options The user options. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Result ] The command response. # # @since 2.0.0 def create(user_or_name, options = {}) user = generate(user_or_name, options) client.send(:with_session, options) do |session| Operation::Write::Command::CreateUser.new( user: user, db_name: database.name, session: session ).execute(next_primary) end end # Initialize the new user view. # # @example Initialize the user view. # View::User.new(database) # # @param [ Mongo::Database ] database The database the view is for. # # @since 2.0.0 def initialize(database) @database = database end # Remove a user from the database. # # @example Remove the user from the database. # view.remove('user') # # @param [ String ] name The user name. # @param [ Hash ] options The options for the remove operation. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Result ] The command response. # # @since 2.0.0 def remove(name, options = {}) client.send(:with_session, options) do |session| Operation::Write::Command::RemoveUser.new( user_name: name, db_name: database.name, session: session ).execute(next_primary) end end # Update a user in the database. # # @example Update a user. # view.update('name', password: 'testpwd') # # @param [ Auth::User, String ] user_or_name The user object or user name. # @param [ Hash ] options The user options. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Result ] The response. # # @since 2.0.0 def update(user_or_name, options = {}) client.send(:with_session, options) do |session| user = generate(user_or_name, options) Operation::Write::Command::UpdateUser.new( user: user, db_name: database.name, session: session ).execute(next_primary) end end # Get info for a particular user in the database. # # @example Get a particular user's info. # view.info('emily') # # @param [ String ] name The user name. # @param [ Hash ] options The options for the info operation. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Hash ] A document containing information on a particular user. # # @since 2.1.0 def info(name, options = {}) user_query(name, options).documents end private def user_query(name, options = {}) client.send(:with_session, options) do |session| Operation::Commands::UsersInfo.new( user_name: name, db_name: database.name, session: session ).execute(next_primary) end end def generate(user, options) user.is_a?(String) ? Auth::User.new({ user: user }.merge(options)) : user end end end end end mongo-2.5.1/lib/mongo/auth/user.rb0000644000004100000410000001155213257253113017025 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/auth/user/view' module Mongo module Auth # Represents a user in MongoDB. # # @since 2.0.0 class User # @return [ String ] The authorization source, either a database or # external name. attr_reader :auth_source # @return [ String ] The database the user is created in. attr_reader :database # @return [ Hash ] The authentication mechanism properties. attr_reader :auth_mech_properties # @return [ Symbol ] The authorization mechanism. attr_reader :mechanism # @return [ String ] The username. attr_reader :name # @return [ String ] The cleartext password. attr_reader :password # @return [ Array ] roles The user roles. attr_reader :roles # Determine if this user is equal to another. # # @example Check user equality. # user == other # # @param [ Object ] other The object to compare against. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(User) name == other.name && database == other.database && password == other.password end # Get an authentication key for the user based on a nonce from the # server. # # @example Get the authentication key. # user.auth_key(nonce) # # @param [ String ] nonce The response from the server. # # @return [ String ] The authentication key. # # @since 2.0.0 def auth_key(nonce) Digest::MD5.hexdigest("#{nonce}#{name}#{hashed_password}") end # Get the UTF-8 encoded name with escaped special characters for use with # SCRAM authorization. # # @example Get the encoded name. # user.encoded_name # # @return [ String ] The encoded user name. # # @since 2.0.0 def encoded_name name.encode(BSON::UTF8).gsub('=','=3D').gsub(',','=2C') end # Get the hash key for the user. # # @example Get the hash key. # user.hash # # @return [ String ] The user hash key. # # @since 2.0.0 def hash [ name, database, password ].hash end # Get the user's hashed password. # # @example Get the user's hashed password. # user.hashed_password # # @return [ String ] The hashed password. # # @since 2.0.0 def hashed_password @hashed_password ||= Digest::MD5.hexdigest("#{name}:mongo:#{password}").encode(BSON::UTF8) end # Create the new user. # # @example Create a new user. # Mongo::Auth::User.new(options) # # @param [ Hash ] options The options to create the user from. # # @option options [ String ] :auth_source The authorization database or # external source. # @option options [ String ] :database The database the user is # authorized for. # @option options [ String ] :user The user name. # @option options [ String ] :password The user's password. # @option options [ Symbol ] :auth_mech The authorization mechanism. # @option options [ Array, Array ] roles The user roles. # @option options [ String ] :client_key The user's client key cached from a previous # authentication on the same connection. 
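# A quick sketch of the value-object helpers defined in this class; the name
# and password are made up:
#
#   require 'mongo'
#
#   user = Mongo::Auth::User.new(user: 'app,admin=rw', password: 'pwd', database: 'reports')
#
#   user.auth_source      # => "reports"  (defaults to the database)
#   user.hashed_password  # == Digest::MD5.hexdigest('app,admin=rw:mongo:pwd')
#   user.encoded_name     # => "app=2Cadmin=3Drw"  (',' and '=' escaped for SCRAM)
#   user.spec             # => { :pwd => user.hashed_password, :roles => [] }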
# # @since 2.0.0 def initialize(options) @database = options[:database] || Database::ADMIN @auth_source = options[:auth_source] || @database @name = options[:user] @password = options[:password] || options[:pwd] @mechanism = options[:auth_mech] || :mongodb_cr @auth_mech_properties = options[:auth_mech_properties] || {} @roles = options[:roles] || [] @client_key = options[:client_key] end # Get the specification for the user, used in creation. # # @example Get the user's specification. # user.spec # # @return [ Hash ] The user spec. # # @since 2.0.0 def spec { pwd: hashed_password, roles: roles } end private # The client key for the user. # # @return [ String ] The client key for the user. attr_reader :client_key end end end mongo-2.5.1/lib/mongo/auth/cr.rb0000644000004100000410000000371113257253113016451 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/auth/cr/conversation' module Mongo module Auth # Defines behaviour for MongoDB-CR authentication. # # @since 2.0.0 class CR # The authentication mechinism string. # # @since 2.0.0 MECHANISM = 'MONGODB-CR'.freeze # @return [ Mongo::Auth::User ] The user to authenticate. attr_reader :user # Instantiate a new authenticator. # # @example Create the authenticator. # Mongo::Auth::CR.new(user) # # @param [ Mongo::Auth::User ] user The user to authenticate. # # @since 2.0.0 def initialize(user) @user = user end # Log the user in on the given connection. # # @example Log the user in. # user.login(connection) # # @param [ Mongo::Connection ] connection The connection to log into. # # @return [ Protocol::Message ] The authentication response. # # @since 2.0.0 def login(connection) conversation = Conversation.new(user) reply = connection.dispatch([ conversation.start(connection) ]) connection.update_cluster_time(Operation::Result.new(reply)) reply = connection.dispatch([ conversation.continue(reply, connection) ]) connection.update_cluster_time(Operation::Result.new(reply)) conversation.finalize(reply, connection) end end end end mongo-2.5.1/lib/mongo/auth/ldap.rb0000644000004100000410000000346413257253113016772 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/auth/ldap/conversation' module Mongo module Auth # Defines behaviour for LDAP Proxy authentication. # # @since 2.0.0 class LDAP # The authentication mechinism string. # # @since 2.0.0 MECHANISM = 'PLAIN'.freeze # @return [ Mongo::Auth::User ] The user to authenticate. 
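# Each mechanism class wraps a Conversation and drives it over an established
# connection. An illustrative sketch, assuming `connection` is an
# already-connected Mongo::Server::Connection obtained elsewhere:
#
#   user = Mongo::Auth::User.new(user: 'svc', password: 'pwd', database: 'admin')
#
#   Mongo::Auth::CR.new(user).login(connection)    # getnonce + authenticate
#   Mongo::Auth::LDAP.new(user).login(connection)  # single PLAIN saslStart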
attr_reader :user # Instantiate a new authenticator. # # @example Create the authenticator. # Mongo::Auth::LDAP.new(user) # # @param [ Mongo::Auth::User ] user The user to authenticate. # # @since 2.0.0 def initialize(user) @user = user end # Log the user in on the given connection. # # @example Log the user in. # user.login(connection) # # @param [ Mongo::Connection ] connection The connection to log into. # on. # # @return [ Protocol::Message ] The authentication response. # # @since 2.0.0 def login(connection) conversation = Conversation.new(user) reply = connection.dispatch([ conversation.start(connection) ]) connection.update_cluster_time(Operation::Result.new(reply)) conversation.finalize(reply) end end end end mongo-2.5.1/lib/mongo/auth/ldap/0000755000004100000410000000000013257253113016436 5ustar www-datawww-datamongo-2.5.1/lib/mongo/auth/ldap/conversation.rb0000644000004100000410000000646313257253113021506 0ustar www-datawww-data# Copyright (C) 2014 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Auth class LDAP # Defines behaviour around a single PLAIN conversation between the # client and server. # # @since 2.0.0 class Conversation # The login message. # # @since 2.0.0 LOGIN = { saslStart: 1, autoAuthorize: 1 }.freeze # @return [ Protocol::Message ] reply The current reply in the # conversation. attr_reader :reply # @return [ User ] user The user for the conversation. attr_reader :user # Finalize the PLAIN conversation. This is meant to be iterated until # the provided reply indicates the conversation is finished. # # @example Finalize the conversation. # conversation.finalize(reply) # # @param [ Protocol::Message ] reply The reply of the previous # message. # # @return [ Protocol::Query ] The next message to send. # # @since 2.0.0 def finalize(reply) validate!(reply) end # Start the PLAIN conversation. This returns the first message that # needs to be send to the server. # # @example Start the conversation. # conversation.start # # @param [ Mongo::Server::Connection ] connection The connection being authenticated. # # @return [ Protocol::Query ] The first PLAIN conversation message. # # @since 2.0.0 def start(connection = nil) if connection && connection.features.op_msg_enabled? selector = LOGIN.merge(payload: payload, mechanism: LDAP::MECHANISM) selector[Protocol::Msg::DATABASE_IDENTIFIER] = Auth::EXTERNAL cluster_time = connection.mongos? && connection.cluster_time selector[Operation::CLUSTER_TIME] = cluster_time if cluster_time Protocol::Msg.new([:none], {}, selector) else Protocol::Query.new( Auth::EXTERNAL, Database::COMMAND, LOGIN.merge(payload: payload, mechanism: LDAP::MECHANISM), limit: -1 ) end end # Create the new conversation. # # @example Create the new conversation. # Conversation.new(user, "admin") # # @param [ Auth::User ] user The user to converse about. 
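# The PLAIN payload built by Conversation#payload is a BSON::Binary holding an
# empty authorization id, the user name and the password, NUL-separated. A
# small sketch with placeholder credentials:
#
#   require 'bson'
#
#   name, password = 'cn=app,ou=service', 'pwd'
#   BSON::Binary.new("\x00#{name}\x00#{password}")
#   # i.e. "\x00cn=app,ou=service\x00pwd"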
# # @since 2.0.0 def initialize(user) @user = user end private def payload BSON::Binary.new("\x00#{user.name}\x00#{user.password}") end def validate!(reply) raise Unauthorized.new(user) if reply.documents[0][Operation::Result::OK] != 1 @reply = reply end end end end end mongo-2.5.1/lib/mongo/auth/x509/0000755000004100000410000000000013257253113016223 5ustar www-datawww-datamongo-2.5.1/lib/mongo/auth/x509/conversation.rb0000644000004100000410000000630413257253113021265 0ustar www-datawww-data# Copyright (C) 2014 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Auth class X509 # Defines behaviour around a single x.509 conversation between the # client and server. # # @since 2.0.0 class Conversation # The login message. # # @since 2.0.0 LOGIN = { authenticate: 1 }.freeze # @return [ Protocol::Message ] reply The current reply in the # conversation. attr_reader :reply # @return [ User ] user The user for the conversation. attr_reader :user # Finalize the x.509 conversation. This is meant to be iterated until # the provided reply indicates the conversation is finished. # # @example Finalize the conversation. # conversation.finalize(reply) # # @param [ Protocol::Message ] reply The reply of the previous # message. # # @return [ Protocol::Query ] The next message to send. # # @since 2.0.0 def finalize(reply) validate!(reply) end # Start the x.509 conversation. This returns the first message that # needs to be send to the server. # # @example Start the conversation. # conversation.start # # @param [ Mongo::Server::Connection ] connection The connection being authenticated. # # @return [ Protocol::Query ] The first x.509 conversation message. # # @since 2.0.0 def start(connection = nil) login = LOGIN.merge(mechanism: X509::MECHANISM) login[:user] = user.name if user.name if connection && connection.features.op_msg_enabled? selector = login selector[Protocol::Msg::DATABASE_IDENTIFIER] = user.auth_source cluster_time = connection.mongos? && connection.cluster_time selector[Operation::CLUSTER_TIME] = cluster_time if cluster_time Protocol::Msg.new([:none], {}, selector) else Protocol::Query.new( Auth::EXTERNAL, Database::COMMAND, login, limit: -1 ) end end # Create the new conversation. # # @example Create the new conversation. # Conversation.new(user, "admin") # # @param [ Auth::User ] user The user to converse about. # # @since 2.0.0 def initialize(user) @user = user end private def validate!(reply) raise Unauthorized.new(user) if reply.documents[0][Operation::Result::OK] != 1 @reply = reply end end end end end mongo-2.5.1/lib/mongo/auth/scram/0000755000004100000410000000000013257253113016623 5ustar www-datawww-datamongo-2.5.1/lib/mongo/auth/scram/conversation.rb0000644000004100000410000003431013257253113021663 0ustar www-datawww-data# Copyright (C) 2014 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'securerandom' require 'base64' module Mongo module Auth class SCRAM # Defines behaviour around a single SCRAM-SHA-1 conversation between the # client and server. # # @since 2.0.0 class Conversation # The base client continue message. # # @since 2.0.0 CLIENT_CONTINUE_MESSAGE = { saslContinue: 1 }.freeze # The base client first message. # # @since 2.0.0 CLIENT_FIRST_MESSAGE = { saslStart: 1, autoAuthorize: 1 }.freeze # The client key string. # # @since 2.0.0 CLIENT_KEY = 'Client Key'.freeze # The digest to use for encryption. # # @since 2.0.0 DIGEST = OpenSSL::Digest::SHA1.new.freeze # The key for the done field in the responses. # # @since 2.0.0 DONE = 'done'.freeze # The conversation id field. # # @since 2.0.0 ID = 'conversationId'.freeze # The iterations key in the responses. # # @since 2.0.0 ITERATIONS = /i=(\d+)/.freeze # The payload field. # # @since 2.0.0 PAYLOAD = 'payload'.freeze # The rnonce key in the responses. # # @since 2.0.0 RNONCE = /r=([^,]*)/.freeze # The salt key in the responses. # # @since 2.0.0 SALT = /s=([^,]*)/.freeze # The server key string. # # @since 2.0.0 SERVER_KEY = 'Server Key'.freeze # The server signature verifier in the response. # # @since 2.0.0 VERIFIER = /v=([^,]*)/.freeze # @return [ String ] nonce The initial user nonce. attr_reader :nonce # @return [ Protocol::Message ] reply The current reply in the # conversation. attr_reader :reply # @return [ User ] user The user for the conversation. attr_reader :user # Continue the SCRAM conversation. This sends the client final message # to the server after setting the reply from the previous server # communication. # # @example Continue the conversation. # conversation.continue(reply) # # @param [ Protocol::Message ] reply The reply of the previous # message. # @param [ Mongo::Server::Connection ] connection The connection being authenticated. # # @return [ Protocol::Query ] The next message to send. # # @since 2.0.0 def continue(reply, connection = nil) validate_first_message!(reply) # The salted password needs to be calculated now; otherwise, if the # client key is cached from a previous authentication, the salt in the # reply will no longer be available for when the salted password is # needed to calculate the server key. salted_password if connection && connection.features.op_msg_enabled? selector = CLIENT_CONTINUE_MESSAGE.merge(payload: client_final_message, conversationId: id) selector[Protocol::Msg::DATABASE_IDENTIFIER] = user.auth_source cluster_time = connection.mongos? && connection.cluster_time selector[Operation::CLUSTER_TIME] = cluster_time if cluster_time Protocol::Msg.new([:none], {}, selector) else Protocol::Query.new( user.auth_source, Database::COMMAND, CLIENT_CONTINUE_MESSAGE.merge(payload: client_final_message, conversationId: id), limit: -1 ) end end # Finalize the SCRAM conversation. This is meant to be iterated until # the provided reply indicates the conversation is finished. # # @example Finalize the conversation. # conversation.finalize(reply) # # @param [ Protocol::Message ] reply The reply of the previous # message. 
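# The RNONCE, SALT and ITERATIONS patterns above pick apart the server-first
# SCRAM payload, which has the shape "r=<nonce>,s=<base64 salt>,i=<count>".
# The sample payload is made up for illustration:
#
#   data = 'r=fyko+d2lbbFgONRv9qkxdawLserver,s=QSXCR+Q6sek8bf92,i=10000'
#
#   data.match(Mongo::Auth::SCRAM::Conversation::RNONCE)[1]           # => "fyko+d2lbbFgONRv9qkxdawLserver"
#   data.match(Mongo::Auth::SCRAM::Conversation::SALT)[1]             # => "QSXCR+Q6sek8bf92"
#   data.match(Mongo::Auth::SCRAM::Conversation::ITERATIONS)[1].to_i  # => 10000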
# @param [ Mongo::Server::Connection ] connection The connection being authenticated. # # @return [ Protocol::Query ] The next message to send. # # @since 2.0.0 def finalize(reply, connection = nil) validate_final_message!(reply) if connection && connection.features.op_msg_enabled? selector = CLIENT_CONTINUE_MESSAGE.merge(payload: client_empty_message, conversationId: id) selector[Protocol::Msg::DATABASE_IDENTIFIER] = user.auth_source cluster_time = connection.mongos? && connection.cluster_time selector[Operation::CLUSTER_TIME] = cluster_time if cluster_time Protocol::Msg.new([:none], {}, selector) else Protocol::Query.new( user.auth_source, Database::COMMAND, CLIENT_CONTINUE_MESSAGE.merge(payload: client_empty_message, conversationId: id), limit: -1 ) end end # Start the SCRAM conversation. This returns the first message that # needs to be send to the server. # # @example Start the conversation. # conversation.start # # @param [ Mongo::Server::Connection ] connection The connection being authenticated. # # @return [ Protocol::Query ] The first SCRAM conversation message. # # @since 2.0.0 def start(connection = nil) if connection && connection.features.op_msg_enabled? selector = CLIENT_FIRST_MESSAGE.merge(payload: client_first_message, mechanism: SCRAM::MECHANISM) selector[Protocol::Msg::DATABASE_IDENTIFIER] = user.auth_source cluster_time = connection.mongos? && connection.cluster_time selector[Operation::CLUSTER_TIME] = cluster_time if cluster_time Protocol::Msg.new([:none], {}, selector) else Protocol::Query.new( user.auth_source, Database::COMMAND, CLIENT_FIRST_MESSAGE.merge(payload: client_first_message, mechanism: SCRAM::MECHANISM), limit: -1 ) end end # Get the id of the conversation. # # @example Get the id of the conversation. # conversation.id # # @return [ Integer ] The conversation id. # # @since 2.0.0 def id reply.documents[0][ID] end # Create the new conversation. # # @example Create the new conversation. # Conversation.new(user) # # @param [ Auth::User ] user The user to converse about. # # @since 2.0.0 def initialize(user) @user = user @nonce = SecureRandom.base64 @client_key = user.send(:client_key) end private # Auth message algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def auth_message @auth_message ||= "#{first_bare},#{reply.documents[0][PAYLOAD].data},#{without_proof}" end # Get the empty client message. # # @api private # # @since 2.0.0 def client_empty_message BSON::Binary.new('') end # Get the final client message. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def client_final_message BSON::Binary.new("#{without_proof},p=#{client_final}") end # Get the client first message # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def client_first_message BSON::Binary.new("n,,#{first_bare}") end # Client final implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-7 # # @since 2.0.0 def client_final @client_final ||= client_proof(client_key, client_signature(stored_key(client_key), auth_message)) end # Client key algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def client_key @client_key ||= hmac(salted_password, CLIENT_KEY) user.instance_variable_set(:@client_key, @client_key) unless user.send(:client_key) @client_key end # Client proof algorithm implementation. 
# # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def client_proof(key, signature) @client_proof ||= Base64.strict_encode64(xor(key, signature)) end # Client signature algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def client_signature(key, message) @client_signature ||= hmac(key, message) end # First bare implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-7 # # @since 2.0.0 def first_bare @first_bare ||= "n=#{user.encoded_name},r=#{nonce}" end # H algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-2.2 # # @since 2.0.0 def h(string) digest.digest(string) end # HI algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-2.2 # # @since 2.0.0 def hi(data) OpenSSL::PKCS5.pbkdf2_hmac_sha1( data, Base64.strict_decode64(salt), iterations, digest.size ) end # HMAC algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-2.2 # # @since 2.0.0 def hmac(data, key) OpenSSL::HMAC.digest(digest, data, key) end # Get the iterations from the server response. # # @api private # # @since 2.0.0 def iterations @iterations ||= payload_data.match(ITERATIONS)[1].to_i end # Get the data from the returned payload. # # @api private # # @since 2.0.0 def payload_data reply.documents[0][PAYLOAD].data end # Get the server nonce from the payload. # # @api private # # @since 2.0.0 def rnonce @rnonce ||= payload_data.match(RNONCE)[1] end # Gets the salt from the server response. # # @api private # # @since 2.0.0 def salt @salt ||= payload_data.match(SALT)[1] end # Salted password algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def salted_password @salted_password ||= hi(user.hashed_password) end # Server key algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def server_key @server_key ||= hmac(salted_password, SERVER_KEY) end # Server signature algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def server_signature @server_signature ||= Base64.strict_encode64(hmac(server_key, auth_message)) end # Stored key algorithm implementation. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-3 # # @since 2.0.0 def stored_key(key) h(key) end # Get the verifier token from the server response. # # @api private # # @since 2.0.0 def verifier @verifier ||= payload_data.match(VERIFIER)[1] end # Get the without proof message. # # @api private # # @see http://tools.ietf.org/html/rfc5802#section-7 # # @since 2.0.0 def without_proof @without_proof ||= "c=biws,r=#{rnonce}" end # XOR operation for two strings. 
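# The proof sent in the client-final message is derived with the helpers
# above. A sketch of the same chain in plain OpenSSL, assuming
# `hashed_password` (from Auth::User#hashed_password), `salt`, `iterations`
# and `auth_message` are already in scope:
#
#   require 'openssl'
#   require 'base64'
#
#   digest          = OpenSSL::Digest::SHA1.new
#   salted_password = OpenSSL::PKCS5.pbkdf2_hmac_sha1(
#                       hashed_password, Base64.strict_decode64(salt),
#                       iterations, digest.size)
#   client_key      = OpenSSL::HMAC.digest(digest, salted_password, 'Client Key')
#   stored_key      = digest.digest(client_key)
#   client_sig      = OpenSSL::HMAC.digest(digest, stored_key, auth_message)
#   client_proof    = Base64.strict_encode64(
#                       client_key.bytes.zip(client_sig.bytes)
#                                 .map { |a, b| (a ^ b).chr }.join)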
# # @api private # # @since 2.0.0 def xor(first, second) first.bytes.zip(second.bytes).map{ |(a,b)| (a ^ b).chr }.join('') end def compare_digest(a, b) check = a.bytesize ^ b.bytesize a.bytes.zip(b.bytes){ |x, y| check |= x ^ y.to_i } check == 0 end def validate_final_message!(reply) validate!(reply) unless compare_digest(verifier, server_signature) raise Error::InvalidSignature.new(verifier, server_signature) end end def validate_first_message!(reply) validate!(reply) raise Error::InvalidNonce.new(nonce, rnonce) unless rnonce.start_with?(nonce) end def validate!(reply) raise Unauthorized.new(user) unless reply.documents[0][Operation::Result::OK] == 1 @reply = reply end private def digest @digest ||= OpenSSL::Digest::SHA1.new.freeze end end end end end mongo-2.5.1/lib/mongo/auth/scram.rb0000644000004100000410000000422513257253113017153 0ustar www-datawww-data# Copyright (C) 2014 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/auth/scram/conversation' module Mongo module Auth # Defines behaviour for SCRAM-SHA1 authentication. # # @since 2.0.0 class SCRAM # The authentication mechinism string. # # @since 2.0.0 MECHANISM = 'SCRAM-SHA-1'.freeze # @return [ Mongo::Auth::User ] The user to authenticate. attr_reader :user # Instantiate a new authenticator. # # @example Create the authenticator. # Mongo::Auth::SCRAM.new(user) # # @param [ Mongo::Auth::User ] user The user to authenticate. # # @since 2.0.0 def initialize(user) @user = user end # Log the user in on the given connection. # # @example Log the user in. # user.login(connection) # # @param [ Mongo::Connection ] connection The connection to log into. # on. # # @return [ Protocol::Message ] The authentication response. # # @since 2.0.0 def login(connection) conversation = Conversation.new(user) reply = connection.dispatch([ conversation.start(connection) ]) connection.update_cluster_time(Operation::Result.new(reply)) reply = connection.dispatch([ conversation.continue(reply, connection) ]) connection.update_cluster_time(Operation::Result.new(reply)) until reply.documents[0][Conversation::DONE] reply = connection.dispatch([ conversation.finalize(reply, connection) ]) connection.update_cluster_time(Operation::Result.new(reply)) end reply end end end end mongo-2.5.1/lib/mongo/options.rb0000644000004100000410000000120413257253113016572 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
require 'mongo/options/mapper' require 'mongo/options/redacted' mongo-2.5.1/lib/mongo/operation.rb0000644000004100000410000000364713257253113017114 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/result' require 'mongo/operation/executable' require 'mongo/operation/specifiable' require 'mongo/operation/limited' require 'mongo/operation/object_id_generator' require 'mongo/operation/uses_command_op_msg' require 'mongo/operation/causally_consistent' require 'mongo/operation/read_preference' require 'mongo/operation/takes_write_concern' require 'mongo/operation/read' require 'mongo/operation/write' require 'mongo/operation/commands' require 'mongo/operation/kill_cursors' module Mongo module Operation # The q field constant. # # @since 2.1.0 Q = 'q'.freeze # The u field constant. # # @since 2.1.0 U = 'u'.freeze # The limit field constant. # # @since 2.1.0 LIMIT = 'limit'.freeze # The multi field constant. # # @since 2.1.0 MULTI = 'multi'.freeze # The upsert field constant. # # @since 2.1.0 UPSERT = 'upsert'.freeze # The collation field constant. # # @since 2.4.0 COLLATION = 'collation'.freeze # The array filters field constant. # # @since 2.5.0 ARRAY_FILTERS = 'arrayFilters'.freeze # The operation time field constant. # # @since 2.5.0 OPERATION_TIME = 'operationTime'.freeze # The cluster time field constant. # # @since 2.5.0 CLUSTER_TIME = '$clusterTime'.freeze end end mongo-2.5.1/lib/mongo/operation/0000755000004100000410000000000013257253113016555 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/result.rb0000644000004100000410000002067013257253113020425 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # Result wrapper for operations. # # @since 2.0.0 class Result extend Forwardable include Enumerable # The field name for the cursor document in an aggregation. # # @since 2.2.0 CURSOR = 'cursor'.freeze # The cursor id field in the cursor document. # # @since 2.2.0 CURSOR_ID = 'id'.freeze # The field name for the first batch of a cursor. # # @since 2.2.0 FIRST_BATCH = 'firstBatch'.freeze # The field name for the next batch of a cursor. # # @since 2.2.0 NEXT_BATCH = 'nextBatch'.freeze # The namespace field in the cursor document. # # @since 2.2.0 NAMESPACE = 'ns'.freeze # The number of documents updated in the write. # # @since 2.0.0 N = 'n'.freeze # The ok status field in the result. 
# # @since 2.0.0 OK = 'ok'.freeze # The result field constant. # # @since 2.2.0 RESULT = 'result'.freeze # @return [ Array ] replies The wrapped wire protocol replies. attr_reader :replies # Is the result acknowledged? # # @note On MongoDB 2.6 and higher all writes are acknowledged since the # driver uses write commands for all write operations. On 2.4 and # lower, the result is acknowledged if the GLE has been executed after # the command. If not, no replies will be specified. Reads will always # return true here since a replies is always provided. # # @return [ true, false ] If the result is acknowledged. # # @since 2.0.0 def acknowledged? !!@replies end # Determine if this result is a collection of multiple replies from the # server. # # @example Is the result for multiple replies? # result.multiple? # # @return [ true, false ] If the result is for multiple replies. # # @since 2.0.0 def multiple? replies.size > 1 end # Get the cursor id if the response is acknowledged. # # @note Cursor ids of 0 indicate there is no cursor on the server. # # @example Get the cursor id. # result.cursor_id # # @return [ Integer ] The cursor id. # # @since 2.0.0 def cursor_id acknowledged? ? replies.last.cursor_id : 0 end # Get the namespace of the cursor. The method should be defined in # result classes where 'ns' is in the server response. # # @return [ Nil ] # # @since 2.0.0 def namespace nil end # Get the documents in the result. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.0.0 def documents if acknowledged? replies.flat_map{ |reply| reply.documents } else [] end end # Iterate over the documents in the replies. # # @example Iterate over the documents. # result.each do |doc| # p doc # end # # @return [ Enumerator ] The enumerator. # # @since 2.0.0 # # @yieldparam [ BSON::Document ] Each document in the result. def each(&block) documents.each(&block) end # Initialize a new result. # # @example Instantiate the result. # Result.new(replies) # # @param [ Protocol::Reply ] replies The wire protocol replies. # # @since 2.0.0 def initialize(replies) @replies = [ *replies ] if replies end # Get the pretty formatted inspection of the result. # # @example Inspect the result. # result.inspect # # @return [ String ] The inspection. # # @since 2.0.0 def inspect "#" end # Get the first reply from the result. # # @example Get the first reply. # result.reply # # @return [ Protocol::Reply ] The first reply. # # @since 2.0.0 def reply if acknowledged? replies.first else nil end end # Get the count of documents returned by the server. # # @example Get the number returned. # result.returned_count # # @return [ Integer ] The number of documents returned. # # @since 2.0.0 def returned_count if acknowledged? multiple? ? aggregate_returned_count : reply.number_returned else 0 end end # If the result was a command then determine if it was considered a # success. # # @note If the write was unacknowledged, then this will always return # true. # # @example Was the command successful? # result.successful? # # @return [ true, false ] If the command was successful. # # @since 2.0.0 def successful? return true if !acknowledged? if first_document.has_key?(OK) ok? && parser.message.empty? else !query_failure? && parser.message.empty? end end # Check the first document's ok field. # # @example Check the ok field. # result.ok? # # @return [ true, false ] If the command returned ok. # # @since 2.1.0 def ok? first_document[OK] == 1 end # Validate the result by checking for any errors. 
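# An illustrative usage sketch; `replies` stands in for the Protocol::Reply
# objects returned by a connection dispatch, which are obtained elsewhere:
#
#   result = Mongo::Operation::Result.new(replies)
#
#   result.acknowledged?  # false for w:0 writes, where no reply exists
#   result.successful?    # inspects the ok field / query-failure flags
#   result.documents      # flattened documents across all replies
#   result.written_count  # summed "n" across replies (aliased as #n)
#   result.validate!      # raises Error::OperationFailure when unsuccessful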
# # @note This only checks for errors with writes since authentication is # handled at the connection level and any authentication errors would # be raised there, before a Result is ever created. # # @example Validate the result. # result.validate! # # @raise [ Error::OperationFailure ] If an error is in the result. # # @return [ Result ] The result if verification passed. # # @since 2.0.0 def validate! !successful? ? raise(Error::OperationFailure.new(parser.message, self)) : self end # Get the number of documents written by the server. # # @example Get the number of documents written. # result.written_count # # @return [ Integer ] The number of documents written. # # @since 2.0.0 def written_count if acknowledged? multiple? ? aggregate_written_count : (first_document[N] || 0) else 0 end end alias :n :written_count # Get the operation time reported in the server response. # # @example Get the operation time. # result.operation_time # # @return [ Object ] The operation time value. # # @since 2.5.0 def operation_time first_document && first_document[OPERATION_TIME] end # Get the cluster time reported in the server response. # # @example Get the cluster time. # result.cluster_time # # @return [ BSON::Document ] The cluster time document. # # @since 2.5.0 def cluster_time first_document && first_document[CLUSTER_TIME] end private def aggregate_returned_count replies.reduce(0) do |n, reply| n += reply.number_returned n end end def aggregate_written_count documents.reduce(0) do |n, document| n += (document[N] || 0) n end end def parser @parser ||= Error::Parser.new(first_document, replies) end def first_document @first_document ||= first || BSON::Document.new end def query_failure? replies.first && (replies.first.query_failure? || replies.first.cursor_not_found?) end end end end mongo-2.5.1/lib/mongo/operation/specifiable.rb0000644000004100000410000003004513257253113021352 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # This module contains common functionality for convenience methods getting # various values from the spec. # # @since 2.0.0 module Specifiable # The field for database name. # # @since 2.0.0 DB_NAME = :db_name.freeze # The field for deletes. # # @since 2.0.0 DELETES = :deletes.freeze # The field for delete. # # @since 2.0.0 DELETE = :delete.freeze # The field for documents. # # @since 2.0.0 DOCUMENTS = :documents.freeze # The field for collection name. # # @since 2.0.0 COLL_NAME = :coll_name.freeze # The field for cursor count. # # @since 2.0.0 CURSOR_COUNT = :cursor_count.freeze # The field for cursor id. # # @since 2.0.0 CURSOR_ID = :cursor_id.freeze # The field for cursor ids. # # @since 2.0.0 CURSOR_IDS = :cursor_ids.freeze # The field for an index. # # @since 2.0.0 INDEX = :index.freeze # The field for multiple indexes. # # @since 2.0.0 INDEXES = :indexes.freeze # The field for index names. # # @since 2.0.0 INDEX_NAME = :index_name.freeze # The operation id constant. 
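# # @note The symbols defined in this module are keys into the operation # spec hash. A hypothetical spec, shown for illustration only, might be: # { updates: [{ 'q' => { name: 'test' }, 'u' => { '$set' => { v: 1 } } }], # db_name: 'test_db', coll_name: 'test_coll', operation_id: 123 } 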
# # @since 2.1.0 OPERATION_ID = :operation_id.freeze # The field for options. # # @since 2.0.0 OPTIONS = :options.freeze # The read concern option. # # @since 2.2.0 READ_CONCERN = :read_concern.freeze # The max time ms option. # # @since 2.2.5 MAX_TIME_MS = :max_time_ms.freeze # The field for a selector. # # @since 2.0.0 SELECTOR = :selector.freeze # The field for number to return. # # @since 2.0.0 TO_RETURN = :to_return.freeze # The field for updates. # # @since 2.0.0 UPDATES = :updates.freeze # The field for update. # # @since 2.0.0 UPDATE = :update.freeze # The field name for a user. # # @since 2.0.0 USER = :user.freeze # The field name for user name. # # @since 2.0.0 USER_NAME = :user_name.freeze # The field name for a write concern. # # @since 2.0.0 WRITE_CONCERN = :write_concern.freeze # The field name for the read preference. # # @since 2.0.0 READ = :read.freeze # Whether to bypass document level validation. # # @since 2.2.0 BYPASS_DOC_VALIDATION = :bypass_document_validation.freeze # A collation to apply to the operation. # # @since 2.4.0 COLLATION = :collation.freeze # @return [ Hash ] spec The specification for the operation. attr_reader :spec # Check equality of two specifiable operations. # # @example Are the operations equal? # operation == other # # @param [ Object ] other The other operation. # # @return [ true, false ] Whether the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Specifiable) spec == other.spec end alias_method :eql?, :== # Get the cursor count from the spec. # # @example Get the cursor count. # specifiable.cursor_count # # @return [ Integer ] The cursor count. # # @since 2.0.0 def cursor_count spec[CURSOR_COUNT] end # The name of the database to which the operation should be sent. # # @example Get the database name. # specifiable.db_name # # @return [ String ] Database name. # # @since 2.0.0 def db_name spec[DB_NAME] end # Get the deletes from the specification. # # @example Get the deletes. # specifiable.deletes # # @return [ Array ] The deletes. # # @since 2.0.0 def deletes spec[DELETES] end # Get the delete document from the specification. # # @example Get the delete document. # specifiable.delete # # @return [ Hash ] The delete document. # # @since 2.0.0 def delete spec[DELETE] end # The documents to in the specification. # # @example Get the documents. # specifiable.documents # # @return [ Array ] The documents. # # @since 2.0.0 def documents spec[DOCUMENTS] end # The name of the collection to which the operation should be sent. # # @example Get the collection name. # specifiable.coll_name # # @return [ String ] Collection name. # # @since 2.0.0 def coll_name spec[COLL_NAME] end # The id of the cursor created on the server. # # @example Get the cursor id. # specifiable.cursor_id # # @return [ Integer ] The cursor id. # # @since 2.0.0 def cursor_id spec[CURSOR_ID] end # The ids of the cursors to kill from the spec. # # @example Get the cursor ids from the spec. # specifiable.cursor_ids # # @return [ Array ] The cursor ids. # # @since 2.0.0 def cursor_ids spec[CURSOR_IDS] end # Get the index from the specification. # # @example Get the index specification. # specifiable.index # # @return [ Hash ] The index specification. # # @since 2.0.0 def index spec[INDEX] end # Get the index name from the spec. # # @example Get the index name. # specifiable.index_name # # @return [ String ] The index name. # # @since 2.0.0 def index_name spec[INDEX_NAME] end # Get the indexes from the specification. 
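# # @note For illustration only: given a hypothetical spec such as # { indexes: [{ key: { name: 1 }, name: 'name_1' }], db_name: 'test', coll_name: 'users' }, # this method returns the array stored under +:indexes+. 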
# # @example Get the index specifications. # specifiable.indexes # # @return [ Hash ] The index specifications. # # @since 2.0.0 def indexes spec[INDEXES] end # Create the new specifiable operation. # # @example Create the new specifiable operation. # Specifiable.new(spec) # # @param [ Hash ] spec The operation specification. # # @see The individual operations for the values they require in their # specs. # # @since 2.0.0 def initialize(spec) @spec = spec end # Get the operation id for the operation. Used for linking operations in # monitoring. # # @example Get the operation id. # specifiable.operation_id # # @return [ Integer ] The operation id. # # @since 2.1.0 def operation_id spec[OPERATION_ID] end # Get the options for the operation. # # @example Get the options. # specifiable.options # # @return [ Hash ] The options. # # @since 2.0.0 def options spec[OPTIONS] || {} end # Get the read concern from the spec. # # @example Get the read concern. # specifiable.read_concern # # @return [ Hash ] The read concern. # # @since 2.2.0 def read_concern spec[READ_CONCERN] end # Get the max time ms value from the spec. # # @example Get the max time ms. # specifiable.max_time_ms # # @return [ Hash ] The max time ms value. # # @since 2.2.5 def max_time_ms spec[MAX_TIME_MS] end # Whether or not to bypass document level validation. # # @example Get the bypass_document_validation option. # specifiable.bypass_documentation_validation. # # @return [ true, false ] Whether to bypass document level validation. # # @since 2.2.0 def bypass_document_validation spec[BYPASS_DOC_VALIDATION] end # The collation to apply to the operation. # # @example Get the collation option. # specifiable.collation. # # @return [ Hash ] The collation document. # # @since 2.4.0 def collation spec[COLLATION] end # The selector for from the specification. # # @example Get a selector specification. # specifiable.selector. # # @return [ Hash ] The selector spec. # # @since 2.0.0 def selector spec[SELECTOR] end # The number of documents to request from the server. # # @example Get the to return value from the spec. # specifiable.to_return # # @return [ Integer ] The number of documents to return. # # @since 2.0.0 def to_return spec[TO_RETURN] end # The update documents from the spec. # # @example Get the update documents. # # @return [ Array ] The update documents. # # @since 2.0.0 def updates spec[UPDATES] end # The update document from the spec. # # @example Get the update document. # # @return [ Hash ] The update document. # # @since 2.0.0 def update spec[UPDATE] end # The user for user related operations. # # @example Get the user. # specifiable.user # # @return [ Auth::User ] The user. # # @since 2.0.0 def user spec[USER] end # The user name from the specification. # # @example Get the user name. # specifiable.user_name # # @return [ String ] The user name. # # @since 2.0. def user_name spec[USER_NAME] end # The write concern to use for this operation. # # @example Get the write concern. # specifiable.write_concern # # @return [ Mongo::WriteConcern ] The write concern. # # @since 2.0.0 def write_concern @spec[WRITE_CONCERN] end # The read preference for this operation. # # @example Get the read preference. # specifiable.read # # @return [ Mongo::ServerSelector ] The read preference. # # @since 2.0.0 def read @read ||= ServerSelector.get(spec[READ]) if spec[READ] end # Whether the operation is ordered. # # @example Get the ordered value, true is the default. # specifiable.ordered? 
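# # @note Illustration only: the value is read from +:ordered+ in the spec # and defaults to +true+ when the key is absent, so a hypothetical spec # of { documents: [...], ordered: false } yields +false+. 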
# # @return [ true, false ] Whether the operation is ordered. # # @since 2.1.0 def ordered? !!(@spec.fetch(:ordered, true)) end # The namespace, consisting of the db name and collection name. # # @example Get the namespace. # specifiable.namespace # # @return [ String ] The namespace. # # @since 2.1.0 def namespace "#{db_name}.#{coll_name}" end # The session to use for the operation. # # @example Get the session. # specifiable.session # # @return [ Session ] The session. # # @since 2.5.0 def session @spec[:session] end # The transaction number for the operation. # # @example Get the transaction number. # specifiable.txn_num # # @return [ Integer ] The transaction number. # # @since 2.5.0 def txn_num @spec[:txn_num] end end end end mongo-2.5.1/lib/mongo/operation/commands/0000755000004100000410000000000013257253113020356 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/get_more/0000755000004100000410000000000013257253113022157 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/get_more/result.rb0000644000004100000410000000323113257253113024021 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class GetMore # Defines custom behaviour of results for the get more command. # # @since 2.2.0 class Result < Operation::Result # Get the cursor id. # # @example Get the cursor id. # result.cursor_id # # @return [ Integer ] The cursor id. # # @since 2.2.0 def cursor_id cursor_document ? cursor_document[CURSOR_ID] : super end # Get the documents in the result. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.2.0 def documents cursor_document[NEXT_BATCH] end private def cursor_document @cursor_document ||= reply.documents[0][CURSOR] end def first_document @first_document ||= reply.documents[0] end end end end end end mongo-2.5.1/lib/mongo/operation/commands/list_collections.rb0000644000004100000410000000276513257253113024266 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB listCollections command operation. # # @example Create the listCollections command operation. # ListCollections.new(db_name: 'test') # # @note A command is actually a query on the virtual '$cmd' collection. # # Initialization: # param [ Hash ] spec The specifications for the command. 
# # option spec :db_name [ String ] The name of the database whose list of # collection names is requested. # option spec :options [ Hash ] Options for the command. # # @since 2.0.0 class ListCollections < Command private def selector (spec[SELECTOR] || {}).merge( listCollections: 1, filter: { name: { '$not' => /system\.|\$/ }} ) end end end end end require 'mongo/operation/commands/list_collections/result' mongo-2.5.1/lib/mongo/operation/commands/get_more.rb0000644000004100000410000000147713257253113022515 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # Encapsulates behaviour for executing a getmore command. # # @since 2.2.0 class GetMore < Command; end end end end require 'mongo/operation/commands/get_more/result' mongo-2.5.1/lib/mongo/operation/commands/users_info/0000755000004100000410000000000013257253113022532 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/users_info/result.rb0000644000004100000410000000226213257253113024377 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class UsersInfo # Defines custom behaviour of results when using the # usersInfo command. # # @since 2.1.0 class Result < Operation::Result # The field name for the users document in a usersInfo result. # # @since 2.1.0 USERS = 'users'.freeze def documents reply.documents.first[USERS] end private def first_document @first_document ||= reply.documents[0] end end end end end end mongo-2.5.1/lib/mongo/operation/commands/find.rb0000644000004100000410000000153513257253113021627 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # Encapsulates behaviour for executing a find command. 
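# # @example Construct the operation (illustrative values only). # Find.new( # selector: { find: 'test_coll', filter: { name: 'test' } }, # db_name: 'test_db' # ) # # @note This is a sketch; the selector is normally assembled by the # collection view rather than written by hand. 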
# # @since 2.2.0 class Find < Command include CausallyConsistent end end end end require 'mongo/operation/commands/find/result' mongo-2.5.1/lib/mongo/operation/commands/parallel_scan/0000755000004100000410000000000013257253113023156 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/parallel_scan/result.rb0000644000004100000410000000331313257253113025021 0ustar www-datawww-data # Copyright (C) 2009-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class ParallelScan # Defines custom behaviour of results in a parallel scan. # # @since 2.0.0 class Result < Operation::Result # The name of the cursors field in the result. # # @since 2.0.0 CURSORS = 'cursors'.freeze # Get all the cursor ids from the result. # # @example Get the cursor ids. # result.cursor_ids # # @return [ Array ] The cursor ids. # # @since 2.0.0 def cursor_ids documents.map{ |doc| doc[CURSOR][CURSOR_ID] } end # Get the documents from parallel scan. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.0.0 def documents reply.documents[0][CURSORS] end private def first @first ||= reply.documents[0] || {} end end end end end end mongo-2.5.1/lib/mongo/operation/commands/users_info.rb0000644000004100000410000000316413257253113023063 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB operation to get info of a particular user in a database. # # @example Create the users info operation. # UsersInfo.new(:name => 'emily', :db_name => 'test-db') # # Initialization: # param [ Hash ] spec The specifications for the users info operation. # # option spec :user_name [ String ] The name of the user. # option spec :db_name [ String ] The name of the database where the user exists. # option spec :options [ Hash ] Options for the operation. # # @since 2.1.0 class UsersInfo < Command private def selector { :usersInfo => user_name } end def message(server) if server.features.op_msg_enabled? command_op_msg(server, selector, options) else Protocol::Query.new(db_name, query_coll, selector, options) end end end end end end require 'mongo/operation/commands/users_info/result' mongo-2.5.1/lib/mongo/operation/commands/command.rb0000644000004100000410000000264213257253113022325 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB command operation. # # @example Create the command operation. # Command.new({ :selector => { :isMaster => 1 } }) # # Initialization: # param [ Hash ] spec The specifications for the command. # # option spec :selector [ Hash ] The command selector. # option spec :db_name [ String ] The name of the database on which # the command should be executed. # option spec :options [ Hash ] Options for the command. # # @since 2.0.0 class Command include Specifiable include Limited include ReadPreference include Executable include UsesCommandOpMsg private def query_coll Database::COMMAND end end end end end mongo-2.5.1/lib/mongo/operation/commands/distinct.rb0000644000004100000410000000146513257253113022532 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # Encapsulates behaviour for executing a distinct command. # # @since 2.5.0 class Distinct < Command include CausallyConsistent end end end end mongo-2.5.1/lib/mongo/operation/commands/list_indexes.rb0000644000004100000410000000304513257253113023377 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB listIndexes command operation. # # @example Create the listIndexes command operation. # ListIndexes.new({ db_name: 'test', coll_name: 'example' }) # # @note A command is actually a query on the virtual '$cmd' collection. # # Initialization: # param [ Hash ] spec The specifications for the command. # # option spec :coll_name [ Hash ] The name of the collection whose index # info is requested. # option spec :db_name [ String ] The name of the database on which # the command should be executed. # option spec :options [ Hash ] Options for the command. 
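# # @example Rough execution sketch (the +server+ variable is hypothetical). # op = ListIndexes.new(db_name: 'test', coll_name: 'users') # op.execute(server) # # @note The private selector below merges +listIndexes: coll_name+ into any # user-supplied selector, so the sketch above sends { listIndexes: 'users' }. 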
# # @since 2.0.0 class ListIndexes < Command private def selector (spec[SELECTOR] || {}).merge(listIndexes: coll_name) end end end end end require 'mongo/operation/commands/list_indexes/result' mongo-2.5.1/lib/mongo/operation/commands/count.rb0000644000004100000410000000145713257253113022042 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # Encapsulates behaviour for executing a count command. # # @since 2.5.0 class Count < Command include CausallyConsistent end end end end mongo-2.5.1/lib/mongo/operation/commands/map_reduce.rb0000644000004100000410000000417213257253113023013 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB map reduce operation. # # @note A map/reduce operation can behave like a read and # return a result set, or can behave like a write operation and # output results to a user-specified collection. # # @example Create the map/reduce operation. # MapReduce.new({ # :selector => { # :mapreduce => 'test_coll', # :map => '', # :reduce => '' # }, # :db_name => 'test_db' # }) # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :selector [ Hash ] The map reduce selector. # option spec :db_name [ String ] The name of the database on which # the operation should be executed. # option spec :options [ Hash ] Options for the map reduce command. # # @since 2.0.0 class MapReduce < Command include TakesWriteConcern include CausallyConsistent private def message(server) sel = update_selector_for_write_concern(selector, server) if server.features.op_msg_enabled? command_op_msg(server, sel, options) else sel = update_selector_for_read_pref(sel, server) opts = update_options_for_slave_ok(options, server) Protocol::Query.new(db_name, query_coll, sel, opts) end end end end end end require 'mongo/operation/commands/map_reduce/result' mongo-2.5.1/lib/mongo/operation/commands/collections_info.rb0000644000004100000410000000421513257253113024236 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/commands/collections_info/result' module Mongo module Operation module Commands # A MongoDB operation to get a list of collections info in a database. # # @example Create the collections info operation. # CollectionsInfo.new(:db_name => 'test-db') # # Initialization: # param [ Hash ] spec The specifications for the collections info operation. # # option spec :db_name [ String ] The name of the database whose collections # info is requested. # option spec :options [ Hash ] Options for the operation. # # @since 2.0.0 class CollectionsInfo include Specifiable include ReadPreference include Executable # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation response, if there is one. # # @since 2.0.0 def execute(server) if server.features.list_collections_enabled? ListCollections.new(spec).execute(server) else server.with_connection do |connection| Result.new(connection.dispatch([ message(server) ])).validate! end end end private def selector { :name => { '$not' => /system\.|\$/ } } end def query_coll Database::NAMESPACES end end end end end mongo-2.5.1/lib/mongo/operation/commands/drop_database.rb0000644000004100000410000000312513257253113023474 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB drop database operation. # # @example Instantiate the operation. # Drop.new(selector: { dropDatabase: 'test' }, :db_name => 'test') # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :db_name [ String ] The name of the database. # option spec :selector [ Hash ] The drop database selector. # option spec :write_concern [ String ] The write concern to use. # Only applied for server version >= 3.4. # # @since 2.4.0 class DropDatabase < Command include TakesWriteConcern private def message(server) sel = update_selector_for_write_concern(selector, server) if server.features.op_msg_enabled? command_op_msg(server, sel, options) else Protocol::Query.new(db_name, query_coll, sel, options) end end end end end end mongo-2.5.1/lib/mongo/operation/commands/find/0000755000004100000410000000000013257253113021276 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/find/result.rb0000644000004100000410000000321613257253113023143 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class Find # Defines custom behaviour of results in find command. # # @since 2.2.0 class Result < Operation::Result # Get the cursor id. # # @example Get the cursor id. # result.cursor_id # # @return [ Integer ] The cursor id. # # @since 2.2.0 def cursor_id cursor_document ? cursor_document[CURSOR_ID] : super end # Get the documents in the result. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.2.0 def documents cursor_document[FIRST_BATCH] end private def cursor_document @cursor_document ||= reply.documents[0][CURSOR] end def first_document @first_document ||= reply.documents[0] end end end end end end mongo-2.5.1/lib/mongo/operation/commands/map_reduce/0000755000004100000410000000000013257253113022462 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/map_reduce/result.rb0000644000004100000410000000740613257253113024334 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class MapReduce # Defines custom behaviour of results for a map reduce operation. # # @since 2.0.0 class Result < Operation::Result # The counts field for the map/reduce. # # @since 2.0.0 COUNTS = 'counts'.freeze # The field name for a result without a cursor. # # @since 2.0.0 RESULTS = 'results'.freeze # The time the operation took constant. # # @since 2.0.0 TIME = 'timeMillis'.freeze # Gets the map/reduce counts from the reply. # # @example Get the counts. # result.counts # # @return [ Hash ] A hash of the result counts. # # @since 2.0.0 def counts reply.documents[0][COUNTS] end # Get the documents from the map/reduce. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.0.0 def documents reply.documents[0][RESULTS] || reply.documents[0][RESULT] end # If the result was a command then determine if it was considered a # success. # # @note If the write was unacknowledged, then this will always return # true. # # @example Was the command successful? # result.successful? # # @return [ true, false ] If the command was successful. # # @since 2.0.0 def successful? !documents.nil? end # Get the execution time of the map/reduce. # # @example Get the execution time. # result.time # # @return [ Integer ] The executing time in milliseconds. # # @since 2.0.0 def time reply.documents[0][TIME] end # Validate the result by checking for any errors. 
# # @note This only checks for errors with writes since authentication is # handled at the connection level and any authentication errors would # be raised there, before a Result is ever created. # # @example Validate the result. # result.validate! # # @raise [ Error::OperationFailure ] If an error is in the result. # # @return [ Result ] The result if verification passed. # # @since 2.0.0 def validate! documents.nil? ? raise(Error::OperationFailure.new(parser.message, self)) : self end # Get the cursor id. # # @example Get the cursor id. # result.cursor_id # # @return [ Integer ] Always 0 because map reduce doesn't return a cursor. # # @since 2.5.0 def cursor_id 0 end private def first_document @first_document ||= reply.documents[0] end end end end end end mongo-2.5.1/lib/mongo/operation/commands/list_indexes/0000755000004100000410000000000013257253113023050 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/list_indexes/result.rb0000644000004100000410000000601313257253113024713 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class ListIndexes # Defines custom behaviour of results when using the # listIndexes command. # # @since 2.0.0 class Result < Operation::Result # Get the cursor id for the result. # # @example Get the cursor id. # result.cursor_id # # @note Even though the wire protocol has a cursor_id field for all # messages of type reply, it is always zero when using the # listIndexes command and must be retrieved from the cursor # document itself. # # @return [ Integer ] The cursor id. # # @since 2.0.0 def cursor_id cursor_document ? cursor_document[CURSOR_ID] : super end # Get the namespace for the cursor. # # @example Get the namespace. # result.namespace # # @return [ String ] The namespace. # # @since 2.0.0 def namespace cursor_document ? cursor_document[NAMESPACE] : super end # Get the documents for the listIndexes result. This is the 'firstBatch' # field in the 'cursor' field of the first document returned. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.0.0 def documents cursor_document[FIRST_BATCH] end # Validate the result. In the case where the database or collection # does not exist on the server we will get an error, and it's better # to raise a meaningful exception here than the ambiguous one when # the error occurs. # # @example Validate the result. # result.validate! # # @raise [ NoNamespace ] If the ns doesn't exist. # # @return [ Result ] Self if successful. # # @since 2.0.0 def validate! !successful? ? 
raise(Error::OperationFailure.new(parser.message, self)) : self end private def cursor_document @cursor_document ||= first_document[CURSOR] end def first_document @first_document ||= reply.documents[0] end end end end end end mongo-2.5.1/lib/mongo/operation/commands/list_collections/0000755000004100000410000000000013257253113023727 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/list_collections/result.rb0000644000004100000410000000554013257253113025576 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class ListCollections # Defines custom behaviour of results when using the # listCollections command. # # @since 2.0.0 class Result < Operation::Result # Get the cursor id for the result. # # @example Get the cursor id. # result.cursor_id # # @note Even though the wire protocol has a cursor_id field for all # messages of type reply, it is always zero when using the # listCollections command and must be retrieved from the cursor # document itself. # # @return [ Integer ] The cursor id. # # @since 2.0.0 def cursor_id cursor_document ? cursor_document[CURSOR_ID] : super end # Get the namespace for the cursor. # # @example Get the namespace. # result.namespace # # @return [ String ] The namespace. # # @since 2.0.0 def namespace cursor_document ? cursor_document[NAMESPACE] : super end # Get the documents for the listCollections result. It is the 'firstBatch' # field in the 'cursor' field of the first document returned. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.0.0 def documents cursor_document[FIRST_BATCH] end # Validate the result. In the case where an unauthorized client tries # to run the command we need to generate the proper error. # # @example Validate the result. # result.validate! # # @return [ Result ] Self if successful. # # @since 2.0.0 def validate! !successful? ? raise(Error::OperationFailure.new(parser.message, self)) : self end private def cursor_document @cursor_document ||= first_document[CURSOR] end def first_document @first_document ||= reply.documents[0] end end end end end end mongo-2.5.1/lib/mongo/operation/commands/explain/0000755000004100000410000000000013257253113022016 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/explain/result.rb0000644000004100000410000000264013257253113023663 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
module Mongo module Operation module Commands class Explain # Defines custom behaviour of results in find command with explain. # # @since 2.5.0 class Result < Operation::Result # Get the cursor id. # # @example Get the cursor id. # result.cursor_id # # @return [ 0 ] Always 0 because explain doesn't return a cursor. # # @since 2.5.0 def cursor_id 0 end # Get the documents in the result. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.5.0 def documents reply.documents end end end end end end mongo-2.5.1/lib/mongo/operation/commands/aggregate.rb0000644000004100000410000000414613257253113022636 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB aggregate operation. # # @note An aggregate operation can behave like a read and return a # result set, or can behave like a write operation and # output results to a user-specified collection. # # @example Create the aggregate operation. # Aggregate.new({ # :selector => { # :aggregate => 'test_coll', :pipeline => [{ '$out' => 'test-out' }] # }, # :db_name => 'test_db' # }) # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :selector [ Hash ] The aggregate selector. # option spec :db_name [ String ] The name of the database on which # the operation should be executed. # option spec :options [ Hash ] Options for the aggregate command. # # @since 2.0.0 class Aggregate < Command include TakesWriteConcern include CausallyConsistent private def message(server) sel = update_selector_for_write_concern(selector, server) if server.features.op_msg_enabled? command_op_msg(server, sel, options) else sel = update_selector_for_read_pref(sel, server) opts = update_options_for_slave_ok(options, server) Protocol::Query.new(db_name, query_coll, sel, opts) end end end end end end require 'mongo/operation/commands/aggregate/result' mongo-2.5.1/lib/mongo/operation/commands/explain.rb0000644000004100000410000000151013257253113022340 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # Encapsulates behaviour for executing a find command with explain. 
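# # @example Construct the operation (illustrative values only). # Explain.new( # selector: { explain: { find: 'test_coll', filter: {} } }, # db_name: 'test_db' # ) # # @note Sketch only; in practice the explain selector is built by the # explainable view layer rather than by hand. 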
# # @since 2.5.0 class Explain < Command; end end end end require 'mongo/operation/commands/explain/result' mongo-2.5.1/lib/mongo/operation/commands/aggregate/0000755000004100000410000000000013257253113022304 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/aggregate/result.rb0000644000004100000410000000530413257253113024151 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # Aggregate result wrapper. # # @since 2.0.0 class Aggregate # Defines custom behaviour of results in an aggregation context. # # @since 2.0.0 class Result < Operation::Result # The field name for the aggregation explain information. # # @since 2.0.5 EXPLAIN = 'stages'.freeze # The legacy field name for the aggregation explain information. # # @since 2.0.5 EXPLAIN_LEGACY = 'serverPipeline'.freeze # Get the cursor id for the result. # # @example Get the cursor id. # result.cursor_id # # @note Even though the wire protocol has a cursor_id field for all # messages of type reply, it is always zero when using the # aggregation framework and must be retrieved from the cursor # document itself. Wahnsinn! # # @return [ Integer ] The cursor id. # # @since 2.0.0 def cursor_id cursor_document ? cursor_document[CURSOR_ID] : 0 end # Get the documents for the aggregation result. This is either the # first document's 'result' field, or if a cursor option was selected # it is the 'firstBatch' field in the 'cursor' field of the first # document returned. # # @example Get the documents. # result.documents # # @return [ Array ] The documents. # # @since 2.0.0 def documents reply.documents[0][RESULT] || explain_document || cursor_document[FIRST_BATCH] end private def explain_document first_document[EXPLAIN] || first_document[EXPLAIN_LEGACY] end def cursor_document @cursor_document ||= reply.documents[0][CURSOR] end def first_document @first_document ||= reply.documents[0] end end end end end end mongo-2.5.1/lib/mongo/operation/commands/create.rb0000644000004100000410000000312313257253113022145 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB create collection operation. # # @example Instantiate the operation. # Create.new(selector: { create: 'test' }, :db_name => 'test') # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :db_name [ String ] The name of the database. 
# option spec :selector [ Hash ] The create collection selector. # option spec :write_concern [ String ] The write concern to use. # Only applied for server version >= 3.4. # # @since 2.4.0 class Create < Command include TakesWriteConcern private def message(server) sel = update_selector_for_write_concern(selector, server) if server.features.op_msg_enabled? command_op_msg(server, sel, options) else Protocol::Query.new(db_name, query_coll, sel, options) end end end end end end mongo-2.5.1/lib/mongo/operation/commands/parallel_scan.rb0000644000004100000410000000341313257253113023504 0ustar www-datawww-data# Copyright (C) 2009-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB parallel scan operation. # # @example Create the parallel scan operation. # ParallelScan.new({ # :db_name => 'test_db', # :coll_name = > 'test_collection', # :cursor_count => 5 # }) # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :db_name [ String ] The name of the database on which # the operation should be executed. # option spec :coll_name [ String ] The collection to scan. # option spec :cursor_count [ Integer ] The number of cursors to use. # option spec :options [ Hash ] Options for the command. # # @since 2.0.0 class ParallelScan < Command include CausallyConsistent private def selector command = { :parallelCollectionScan => coll_name, :numCursors => cursor_count } command[:readConcern] = read_concern if read_concern command[:maxTimeMS] = max_time_ms if max_time_ms command end end end end end require 'mongo/operation/commands/parallel_scan/result' mongo-2.5.1/lib/mongo/operation/commands/indexes.rb0000644000004100000410000000403713257253113022346 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB get indexes operation. # # Initialize the get indexes operation. # # @example Instantiate the operation. # Indexes.new(:db_name => 'test', :coll_name => 'test_coll') # # Initialization: # param [ Hash ] spec The specifications for the insert. # # option spec :db_name [ String ] The name of the database. # option spec :coll_name [ String ] The name of the collection. # # @since 2.0.0 class Indexes include Specifiable include ReadPreference # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. 
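# # @note Dispatch sketch: when the server supports the listIndexes command # (MongoDB 3.0 and newer) the same spec is handed to ListIndexes; otherwise # the operation falls back to +execute_message+, which queries the legacy # system indexes collection. 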
# # @return [ Result ] The indexes operation response. # # @since 2.0.0 def execute(server) if server.features.list_indexes_enabled? ListIndexes.new(spec).execute(server) else execute_message(server) end end private def execute_message(server) server.with_connection do |connection| Result.new(connection.dispatch([ message(server) ])) end end def selector { ns: namespace } end def query_coll Index::COLLECTION end end end end end mongo-2.5.1/lib/mongo/operation/commands/drop.rb0000644000004100000410000000311113257253113021643 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands # A MongoDB drop collection operation. # # @example Instantiate the operation. # Drop.new(selector: { drop: 'test' }, :db_name => 'test') # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :db_name [ String ] The name of the database. # option spec :selector [ Hash ] The drop collection selector. # option spec :write_concern [ String ] The write concern to use. # Only applied for server version >= 3.4. # # @since 2.4.0 class Drop < Command include TakesWriteConcern private def message(server) sel = update_selector_for_write_concern(selector, server) if server.features.op_msg_enabled? command_op_msg(server, sel, options) else Protocol::Query.new(db_name, query_coll, sel, options) end end end end end end mongo-2.5.1/lib/mongo/operation/commands/collections_info/0000755000004100000410000000000013257253113023707 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/commands/collections_info/result.rb0000644000004100000410000000223113257253113025550 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Commands class CollectionsInfo # Defines custom behaviour of results when query the system.namespaces # collection. # # @since 2.1.0 class Result < Operation::Result # Get the namespace for the cursor. # # @example Get the namespace. # result.namespace # # @return [ String ] The namespace. # # @since 2.1.0 def namespace Database::NAMESPACES end end end end end end mongo-2.5.1/lib/mongo/operation/read_preference.rb0000644000004100000410000000360013257253113022212 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # Adds behaviour for updating the options and the selector for operations # that need to take read preference into account. # # @since 2.0.0 module ReadPreference include UsesCommandOpMsg # The constant for slave ok flags. # # @since 2.0.6 SLAVE_OK = :slave_ok private def update_selector_for_read_pref(sel, server) if read && server.mongos? && read_pref = read.to_mongos sel = sel[:$query] ? sel : { :$query => sel } sel.merge(:$readPreference => read_pref) else sel end end def slave_ok?(server) (server.cluster.single? && !server.mongos?) || (read && read.slave_ok?) end def update_options_for_slave_ok(opts, server) if slave_ok?(server) opts.dup.tap do |o| (o[:flags] ||= []) << SLAVE_OK end else opts end end def message(server) if server.features.op_msg_enabled? command_op_msg(server, selector, options) else sel = update_selector_for_read_pref(selector, server) opts = update_options_for_slave_ok(options, server) Protocol::Query.new(db_name, query_coll, sel, opts) end end end end end mongo-2.5.1/lib/mongo/operation/object_id_generator.rb0000644000004100000410000000172713257253113023101 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # The default generator of ids for documents. # # @since 2.2.0 class ObjectIdGenerator # Generate a nwe id. # # @example Generate the id. # object_id_generator.generate # # @return [ BSON::ObjectId ] The new id. # # @since 2.2.0 def generate BSON::ObjectId.new end end end end mongo-2.5.1/lib/mongo/operation/uses_command_op_msg.rb0000644000004100000410000000535413257253113023132 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # A command that uses OP_MSG, with the document as payload type 0. # # @since 2.5.0 module UsesCommandOpMsg private ZERO_TIMESTAMP = BSON::Timestamp.new(0,0) READ_PREFERENCE = '$readPreference'.freeze def apply_causal_consistency!(selector, server); end def apply_cluster_time!(selector, server) if !server.standalone? 
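# Gossip the cluster time: compare the server-tracked $clusterTime document # with the session-tracked one (if a session is present), treating a missing # document as ZERO_TIMESTAMP, and keep whichever carries the greater timestamp. # It is only attached to the selector when that timestamp is strictly greater # than ZERO_TIMESTAMP. 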
cluster_time = [ server.cluster_time, (session && session.cluster_time) ].max_by do |doc| (doc && doc[Cluster::CLUSTER_TIME]) || ZERO_TIMESTAMP end if cluster_time && (cluster_time[Cluster::CLUSTER_TIME] > ZERO_TIMESTAMP) selector[CLUSTER_TIME] = cluster_time end end end def apply_session_id!(selector) session.add_id!(selector) if session && !unacknowledged_write? end def unacknowledged_write? write_concern && write_concern.get_last_error.nil? end def update_selector_for_session!(selector, server) # the driver MUST ignore any implicit session if at the point it is sending a command # to a specific server it turns out that that particular server doesn't support sessions after all if server.features.sessions_enabled? apply_cluster_time!(selector, server) selector[:txnNumber] = BSON::Int64.new(txn_num) if txn_num if session apply_session_id!(selector) apply_causal_consistency!(selector, server) end elsif session && !session.implicit? apply_cluster_time!(selector, server) apply_session_id!(selector) apply_causal_consistency!(selector, server) selector[:txnNumber] = BSON::Int64.new(txn_num) if txn_num end end def command_op_msg(server, selector, options) update_selector_for_session!(selector, server) selector[Protocol::Msg::DATABASE_IDENTIFIER] = db_name selector[READ_PREFERENCE] = read.to_doc if read flags = unacknowledged_write? ? [:more_to_come] : [:none] Protocol::Msg.new(flags, options, selector) end end end end mongo-2.5.1/lib/mongo/operation/causally_consistent.rb0000644000004100000410000000205413257253113023171 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # Encapsulates behaviour for adding a causal consistency doc to the command. # # @since 2.5.0 module CausallyConsistent private def apply_causal_consistency!(selector, server) if !server.standalone? full_read_concern_doc = session.send(:causal_consistency_doc, selector[:readConcern]) selector[:readConcern] = full_read_concern_doc if full_read_concern_doc end end end end end mongo-2.5.1/lib/mongo/operation/write/0000755000004100000410000000000013257253113017707 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/insert/0000755000004100000410000000000013257253113021213 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/insert/result.rb0000644000004100000410000000356613257253113023070 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
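# A sketch of the session handling above as seen from the public API; the
# deployment and collection names are made up, and a 3.6+ server (sessions
# enabled) is assumed.
#
#   client  = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   session = client.start_session(causal_consistency: true)
#
#   client[:orders].insert_one({ sku: 'abc' }, session: session)
#
#   # The follow-up read carries the session id, $clusterTime, and a causally
#   # consistent read concern derived from the write above.
#   client[:orders].find({ sku: 'abc' }, session: session).first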
module Mongo module Operation module Write class Insert # Defines custom behaviour of results for an insert. # # According to the CRUD spec, reporting the inserted ids # is optional. It can be added to this class later, if needed. # # @since 2.0.0 class Result < Operation::Result # Get the ids of the inserted documents. # # @since 2.0.0 attr_reader :inserted_ids # Initialize a new result. # # @example Instantiate the result. # Result.new(replies, inserted_ids) # # @param [ Protocol::Message ] replies The wire protocol replies. # @param [ Array ] ids The ids of the inserted documents. # # @since 2.0.0 def initialize(replies, ids) @replies = [ *replies ] if replies @inserted_ids = ids end # Gets the id of the document inserted. # # @example Get id of the document inserted. # result.inserted_id # # @return [ Object ] The id of the document inserted. # # @since 2.0.0 def inserted_id inserted_ids.first end end end end end end mongo-2.5.1/lib/mongo/operation/write/update/0000755000004100000410000000000013257253113021171 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/update/result.rb0000644000004100000410000001121413257253113023033 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write class Update # Defines custom behaviour of results for an update. # # @since 2.0.0 class Result < Operation::Result # The number of modified docs field in the result. # # @since 2.0.0 MODIFIED = 'nModified'.freeze # The upserted docs field in the result. # # @since 2.0.0 UPSERTED = 'upserted'.freeze # Get the number of documents matched. # # @example Get the matched count. # result.matched_count # # @return [ Integer ] The matched count. # # @since 2.0.0 def matched_count return 0 unless acknowledged? if upsert? 0 else n end end # Get the number of documents modified. # # @example Get the modified count. # result.modified_count # # @return [ Integer ] The modified count. # # @since 2.0.0 def modified_count return 0 unless acknowledged? first[MODIFIED] end # The identifier of the inserted document if an upsert # took place. # # @example Get the upserted document's identifier. # result.upserted_id # # @return [ Object ] The upserted id. # # @since 2.0.0 def upserted_id return nil unless upsert? upsert?.first['_id'] end # Returns the number of documents upserted. # # @example Get the number of upserted documents. # result.upserted_count # # @return [ Integer ] The number upserted. # # @since 2.4.2 def upserted_count upsert? ? n : 0 end private def upsert? first[UPSERTED] end end # Defines custom behaviour of results for an update on server # version <= 2.4. # # @since 2.0.0 class LegacyResult < Operation::Result # Whether an existing document was updated. # # @since 2.0.0 UPDATED_EXISTING = 'updatedExisting'.freeze # The upserted docs field in the result. # # @since 2.0.0 UPSERTED = 'upserted'.freeze # Get the number of documents matched. # # @example Get the matched count. 
# result.matched_count # # @return [ Integer ] The matched count. # # @since 2.0.0 def matched_count return 0 unless acknowledged? if upsert? 0 else n end end # Get the number of documents modified. # # @example Get the modified count. # result.modified_count # # @return [ nil ] Always omitted for legacy versions. # # @since 2.0.0 def modified_count; end # The identifier of the inserted document if an upsert # took place. # # @example Get the upserted document's identifier. # result.upserted_id # # @return [ Object ] The upserted id. # # @since 2.0.0 def upserted_id first[UPSERTED] if upsert? end # Returns the number of documents upserted. # # @example Get the number of upserted documents. # result.upserted_count # # @return [ Integer ] The number upserted. # # @since 2.4.2 def upserted_count upsert? ? n : 0 end private def upsert? !updated_existing? && n == 1 end def updated_existing? first[UPDATED_EXISTING] end end end end end end mongo-2.5.1/lib/mongo/operation/write/update.rb0000644000004100000410000000534213257253113021522 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/update/result' module Mongo module Operation module Write # A MongoDB update operation. # # @note If the server version is >= 2.5.5, a write command operation # will be created and sent instead. # # @example Create the update operation. # Write::Update.new({ # :update => # { # :q => { :foo => 1 }, # :u => { :$set => { :bar => 1 }}, # :multi => true, # :upsert => false # :array_filters => [] # }, # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern # }) # Initialization: # param [ Hash ] spec The specifications for the update. # # option spec :update [ Hash ] The update document. # option spec :db_name [ String ] The name of the database on which # the query should be run. # option spec :coll_name [ String ] The name of the collection on which # the query should be run. # option spec :write_concern [ Mongo::WriteConcern ] The write concern. # option spec :options [ Hash ] Options for the command, if it ends up being a # write command. # # @since 2.0.0 class Update include GLE include WriteCommandEnabled include Specifiable private def write_command_op s = spec.merge(:updates => [ update ]) s.delete(:update) Command::Update.new(s) end def has_array_filters? update[Operation::ARRAY_FILTERS] end def has_collation? update[:collation] || update[Operation::COLLATION] end def message(server) flags = [] flags << :multi_update if update[Operation::MULTI] flags << :upsert if update[Operation::UPSERT] Protocol::Update.new( db_name, coll_name, update[Operation::Q], update[Operation::U], flags.empty? ? {} : { flags: flags } ) end end end end end mongo-2.5.1/lib/mongo/operation/write/gle.rb0000644000004100000410000000305213257253113021003 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
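# A sketch showing where the update result fields above surface in the
# public API; the data and collection name are illustrative.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   result = client[:counters].update_one(
#     { _id: 'page_views' },
#     { '$inc' => { value: 1 } },
#     upsert: true
#   )
#
#   result.matched_count   # 0 on the first run (an upsert), 1 afterwards
#   result.upserted_id     # the _id involved when an upsert took place
#   result.modified_count  # may be nil against legacy (pre-2.6) servers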
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write # This module contains common functionality for operations that need to # be followed by a GLE message. # # @since 2.1.0 module GLE private def execute_message(server) server.with_connection do |connection| result_class = self.class.const_defined?(:LegacyResult, false) ? self.class::LegacyResult : self.class.const_defined?(:Result, false) ? self.class::Result : Result result_class.new(connection.dispatch([ message(server), gle ].compact)).validate! end end def gle wc = write_concern || WriteConcern.get(WriteConcern::DEFAULT) if gle_message = wc.get_last_error Protocol::Query.new( db_name, Database::COMMAND, gle_message, options.merge(limit: -1) ) end end end end end end mongo-2.5.1/lib/mongo/operation/write/idable.rb0000644000004100000410000000336513257253113021463 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write # This module provides functionality to ensure that documents contain # an id field. Used by insert operations (Bulk, legacy, write command inserts). # # @since 2.1.0 module Idable # The option for a custom id generator. # # @since 2.2.0 ID_GENERATOR = :id_generator.freeze # Get the id generator. # # @example Get the id generator. # idable.id_generator # # @return [ IdGenerator ] The default or custom id generator. # # @since 2.2.0 def id_generator @id_generator ||= (spec[ID_GENERATOR] || ObjectIdGenerator.new) end private def id(doc) doc.respond_to?(:id) ? doc.id : (doc['_id'] || doc[:_id]) end def has_id?(doc) !!id(doc) end def ensure_ids(documents) @ids ||= [] documents.collect do |doc| doc_with_id = has_id?(doc) ? doc : doc.merge(_id: id_generator.generate) @ids << id(doc_with_id) doc_with_id end end end end end end mongo-2.5.1/lib/mongo/operation/write/command.rb0000644000004100000410000000201313257253113021646 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
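# A sketch contrasting acknowledged and unacknowledged writes, which is what
# decides whether the GLE module above appends a getLastError query on old
# servers; the host is hypothetical.
#
#   acknowledged   = Mongo::Client.new([ '127.0.0.1:27017' ],
#                                      database: 'test', write: { w: 1 })
#   unacknowledged = acknowledged.with(write: { w: 0 })
#
#   acknowledged[:events].insert_one(type: 'click')    # GLE follows on pre-2.6
#   unacknowledged[:events].insert_one(type: 'click')  # fire-and-forget, no GLE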
require 'mongo/operation/write/command/writable' require 'mongo/operation/write/command/delete' require 'mongo/operation/write/command/insert' require 'mongo/operation/write/command/update' require 'mongo/operation/write/command/drop_index' require 'mongo/operation/write/command/create_index' require 'mongo/operation/write/command/create_user' require 'mongo/operation/write/command/update_user' require 'mongo/operation/write/command/remove_user' mongo-2.5.1/lib/mongo/operation/write/command/0000755000004100000410000000000013257253113021325 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/command/update.rb0000644000004100000410000000451213257253113023136 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # A MongoDB update write command operation. # # @example Create an update write command operation. # Write::Command::Update.new({ # :updates => [{ # :q => { :foo => 1 }, # :u => { :$set => # :bar => 1 }}, # :multi => true, # :upsert => false # :array_filters => [] # }], # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern, # :ordered => true, # :bypass_document_validation => true # }) # # @since 2.0.0 class Update include Specifiable include Writable private IDENTIFIER = 'updates'.freeze def selector { update: coll_name, updates: updates }.merge(command_options) end def op_msg(server) global_args = { update: coll_name, Protocol::Msg::DATABASE_IDENTIFIER => db_name }.merge!(command_options) update_selector_for_session!(global_args, server) section = { type: 1, payload: { identifier: IDENTIFIER, sequence: updates } } flags = unacknowledged_write? ? [:more_to_come] : [:none] Protocol::Msg.new(flags, {}, global_args, section) end def message(server) if server.features.op_msg_enabled? op_msg(server) else Protocol::Query.new(db_name, Database::COMMAND, selector, options) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/remove_user.rb0000644000004100000410000000344513257253113024213 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # Remove user commands on non-legacy servers. # # @since 2.0.0 class RemoveUser include Specifiable include Writable # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. 
# # @return [ Result ] The operation response, if there is one. # # @since 2.5.0 def execute(server) result = Result.new(server.with_connection do |connection| connection.dispatch([ message(server) ], operation_id) end) server.update_cluster_time(result) session.process(result) if session result.validate! end private # The query selector for this drop user command operation. # # @return [ Hash ] The selector describing this drop user operation. # # @since 2.0.0 def selector { :dropUser => user_name } end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/insert.rb0000644000004100000410000000422713257253113023163 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # A MongoDB insert write command operation. # # @example Create an insert write command operation. # Write::Command::Insert.new({ # :documents => [{ :foo => 1 }], # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern, # :ordered => true # }) # @since 2.0.0 class Insert include Specifiable include Writable private IDENTIFIER = 'documents'.freeze def selector { insert: coll_name, documents: documents }.merge!(command_options) end def op_msg(server) global_args = { insert: coll_name, Protocol::Msg::DATABASE_IDENTIFIER => db_name }.merge!(command_options) update_selector_for_session!(global_args, server) section = { type: 1, payload: { identifier: IDENTIFIER, sequence: documents } } flags = unacknowledged_write? ? [:more_to_come] : [:none] Protocol::Msg.new(flags, { validating_keys: true }, global_args, section) end def message(server) if server.features.op_msg_enabled? op_msg(server) else opts = options.merge(validating_keys: true) Protocol::Query.new(db_name, Database::COMMAND, selector, opts) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/create_user.rb0000644000004100000410000000352713257253113024162 0ustar www-datawww-data # Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # Create user commands on non-legacy servers. # # @since 2.0.0 class CreateUser include Specifiable include Writable # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation response, if there is one. 
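# For context, a sketch of the public helpers that end up issuing the
# createUser/dropUser commands in this directory; the credentials and role
# are placeholders.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'admin',
#                              user: 'admin', password: 'admin-password')
#
#   client.database.users.create('alice',
#                                password: 's3cr3t',
#                                roles: [ 'readWrite' ])
#   client.database.users.remove('alice')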
# # @since 2.5.0 def execute(server) result = Result.new(server.with_connection do |connection| connection.dispatch([ message(server) ], operation_id) end) server.update_cluster_time(result) session.process(result) if session result.validate! end private # The query selector for this create user command operation. # # @return [ Hash ] The selector describing this create user operation. # # @since 2.0.0 def selector { :createUser => user.name, :digestPassword => false }.merge(user.spec) end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/drop_index.rb0000644000004100000410000000474013257253113024012 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # A MongoDB drop index write command operation. # # @example Create the drop index operation. # Write::Command::DropIndex.new({ # :index => { :foo => 1 }, # :db_name => 'test', # :coll_name => 'test_coll', # :index_name => 'foo_1' # }) # # @since 2.0.0 class DropIndex include Specifiable include Writable include TakesWriteConcern include UsesCommandOpMsg # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation response, if there is one. # # @since 2.5.0 def execute(server) result = Result.new(server.with_connection do |connection| connection.dispatch([ message(server) ], operation_id) end) server.update_cluster_time(result) session.process(result) if session result.validate! end private # The query selector for this drop index command operation. # # @return [ Hash ] The selector describing this insert operation. # # @since 2.0.0 def selector { :dropIndexes => coll_name, :index => index_name } end def message(server) sel = update_selector_for_write_concern(selector, server) if server.features.op_msg_enabled? command_op_msg(server, sel, options) else Protocol::Query.new(db_name, Database::COMMAND, sel, options) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/delete.rb0000644000004100000410000000471613257253113023124 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # A MongoDB delete write command operation. # # @example Initialize a delete write command. 
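# A sketch of the index helpers that drive the dropIndexes command above and
# its createIndexes counterpart elsewhere in this directory; the key and
# index name are illustrative.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   view   = client[:users].indexes
#
#   view.create_one({ email: 1 }, unique: true, name: 'email_1')
#   view.drop_one('email_1')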
# Write::Command::Delete.new({ # :deletes => [{ :q => { :foo => 1 }, :limit => 1 }], # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern, # :ordered => true # }) # # @since 2.0.0 class Delete include Specifiable include Writable private IDENTIFIER = 'deletes'.freeze # The query selector for this delete command operation. # # @return [ Hash ] The selector describing this delete operation. # # @since 2.0.0 def selector { delete: coll_name, deletes: deletes }.merge(command_options) end def op_msg(server) global_args = { delete: coll_name, Protocol::Msg::DATABASE_IDENTIFIER => db_name }.merge!(command_options) update_selector_for_session!(global_args, server) section = { type: 1, payload: { identifier: IDENTIFIER, sequence: deletes } } flags = unacknowledged_write? ? [:more_to_come] : [:none] Protocol::Msg.new(flags, {}, global_args, section) end # The wire protocol message for this write operation. # # @return [ Mongo::Protocol::Query ] Wire protocol message. # # @since 2.0.0 def message(server) if server.features.op_msg_enabled? op_msg(server) else Protocol::Query.new(db_name, Database::COMMAND, selector, options) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/writable.rb0000644000004100000410000000425413257253113023470 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # Provides common behavior for write commands. # Assigns an operation id when executed. # # @since 2.0.0 module Writable include Limited include UsesCommandOpMsg # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation response, if there is one. # # @since 2.0.0 def execute(server) server.with_connection do |connection| connection.dispatch([ message(server) ], operation_id) end end private def command_options opts = { ordered: ordered? } opts[:writeConcern] = write_concern.options if write_concern opts[:collation] = collation if collation opts[:bypassDocumentValidation] = true if bypass_document_validation opts end # The wire protocol message for this write operation. # # @return [ Mongo::Protocol::Query ] Wire protocol message. # # @since 2.0.0 def message(server) if server.features.op_msg_enabled? command_op_msg(server, selector, options) else Protocol::Query.new(db_name, Database::COMMAND, selector, options) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/update_user.rb0000644000004100000410000000352613257253113024200 0ustar www-datawww-data # Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
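# A sketch of a delete issued through the public API, which is encoded as a
# delete write command like the one above; the data is made up and the
# collation option is only honoured by servers >= 3.4.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   result = client[:people].delete_many(
#     { name: 'peter' },
#     collation: { locale: 'en_US', strength: 2 }
#   )
#   result.deleted_count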
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # Update user command on non-legacy servers. # # @since 2.0.0 class UpdateUser include Specifiable include Writable # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation response, if there is one. # # @since 2.5.0 def execute(server) result = Result.new(server.with_connection do |connection| connection.dispatch([ message(server) ], operation_id) end) server.update_cluster_time(result) session.process(result) if session result.validate! end private # The query selector for this update user command operation. # # @return [ Hash ] The selector describing this update user operation. # # @since 2.0.0 def selector { :updateUser => user.name, :digestPassword => false }.merge(user.spec) end end end end end end mongo-2.5.1/lib/mongo/operation/write/command/create_index.rb0000644000004100000410000000476613257253113024321 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Command # A MongoDB ensure index write command operation. # # @example Create an ensure index command operation. # Write::Command::CreateIndex.new({ # :indexes => [{ :key => { :foo => 1 }, :name => 'foo_1', :unique => true }], # :db_name => 'test', # :coll_name => 'test_coll' # }) # # @since 2.0.0 class CreateIndex include Specifiable include Writable include TakesWriteConcern include UsesCommandOpMsg # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation response, if there is one. # # @since 2.5.0 def execute(server) result = Result.new(server.with_connection do |connection| connection.dispatch([ message(server) ], operation_id) end) server.update_cluster_time(result) session.process(result) if session result.validate! end private # The query selector for this ensure index command operation. # # @return [ Hash ] The selector describing this insert operation. # # @since 2.0.0 def selector { :createIndexes => coll_name, :indexes => indexes } end def message(server) sel = update_selector_for_write_concern(selector, server) if server.features.op_msg_enabled? 
command_op_msg(server, sel, options) else Protocol::Query.new(db_name, Database::COMMAND, sel, options) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/insert.rb0000644000004100000410000000501513257253113021541 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/insert/result' module Mongo module Operation module Write # A MongoDB insert operation. # # @note If a server with version >= 2.5.5 is being used, a write command # operation will be created and sent instead. # # @example Create the new insert operation. # Write::Insert.new({ # :documents => [{ :foo => 1 }], # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern # }) # # Initialization: # param [ Hash ] spec The specifications for the insert. # # option spec :documents [ Array ] The documents to insert. # option spec :db_name [ String ] The name of the database. # option spec :coll_name [ String ] The name of the collection. # option spec :write_concern [ Mongo::WriteConcern ] The write concern. # option spec :options [ Hash ] Options for the command, if it ends up being a # write command. # # @since 2.0.0 class Insert include GLE include WriteCommandEnabled include Specifiable include Idable private def execute_write_command(server) command_spec = spec.merge(:documents => ensure_ids(documents)) result = Result.new(Command::Insert.new(command_spec).execute(server), @ids) server.update_cluster_time(result) session.process(result) if session result.validate! end def execute_message(server) server.with_connection do |connection| Result.new(connection.dispatch([ message(server), gle ].compact), @ids).validate! end end def message(server) opts = !!options[:continue_on_error] ? { :flags => [:continue_on_error] } : {} Protocol::Insert.new(db_name, coll_name, ensure_ids(documents), opts) end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/0000755000004100000410000000000013257253113020644 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/bulk/insert/0000755000004100000410000000000013257253113022150 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/bulk/insert/result.rb0000644000004100000410000000767213257253113024027 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Bulk class Insert # Defines custom behaviour of results when inserting. 
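# A sketch of a multi-document insert through the public API, whose result
# exposes the collected ids described below; the documents are illustrative.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   result = client[:people].insert_many([
#     { name: 'ada' },
#     { name: 'grace', _id: BSON::ObjectId.new }
#   ])
#
#   result.inserted_count  # 2
#   result.inserted_ids    # the generated and supplied _id values, in order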
# # @since 2.0.0 class Result < Operation::Result include Mergable # Get the ids of the inserted documents. # # @since 2.0.0 attr_reader :inserted_ids # Initialize a new result. # # @example Instantiate the result. # Result.new(replies, inserted_ids) # # @param [ Protocol::Message ] replies The wire protocol replies. # @param [ Array ] ids The ids of the inserted documents. # # @since 2.0.0 def initialize(replies, ids) @replies = [ *replies ] if replies @inserted_ids = ids end # Gets the number of documents inserted. # # @example Get the number of documents inserted. # result.n_inserted # # @return [ Integer ] The number of documents inserted. # # @since 2.0.0 def n_inserted written_count end # Gets the id of the document inserted. # # @example Get id of the document inserted. # result.inserted_id # # @return [ Object ] The id of the document inserted. # # @since 2.0.0 def inserted_id inserted_ids.first end end # Defines custom behaviour of results when inserting. # For server versions < 2.5.5 (that don't use write commands). # # @since 2.0.0 class LegacyResult < Operation::Result include LegacyMergable # Get the ids of the inserted documents. # # @since 2.0.0 attr_reader :inserted_ids # Initialize a new result. # # @example Instantiate the result. # Result.new(replies, inserted_ids) # # @param [ Protocol::Message ] replies The wire protocol replies. # @param [ Array ] ids The ids of the inserted documents. # # @since 2.0.0 def initialize(replies, ids) @replies = [ *replies ] if replies @inserted_ids = ids end # Gets the number of documents inserted. # # @example Get the number of documents inserted. # result.n_inserted # # @return [ Integer ] The number of documents inserted. # # @since 2.0.0 def n_inserted return 0 unless acknowledged? @replies.reduce(0) do |n, reply| n += 1 unless reply_write_errors?(reply) n end end # Gets the id of the document inserted. # # @example Get id of the document inserted. # result.inserted_id # # @return [ Object ] The id of the document inserted. # # @since 2.0.0 def inserted_id inserted_ids.first end end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/update/0000755000004100000410000000000013257253113022126 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/bulk/update/result.rb0000644000004100000410000001352313257253113023775 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Bulk class Update # Defines custom behaviour of results when updating. # # @since 2.0.0 class Result < Operation::Result include Mergable # The number of modified docs field in the result. # # @since 2.0.0 MODIFIED = 'nModified'.freeze # The upserted docs field in the result. # # @since 2.0.0 UPSERTED = 'upserted'.freeze # Gets the number of documents upserted. # # @example Get the upserted count. # result.n_upserted # # @return [ Integer ] The number of documents upserted. # # @since 2.0.0 def n_upserted return 0 unless acknowledged? 
@replies.reduce(0) do |n, reply| if upsert?(reply) n += reply.documents.first[UPSERTED].size else n end end end # Gets the number of documents matched. # # @example Get the matched count. # result.n_matched # # @return [ Integer ] The number of documents matched. # # @since 2.0.0 def n_matched return 0 unless acknowledged? @replies.reduce(0) do |n, reply| if upsert?(reply) reply.documents.first[N] - n_upserted else if reply.documents.first[N] n += reply.documents.first[N] else n end end end end # Gets the number of documents modified. # Not that in a mixed sharded cluster a call to # update could return nModified (>= 2.6) or not (<= 2.4). # If any call does not return nModified we can't report # a valid final count so set the field to nil. # # @example Get the modified count. # result.n_modified # # @return [ Integer ] The number of documents modified. # # @since 2.0.0 def n_modified return 0 unless acknowledged? @replies.reduce(0) do |n, reply| if n && reply.documents.first[MODIFIED] n += reply.documents.first[MODIFIED] else nil end end end # Get the upserted documents. # # @example Get upserted documents. # result.upserted # # @return [ Array ] The upserted document info # # @since 2.1.0 def upserted reply.documents.first[UPSERTED] || [] end private def upsert?(reply) upserted.any? end end # Defines custom behaviour of results when updating. # For server versions < 2.5.5 (that don't use write commands). # # @since 2.0.0 class LegacyResult < Operation::Result include LegacyMergable # The updated existing field in the result. # # @since 2.0.0 UPDATED_EXISTING = 'updatedExisting'.freeze # Gets the number of documents upserted. # # @example Get the upserted count. # result.n_upserted # # @return [ Integer ] The number of documents upserted. # # @since 2.0.0 def n_upserted return 0 unless acknowledged? @replies.reduce(0) do |n, reply| if upsert?(reply) n += reply.documents.first[N] else n end end end # Gets the number of documents matched. # # @example Get the matched count. # result.n_matched # # @return [ Integer ] The number of documents matched. # # @since 2.0.0 def n_matched return 0 unless acknowledged? @replies.reduce(0) do |n, reply| if upsert?(reply) n else n += reply.documents.first[N] end end end # Gets the number of documents modified. # # @example Get the modified count. # result.n_modified # # @return [ Integer ] The number of documents modified. # # @since 2.2.3 def n_modified; end private def upsert?(reply) reply.documents.first[BulkWrite::Result::UPSERTED] || (!updated_existing?(reply) && reply.documents.first[N] == 1) end def updated_existing?(reply) reply.documents.first[UPDATED_EXISTING] end end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/update.rb0000644000004100000410000000531713257253113022461 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/bulk/update/result' module Mongo module Operation module Write module Bulk # A MongoDB bulk update operation. 
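# A sketch of a bulk update through the public API, whose merged totals come
# from the result classes above; the filters and update documents are made up.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   result = client[:users].bulk_write([
#     { update_one: { filter: { name: 'ada' },
#                     update: { '$set' => { active: true } },
#                     upsert: true } },
#     { update_many: { filter: { active: false },
#                      update: { '$set' => { archived: true } } } }
#   ], ordered: false)
#
#   result.matched_count
#   result.modified_count
#   result.upserted_ids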
# # @note If the server version is >= 2.5.5, a write command operation # will be created and sent instead. # # @example Create the update operation. # Write::BulkUpdate.new({ # :updates => [ # { # :q => { :foo => 1 }, # :u => { :$set => { :bar => 1 }}, # :multi => true, # :upsert => false # } # ], # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern, # :ordered => false # }) # # Initialization: # param [ Hash ] spec The specifications for the update. # # option spec :updates [ Array ] The update documents. # option spec :db_name [ String ] The name of the database on which # the query should be run. # option spec :coll_name [ String ] The name of the collection on which # the query should be run. # option spec :write_concern [ Mongo::WriteConcern ] The write concern. # option spec :ordered [ true, false ] Whether the operations should be # executed in order. # option spec :options [ Hash ] Options for the command, if it ends up being a # write command. # # @since 2.0.0 class Update include Bulkable include Specifiable private def execute_write_command(server) Result.new(Command::Update.new(spec).execute(server)) end def messages updates.collect do |u| opts = { :flags => [] } opts[:flags] << :multi_update if !!u[Operation::MULTI] opts[:flags] << :upsert if !!u[Operation::UPSERT] Protocol::Update.new(db_name, coll_name, u[Operation::Q], u[Operation::U], opts) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/mergable.rb0000644000004100000410000000512113257253113022746 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Bulk # This module contains common functionality for merging results from # write commands during a bulk operation. Used for server versions >= 2.6. # # @since 2.0.0 module Mergable # Aggregate the write errors returned from this result. # # @example Aggregate the write errors. # result.aggregate_write_errors(0) # # @param [ Integer ] count The number of documents already executed. # # @return [ Array ] The aggregate write errors. # # @since 2.0.0 def aggregate_write_errors(count) return unless @replies @replies.reduce(nil) do |errors, reply| if write_errors = reply.documents.first[Error::WRITE_ERRORS] wes = write_errors.collect do |we| we.merge!('index' => count + we['index']) end (errors || []) << wes if wes end end end # Aggregate the write concern errors returned from this result. # # @example Aggregate the write concern errors. # result.aggregate_write_concern_errors(100) # # @param [ Integer ] count The number of documents already executed. # # @return [ Array ] The aggregate write concern errors. 
# # @since 2.0.0 def aggregate_write_concern_errors(count) return unless @replies @replies.each_with_index.reduce(nil) do |errors, (reply, _)| if write_concern_errors = reply.documents.first[Error::WRITE_CONCERN_ERRORS] (errors || []) << write_concern_errors.reduce(nil) do |errs, wce| wce.merge!('index' => count + wce['index']) (errs || []) << write_concern_error end end end end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/bulkable.rb0000644000004100000410000000507113257253113022755 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Bulk # Provides common behavior for bulk write operations. # Note that #validate! is not called on operation results because they are merged # at a higher level. # # @since 2.1.0 module Bulkable # Execute the bulk operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation result. # # @since 2.0.0 def execute(server) result = execute_write_command(server) server.update_cluster_time(result) session.process(result) if session result end private def execute_message(server) replies = messages.map do |m| server.with_connection do |connection| result = self.class::LegacyResult.new(connection.dispatch([ m, gle ].compact, operation_id)) if stop_sending?(result) return result else result.reply end end end self.class::LegacyResult.new(replies.compact.empty? ? nil : replies) end def stop_sending?(result) ordered? && !result.successful? end def gle wc = write_concern || WriteConcern.get(WriteConcern::DEFAULT) gle_message = ( ordered? && wc.get_last_error.nil? ) ? WriteConcern.get(WriteConcern::DEFAULT).get_last_error : wc.get_last_error if gle_message Protocol::Query.new( db_name, Database::COMMAND, gle_message, options.merge(limit: -1) ) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/insert.rb0000644000004100000410000000642013257253113022477 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/bulk/insert/result' module Mongo module Operation module Write module Bulk # A MongoDB bulk insert operation. # This class should only be used by the Bulk API. # # @note If a server with version >= 2.5.5 is being used, a write command # operation will be created and sent instead. # # @example Create the new insert operation. 
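# A sketch of the ordered/unordered distinction handled by Bulkable above: an
# ordered bulk stops at the first failure, an unordered one keeps going. The
# duplicate _id values are deliberate and the data is illustrative.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   ops = [
#     { insert_one: { _id: 1 } },
#     { insert_one: { _id: 1 } },  # duplicate key error
#     { insert_one: { _id: 2 } }
#   ]
#
#   begin
#     client[:items].bulk_write(ops, ordered: true) # _id 2 is never attempted
#   rescue Mongo::Error::BulkWriteError => e
#     e.result # merged write errors, built by the mergable modules above
#   end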
# Write::BulkInsert.new({ # :documents => [{ :foo => 1 }], # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern, # :ordered => false # }) # # Initialization: # param [ Hash ] spec The specifications for the insert. # # option spec :documents [ Array ] The documents to insert. # option spec :db_name [ String ] The name of the database. # option spec :coll_name [ String ] The name of the collection. # option spec :write_concern [ Mongo::WriteConcern ] The write concern. # option spec :ordered [ true, false ] Whether the operations should be # executed in order. # option spec :options [ Hash ] Options for the command, if it ends up being a # write command. # # @since 2.0.0 class Insert include Bulkable include Specifiable include Idable private def execute_write_command(server) command_spec = spec.merge(:documents => ensure_ids(documents)) Result.new(Command::Insert.new(command_spec).execute(server), @ids) end def execute_message(server) replies = [] messages.map do |m| server.with_connection do |connection| result = LegacyResult.new(connection.dispatch([ m, gle ].compact, operation_id), @ids) replies << result.reply if stop_sending?(result) return LegacyResult.new(replies, @ids) end end end LegacyResult.new(replies.compact.empty? ? nil : replies, @ids) end def messages if ordered? || gle documents.collect do |doc| Protocol::Insert.new(db_name, coll_name, ensure_ids([ doc ]), spec) end else [ Protocol::Insert.new( db_name, coll_name, ensure_ids(documents), spec.merge({ :flags => [:continue_on_error] }) ) ] end end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/legacy_mergable.rb0000644000004100000410000000620013257253113024271 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Bulk # This module contains common functionality for merging results from # writes during a bulk operation. Used for server versions < 2.6. # # @since 2.0.0 module LegacyMergable # Aggregate the write errors returned from this result. # # @example Aggregate the write errors. # result.aggregate_write_errors(0) # # @param [ Integer ] count The number of documents already executed. # # @return [ Array ] The aggregate write errors. # # @since 2.0.0 def aggregate_write_errors(count) @replies.each_with_index.reduce(nil) do |errors, (reply, i)| if reply_write_errors?(reply) errors ||= [] errors << { 'errmsg' => reply.documents.first[Error::ERROR], 'index' => count + i, 'code' => reply.documents.first[Error::CODE] } end errors end if @replies end # Aggregate the write concern errors returned from this result. # # @example Aggregate the write concern errors. # result.aggregate_write_concern_errors(4) # # @param [ Integer ] count The number of documents already executed. # # @return [ Array ] The aggregate write concern errors. 
# # @since 2.0.0 def aggregate_write_concern_errors(count) @replies.each_with_index.reduce(nil) do |errors, (reply, i)| if error = reply_write_errors?(reply) errors ||= [] if note = reply.documents.first['wnote'] || reply.documents.first['jnote'] code = reply.documents.first['code'] || Error::BAD_VALUE error_string = "#{code}: #{note}" elsif error == 'timeout' code = reply.documents.first['code'] || Error::UNKNOWN_ERROR error_string = "#{code}: #{error}" end errors << { 'errmsg' => error_string, 'index' => count + i, 'code' => code } if error_string end errors end if @replies end private def reply_write_errors?(reply) reply.documents.first[Error::ERROR] || reply.documents.first[Error::ERRMSG] end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/delete.rb0000644000004100000410000000474013257253113022440 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/bulk/delete/result' module Mongo module Operation module Write module Bulk # A MongoDB bulk delete operation. # # @note If a server with version >= 2.5.5 is selected, a write command # operation will be created and sent instead. # # @example Create the delete operation. # Write::Bulk::Delete.new({ # :deletes => [{ :q => { :foo => 1 }, :limit => 1 }], # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern # }) # # Initialization: # param [ Hash ] spec The specifications for the delete. # # option spec :deletes [ Array ] The delete documents. # option spec :db_name [ String ] The name of the database on which # the delete should be executed. # option spec :coll_name [ String ] The name of the collection on which # the delete should be executed. # option spec :write_concern [ Mongo::WriteConcern ] The write concern # for this operation. # option spec :ordered [ true, false ] Whether the operations should be # executed in order. # option spec :options [Hash] Options for the command, if it ends up being a # write command. # # @since 2.0.0 class Delete include Bulkable include Specifiable private def execute_write_command(server) Result.new(Command::Delete.new(spec).execute(server)) end def messages deletes.collect do |del| opts = ( del[Operation::LIMIT] || 0 ) <= 0 ? {} : { :flags => [ :single_remove ] } Protocol::Delete.new(db_name, coll_name, del[Operation::Q], opts) end end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk/delete/0000755000004100000410000000000013257253113022106 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/bulk/delete/result.rb0000644000004100000410000000411513257253113023752 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
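# A sketch of bulk deletes through the public API; delete_one maps to a
# delete with :limit => 1 (the :single_remove flag above) and delete_many to
# an unlimited delete. The filters are illustrative.
#
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   result = client[:sessions].bulk_write([
#     { delete_one:  { filter: { token: 'expired-token' } } },
#     { delete_many: { filter: { expires_at: { '$lt' => Time.now } } } }
#   ])
#
#   result.deleted_count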
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write module Bulk class Delete # Defines common r_removed aggreation behaviour. # # @since 2.2.0 module Aggregatable # Gets the number of documents deleted. # # @example Get the deleted count. # result.n_removed # # @return [ Integer ] The number of documents deleted. # # @since 2.0.0 def n_removed return 0 unless acknowledged? @replies.reduce(0) do |n, reply| if reply.documents.first[Result::N] n += reply.documents.first[Result::N] else n end end end end # Defines custom behaviour of results when deleting. # # @since 2.0.0 class Result < Operation::Result include Mergable include Aggregatable # The aggregate number of deleted docs reported in the replies. # # @since 2.0.0 REMOVED = 'nRemoved'.freeze end # Defines custom behaviour of results when deleting. # For server versions < 2.5.5 (that don't use write commands). # # @since 2.0.0 class LegacyResult < Operation::Result include LegacyMergable include Aggregatable end end end end end end mongo-2.5.1/lib/mongo/operation/write/delete.rb0000644000004100000410000000476513257253113021512 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/delete/result' module Mongo module Operation module Write # A MongoDB delete operation. # # @note If a server with version >= 2.5.5 is selected, a write command # operation will be created and sent instead. # # @example Create the delete operation. # Write::Delete.new({ # :delete => { :q => { :foo => 1 }, :limit => 1 }, # :db_name => 'test', # :coll_name => 'test_coll', # :write_concern => write_concern # }) # # Initialization: # param [ Hash ] spec The specifications for the delete. # # option spec :delete [ Hash ] The delete document. # option spec :db_name [ String ] The name of the database on which # the delete should be executed. # option spec :coll_name [ String ] The name of the collection on which # the delete should be executed. # option spec :write_concern [ Mongo::WriteConcern ] The write concern # for this operation. # option spec :ordered [ true, false ] Whether the operations should be # executed in order. # option spec :options [Hash] Options for the command, if it ends up being a # write command. # # @since 2.0.0 class Delete include GLE include WriteCommandEnabled include Specifiable private def write_command_op s = spec.merge(:deletes => [ delete ]) s.delete(:delete) Command::Delete.new(s) end def has_collation? delete[:collation] || delete[Operation::COLLATION] end def message(server) selector = delete[Operation::Q] opts = (delete[Operation::LIMIT] || 0) <= 0 ? 
{} : { :flags => [ :single_remove ] } Protocol::Delete.new(db_name, coll_name, selector, opts) end end end end end mongo-2.5.1/lib/mongo/operation/write/write_command_enabled.rb0000644000004100000410000000473013257253113024542 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write # This module contains common functionality for operations that send either # a write command or a specific wire protocol message, depending on server version. # For server versions >= 2.6, a write command is sent. # # @since 2.1.0 module WriteCommandEnabled # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation result. # # @since 2.1.0 def execute(server) if unacknowledged_write? raise Error::UnsupportedCollation.new(Error::UnsupportedCollation::UNACKNOWLEDGED_WRITES_MESSAGE) if has_collation? raise Error::UnsupportedArrayFilters.new(Error::UnsupportedArrayFilters::UNACKNOWLEDGED_WRITES_MESSAGE) if has_array_filters? end if server.features.op_msg_enabled? # version 3.6 execute_write_command(server) else # server version is 2.6 through 3.4 if unacknowledged_write? execute_message(server) else execute_write_command(server) end end end private def has_array_filters? false end def has_collation? false end def unacknowledged_write? write_concern && write_concern.get_last_error.nil? end def execute_write_command(server) result_class = self.class.const_defined?(:Result, false) ? self.class::Result : Result result = result_class.new(write_command_op.execute(server)) server.update_cluster_time(result) session.process(result) if session result.validate! end end end end end mongo-2.5.1/lib/mongo/operation/write/delete/0000755000004100000410000000000013257253113021151 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/write/delete/result.rb0000644000004100000410000000214513257253113023016 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Write class Delete # Defines custom behaviour of results for a delete. # # @since 2.0.0 class Result < Operation::Result # Get the number of documents deleted. # # @example Get the deleted count. # result.deleted_count # # @return [ Integer ] The deleted count. 
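#
# A minimal end-to-end sketch, assuming a connected client (the client,
# database and collection names here are illustrative, not part of this
# file): the same count surfaces through the higher-level delete helpers.
#
# @example Read the deleted count from a collection-level delete (sketch).
#   client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')
#   result = client[:test_coll].delete_many(foo: 1)
#   result.deleted_count # => Integer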
# # @since 2.0.0 def deleted_count n end end end end end end mongo-2.5.1/lib/mongo/operation/write/bulk.rb0000644000004100000410000000153613257253113021176 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/bulk/mergable' require 'mongo/operation/write/bulk/legacy_mergable' require 'mongo/operation/write/bulk/bulkable' require 'mongo/operation/write/bulk/delete' require 'mongo/operation/write/bulk/insert' require 'mongo/operation/write/bulk/update' mongo-2.5.1/lib/mongo/operation/kill_cursors.rb0000644000004100000410000000224213257253113021615 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # A MongoDB kill cursors operation. # # @example Create the kill cursors operation. # Mongo::Operation::KillCursor.new({ :cursor_ids => [1, 2] }) # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :cursor_ids [ Array ] The ids of cursors to kill. # # @since 2.0.0 class KillCursors include Specifiable include Executable private def message(server) Protocol::KillCursors.new(coll_name, db_name, cursor_ids) end end end end mongo-2.5.1/lib/mongo/operation/takes_write_concern.rb0000644000004100000410000000200713257253113023131 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # Adds behaviour for updating the selector for operations # that may take a write concern. # # @since 2.4.0 module TakesWriteConcern private def update_selector_for_write_concern(sel, server) if write_concern && server.features.collation_enabled? sel.merge(writeConcern: write_concern.options) else sel end end end end end mongo-2.5.1/lib/mongo/operation/read.rb0000644000004100000410000000122613257253113020016 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/read/query' require 'mongo/operation/read/get_more' mongo-2.5.1/lib/mongo/operation/commands.rb0000644000004100000410000000251013257253113020701 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/commands/command' require 'mongo/operation/commands/find' require 'mongo/operation/commands/count' require 'mongo/operation/commands/distinct' require 'mongo/operation/commands/explain' require 'mongo/operation/commands/get_more' require 'mongo/operation/commands/parallel_scan' require 'mongo/operation/commands/aggregate' require 'mongo/operation/commands/map_reduce' require 'mongo/operation/commands/collections_info' require 'mongo/operation/commands/create' require 'mongo/operation/commands/drop' require 'mongo/operation/commands/drop_database' require 'mongo/operation/commands/indexes' require 'mongo/operation/commands/list_collections' require 'mongo/operation/commands/list_indexes' require 'mongo/operation/commands/users_info' mongo-2.5.1/lib/mongo/operation/write.rb0000644000004100000410000000161413257253113020236 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/write/write_command_enabled' require 'mongo/operation/write/idable' require 'mongo/operation/write/gle' require 'mongo/operation/write/bulk' require 'mongo/operation/write/delete' require 'mongo/operation/write/insert' require 'mongo/operation/write/update' require 'mongo/operation/write/command' mongo-2.5.1/lib/mongo/operation/limited.rb0000644000004100000410000000213013257253113020525 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # Adds behaviour for commands so ensure the limit option is always -1. # # @since 2.0.0 module Limited # Limited operations are commands that always require a limit of -1. In # these cases we always overwrite the limit value. # # @example Get the options. # limited.options # # @return [ Hash ] The options with a -1 limit. # # @since 2.0.0 def options super.merge(:limit => -1) end end end end mongo-2.5.1/lib/mongo/operation/executable.rb0000644000004100000410000000302613257253113021224 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation # This module provides the #execute method that many operations use. # It makes sure to instantiate the appropriate Result class for the operation's response. # # @since 2.0.0 module Executable # Execute the operation. # # @example Execute the operation. # operation.execute(server) # # @param [ Mongo::Server ] server The server to send this operation to. # # @return [ Result ] The operation response, if there is one. # # @since 2.0.0 def execute(server) server.with_connection do |connection| result_class = self.class.const_defined?(:Result, false) ? self.class::Result : Result result = result_class.new(connection.dispatch([ message(server) ], operation_id)) server.update_cluster_time(result) session.process(result) if session result.validate! end end end end end mongo-2.5.1/lib/mongo/operation/read/0000755000004100000410000000000013257253113017470 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/read/get_more.rb0000644000004100000410000000320413257253113021615 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Read # A MongoDB get more operation. # # @example Create a get more operation. # Read::GetMore.new({ # :to_return => 50, # :cursor_id => 1, # :db_name => 'test_db', # :coll_name => 'test_coll' # }) # # Initialization: # param [ Hash ] spec The specifications for the operation. # # option spec :to_return [ Integer ] The number of results to return. 
# option spec :cursor_id [ Integer ] The id of the cursor. # option spec :db_name [ String ] The name of the database on which # the operation should be executed. # option spec :coll_name [ String ] The name of the collection on which # the operation should be executed. # # @since 2.0.0 class GetMore include Specifiable include Executable private def message(server) Protocol::GetMore.new(db_name, coll_name, to_return, cursor_id) end end end end end mongo-2.5.1/lib/mongo/operation/read/query.rb0000644000004100000410000000316613257253113021170 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/operation/read/query/result' module Mongo module Operation module Read # A MongoDB query operation. # # @example Create the query operation. # Read::Query.new({ # :selector => { :foo => 1 }, # :db_name => 'test-db', # :coll_name => 'test-coll', # :options => { :limit => 2 } # }) # # Initialization: # param [ Hash ] spec The specifications for the query. # # option spec :selector [ Hash ] The query selector. # option spec :db_name [ String ] The name of the database on which # the query should be run. # option spec :coll_name [ String ] The name of the collection on which # the query should be run. # option spec :options [ Hash ] Options for the query. # # @since 2.0.0 class Query include Specifiable include Executable include ReadPreference private def query_coll coll_name end end end end end mongo-2.5.1/lib/mongo/operation/read/query/0000755000004100000410000000000013257253113020635 5ustar www-datawww-datamongo-2.5.1/lib/mongo/operation/read/query/result.rb0000644000004100000410000000217613257253113022506 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Operation module Read class Query # Defines custom behaviour of results for a query. # # @since 2.1.0 class Result < Operation::Result # Determine if the query was a success. # # @example Was the query successful? # result.successful? # # @return [ true, false ] If the query was successful. # # @since 2.0.0 def successful? !query_failure? end end end end end end mongo-2.5.1/lib/mongo/index/0000755000004100000410000000000013257253113015664 5ustar www-datawww-datamongo-2.5.1/lib/mongo/index/view.rb0000644000004100000410000002233213257253113017165 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Index # A class representing a view of indexes. # # @since 2.0.0 class View extend Forwardable include Enumerable # @return [ Collection ] collection The indexes collection. attr_reader :collection # @return [ Integer ] batch_size The size of the batch of results # when sending the listIndexes command. attr_reader :batch_size def_delegators :@collection, :cluster, :database, :read_preference, :write_concern, :client def_delegators :cluster, :next_primary # The index key field. # # @since 2.0.0 KEY = 'key'.freeze # The index name field. # # @since 2.0.0 NAME = 'name'.freeze # The mappings of Ruby index options to server options. # # @since 2.0.0 OPTIONS = { :background => :background, :bits => :bits, :bucket_size => :bucketSize, :default_language => :default_language, :expire_after => :expireAfterSeconds, :expire_after_seconds => :expireAfterSeconds, :key => :key, :language_override => :language_override, :max => :max, :min => :min, :name => :name, :partial_filter_expression => :partialFilterExpression, :sparse => :sparse, :sphere_version => :'2dsphereIndexVersion', :storage_engine => :storageEngine, :text_version => :textIndexVersion, :unique => :unique, :version => :v, :weights => :weights, :collation => :collation }.freeze # Drop an index by its name. # # @example Drop an index by its name. # view.drop_one('name_1') # # @param [ String ] name The name of the index. # # @return [ Result ] The response. # # @since 2.0.0 def drop_one(name) raise Error::MultiIndexDrop.new if name == Index::ALL drop_by_name(name) end # Drop all indexes on the collection. # # @example Drop all indexes on the collection. # view.drop_all # # @return [ Result ] The response. # # @since 2.0.0 def drop_all drop_by_name(Index::ALL) end # Creates an index on the collection. # # @example Create a unique index on the collection. # view.create_one({ name: 1 }, { unique: true }) # # @param [ Hash ] keys A hash of field name/direction pairs. # @param [ Hash ] options Options for this index. # # @option options [ true, false ] :unique (false) If true, this index will enforce # a uniqueness constraint on that field. # @option options [ true, false ] :background (false) If true, the index will be built # in the background (only available for server versions >= 1.3.2 ) # @option options [ true, false ] :drop_dups (false) If creating a unique index on # this collection, this option will keep the first document the database indexes # and drop all subsequent documents with duplicate values on this field. # @option options [ Integer ] :bucket_size (nil) For use with geoHaystack indexes. # Number of documents to group together within a certain proximity to a given # longitude and latitude. # @option options [ Integer ] :max (nil) Specify the max latitude and longitude for # a geo index. # @option options [ Integer ] :min (nil) Specify the min latitude and longitude for # a geo index. # @option options [ Hash ] :partial_filter_expression Specify a filter for a partial # index. 
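#
# A further sketch (field names and values are illustrative only) of how
# the Ruby-style option keys above are translated, via the OPTIONS map,
# to their server-side spellings:
#
# @example Create a TTL index and a partial unique index (sketch).
#   view.create_one({ created_at: 1 }, expire_after: 3600)
#   # sent to the server with { expireAfterSeconds: 3600 }
#   view.create_one({ email: 1 }, unique: true,
#     partial_filter_expression: { email: { '$exists' => true } })
#   # sent with { unique: true, partialFilterExpression: { ... } }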
# # @note Note that the options listed may be subset of those available. # See the MongoDB documentation for a full list of supported options by server version. # # @return [ Result ] The response. # # @since 2.0.0 def create_one(keys, options = {}) create_many({ key: keys }.merge(options)) end # Creates multiple indexes on the collection. # # @example Create multiple indexes. # view.create_many([ # { key: { name: 1 }, unique: true }, # { key: { age: -1 }, background: true } # ]) # # @note On MongoDB 3.0.0 and higher, the indexes will be created in # parallel on the server. # # @param [ Array ] models The index specifications. Each model MUST # include a :key option. # # @return [ Result ] The result of the command. # # @since 2.0.0 def create_many(*models) server = next_primary client.send(:with_session, @options) do |session| spec = { indexes: normalize_models(models.flatten, server), db_name: database.name, coll_name: collection.name, session: session } spec[:write_concern] = write_concern if server.features.collation_enabled? Operation::Write::Command::CreateIndex.new(spec).execute(server) end end # Convenience method for getting index information by a specific name or # spec. # # @example Get index information by name. # view.get('name_1') # # @example Get index information by the keys. # view.get(name: 1) # # @param [ Hash, String ] keys_or_name The index name or spec. # # @return [ Hash ] The index information. # # @since 2.0.0 def get(keys_or_name) find do |index| (index[NAME] == keys_or_name) || (index[KEY] == normalize_keys(keys_or_name)) end end # Iterate over all indexes for the collection. # # @example Get all the indexes. # view.each do |index| # ... # end # # @since 2.0.0 def each(&block) server = next_primary(false) session = client.send(:get_session, @options) result = send_initial_query(server, session) cursor = Cursor.new(self, result, server, session: session) cursor.each do |doc| yield doc end if block_given? cursor.to_enum end # Create the new index view. # # @example Create the new index view. # View::Index.new(collection) # # @param [ Collection ] collection The collection. # @param [ Hash ] options Options for getting a list of indexes. # Only relevant for when the listIndexes command is used with server # versions >=2.8. # # @option options [ Integer ] :batch_size The batch size for results # returned from the listIndexes command. # # @since 2.0.0 def initialize(collection, options = {}) @collection = collection @batch_size = options[:batch_size] @options = options end private def drop_by_name(name) client.send(:with_session, @options) do |session| spec = { db_name: database.name, coll_name: collection.name, index_name: name, session: session } server = next_primary spec[:write_concern] = write_concern if server.features.collation_enabled? Operation::Write::Command::DropIndex.new(spec).execute(server) end end def index_name(spec) spec.to_a.join('_') end def indexes_spec(session) { selector: { listIndexes: collection.name, cursor: batch_size ? 
{ batchSize: batch_size } : {} }, coll_name: collection.name, db_name: database.name, session: session } end def initial_query_op(session) Operation::Commands::Indexes.new(indexes_spec(session)) end def limit; -1; end def normalize_keys(spec) return false if spec.is_a?(String) Options::Mapper.transform_keys_to_strings(spec) end def normalize_models(models, server) with_generated_names(models, server).map do |model| Options::Mapper.transform(model, OPTIONS) end end def send_initial_query(server, session) initial_query_op(session).execute(server) end def with_generated_names(models, server) models.dup.each do |model| validate_collation!(model, server) unless model[:name] model[:name] = index_name(model[:key]) end end end def validate_collation!(model, server) if (model[:collation] || model[Operation::COLLATION]) && !server.features.collation_enabled? raise Error::UnsupportedCollation.new end end end end end mongo-2.5.1/lib/mongo/server/0000755000004100000410000000000013257253113016063 5ustar www-datawww-datamongo-2.5.1/lib/mongo/server/connection.rb0000644000004100000410000002023313257253113020547 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server # This class models the socket connections for servers and their behavior. # # @since 2.0.0 class Connection include Connectable include Monitoring::Publishable include Retryable extend Forwardable # The ping command. # # @since 2.1.0 PING = { :ping => 1 }.freeze # The ping command for an OP_MSG (server versions >= 3.6). # # @since 2.5.0 PING_OP_MSG = { :ping => 1, '$db' => Database::ADMIN }.freeze # Ping message. # # @since 2.1.0 PING_MESSAGE = Protocol::Query.new(Database::ADMIN, Database::COMMAND, PING, :limit => -1) # Ping message as an OP_MSG (server versions >= 3.6). # # @since 2.5.0 PING_OP_MSG_MESSAGE = Protocol::Msg.new([:none], {}, PING_OP_MSG) # The ping message as raw bytes. # # @since 2.1.0 PING_BYTES = PING_MESSAGE.serialize.to_s.freeze # The ping OP_MSG message as raw bytes (server versions >= 3.6). # # @since 2.5.0 PING_OP_MSG_BYTES = PING_OP_MSG_MESSAGE.serialize.to_s.freeze # The last time the connection was checked back into a pool. # # @since 2.5.0 attr_reader :last_checkin def_delegators :@server, :features, :max_bson_object_size, :max_message_size, :mongos?, :app_metadata, :compressor, :cluster_time, :update_cluster_time # Tell the underlying socket to establish a connection to the host. # # @example Connect to the host. # connection.connect! # # @note This method mutates the connection class by setting a socket if # one previously did not exist. # # @return [ true ] If the connection succeeded. # # @since 2.0.0 def connect! unless socket && socket.connectable? @socket = address.socket(socket_timeout, ssl_options) address.connect_socket!(socket) handshake! authenticate! end true end # Disconnect the connection. # # @example Disconnect from the host. # connection.disconnect! 
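#
# A lifecycle sketch, assuming a server obtained from a connected cluster
# (illustrative only; connections are normally managed by the pool):
#
# @example Connect, ping and disconnect explicitly (sketch).
#   connection = Mongo::Server::Connection.new(server, server.options)
#   connection.connect!    # opens the socket, handshakes and authenticates
#   connection.ping        # => true if the server responds
#   connection.disconnect! # closes the socket and clears cached auth state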
# # @note This method mutates the connection by setting the socket to nil # if the closing succeeded. # # @return [ true ] If the disconnect succeeded. # # @since 2.0.0 def disconnect! @auth_mechanism = nil @last_checkin = nil if socket socket.close @socket = nil end true end # Dispatch the provided messages to the connection. If the last message # requires a response a reply will be returned. # # @example Dispatch the messages. # connection.dispatch([ insert, command ]) # # @note This method is named dispatch since 'send' is a core Ruby method on # all objects. # # @param [ Array ] messages The messages to dispatch. # @param [ Integer ] operation_id The operation id to link messages. # # @return [ Protocol::Message ] The reply if needed. # # @since 2.0.0 def dispatch(messages, operation_id = nil) if monitoring.subscribers?(Monitoring::COMMAND) publish_command(messages, operation_id || Monitoring.next_operation_id) do |msgs| deliver(msgs) end else deliver(messages) end end # Initialize a new socket connection from the client to the server. # # @api private # # @example Create the connection. # Connection.new(server) # # @note Connection must never be directly instantiated outside of a # Server. # # @param [ Mongo::Server ] server The server the connection is for. # @param [ Hash ] options The connection options. # # @since 2.0.0 def initialize(server, options = {}) @address = server.address @monitoring = server.monitoring @options = options.freeze @server = server @ssl_options = options.reject { |k, v| !k.to_s.start_with?(SSL) } @socket = nil @last_checkin = nil @auth_mechanism = nil @pid = Process.pid end # Ping the connection to see if the server is responding to commands. # This is non-blocking on the server side. # # @example Ping the connection. # connection.ping # # @note This uses a pre-serialized ping message for optimization. # # @return [ true, false ] If the server is accepting connections. # # @since 2.1.0 def ping bytes = features.op_msg_enabled? ? PING_OP_MSG_BYTES : PING_BYTES ensure_connected do |socket| socket.write(bytes) reply = Protocol::Message.deserialize(socket, max_message_size) reply.documents[0][Operation::Result::OK] == 1 end end # Get the timeout to execute an operation on a socket. # # @example Get the timeout to execute an operation on a socket. # connection.timeout # # @return [ Float ] The operation timeout in seconds. # # @since 2.0.0 def socket_timeout @timeout ||= options[:socket_timeout] end # @deprecated Please use :socket_timeout instead. Will be removed in 3.0.0 alias :timeout :socket_timeout # Record the last checkin time. # # @example Record the checkin time on this connection. # connection.record_checkin! # # @return [ self ] # # @since 2.5.0 def record_checkin! @last_checkin = Time.now self end private def deliver(messages) write(messages) messages.last.replyable? ? read(messages.last.request_id) : nil end def handshake! if socket && socket.connectable? socket.write(app_metadata.ismaster_bytes) response = Protocol::Message.deserialize(socket, max_message_size).documents[0] min_wire_version = response[Description::MIN_WIRE_VERSION] || Description::LEGACY_WIRE_VERSION max_wire_version = response[Description::MAX_WIRE_VERSION] || Description::LEGACY_WIRE_VERSION features = Description::Features.new(min_wire_version..max_wire_version) @auth_mechanism = (features.scram_sha_1_enabled? || @server.features.scram_sha_1_enabled?) ? :scram : :mongodb_cr end end def authenticate! 
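# (Descriptive note on the code that follows: an Auth::User is built from
# the connection options, with the mechanism defaulted from the handshake
# reply; the login conversation runs over this connection inside
# @server.handle_auth_failure! so authentication errors update server
# state, and the SCRAM client key is cached for later re-authentication.)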
if options[:user] || options[:auth_mech] user = Auth::User.new(Options::Redacted.new(:auth_mech => default_mechanism, :client_key => @client_key).merge(options)) @server.handle_auth_failure! do reply = Auth.get(user).login(self) @client_key ||= user.send(:client_key) if user.mechanism == :scram reply end end end def default_mechanism @auth_mechanism || (@server.features.scram_sha_1_enabled? ? :scram : :mongodb_cr) end def write(messages, buffer = BSON::ByteBuffer.new) start_size = 0 messages.each do |message| message.compress!(compressor, options[:zlib_compression_level]).serialize(buffer, max_bson_object_size) if max_message_size && (buffer.length - start_size) > max_message_size raise Error::MaxMessageSize.new(max_message_size) start_size = buffer.length end end ensure_connected{ |socket| socket.write(buffer.to_s) } end end end end mongo-2.5.1/lib/mongo/server/context.rb0000644000004100000410000000404613257253113020100 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server # Represents a context in which messages are sent to the server on a # connection. # # @since 2.0.0 # # @deprecated Will be removed in version 3.0 class Context extend Forwardable # @return [ Mongo::Server ] server The server the context is for. attr_reader :server # Delegate state checks to the server. def_delegators :@server, :cluster, :features, :max_wire_version, :max_write_batch_size, :mongos?, :primary?, :secondary?, :standalone? # Instantiate a server context. # # @example Instantiate a server context. # Mongo::Server::Context.new(server) # # @param [ Mongo::Server ] server The server the context is for. # # @since 2.0.0 def initialize(server) @server = server end # Execute a block of code with a connection, that is checked out of the # pool and then checked back in. # # @example Send a message with the connection. # context.with_connection do |connection| # connection.dispatch([ command ]) # end # # @return [ Object ] The result of the block execution. # # @since 2.0.0 def with_connection(&block) server.pool.with_connection(&block) end end end end mongo-2.5.1/lib/mongo/server/monitor.rb0000644000004100000410000001316713257253113020107 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/server/monitor/connection' module Mongo class Server # This object is responsible for keeping server status up to date, running in # a separate thread as to not disrupt other operations. 
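#
# A rough usage sketch (address and listeners are illustrative; monitors
# are normally created by a Server rather than instantiated directly):
#
# @example Run, force a scan, and stop a monitor (sketch).
#   monitor = Mongo::Server::Monitor.new(address, listeners)
#   monitor.run!   # start the background scanning thread
#   monitor.scan!  # force an immediate ismaster round trip
#   monitor.stop!  # disconnect and kill the thread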
# # @since 2.0.0 class Monitor include Loggable extend Forwardable # The default time for a server to refresh its status is 10 seconds. # # @since 2.0.0 HEARTBEAT_FREQUENCY = 10.freeze # The minimum time between forced server scans. Is # minHeartbeatFrequencyMS in the SDAM spec. # # @since 2.0.0 MIN_SCAN_FREQUENCY = 0.5.freeze # The weighting factor (alpha) for calculating the average moving round trip time. # # @since 2.0.0 RTT_WEIGHT_FACTOR = 0.2.freeze # @return [ Mongo::Connection ] connection The connection to use. attr_reader :connection # @return [ Server::Description ] description The server # description the monitor refreshes. attr_reader :description # @return [ Description::Inspector ] inspector The description inspector. attr_reader :inspector # @return [ Hash ] options The server options. attr_reader :options # @return [ Time ] last_scan The time of the last server scan. # # @since 2.4.0 attr_reader :last_scan # The compressor is determined during the handshake, so it must be an attribute # of the connection. def_delegators :connection, :compressor # Force the monitor to immediately do a check of its server. # # @example Force a scan. # monitor.scan! # # @return [ Description ] The updated description. # # @since 2.0.0 def scan! throttle_scan_frequency! @description = inspector.run(description, *ismaster) end # Get the refresh interval for the server. This will be defined via an option # or will default to 5. # # @example Get the refresh interval. # server.heartbeat_frequency # # @return [ Integer ] The heartbeat frequency, in seconds. # # @since 2.0.0 def heartbeat_frequency @heartbeat_frequency ||= options[:heartbeat_frequency] || HEARTBEAT_FREQUENCY end # Create the new server monitor. # # @api private # # @example Create the server monitor. # Mongo::Server::Monitor.new(address, listeners) # # @note Monitor must never be directly instantiated outside of a Server. # # @param [ Address ] address The address to monitor. # @param [ Event::Listeners ] listeners The event listeners. # @param [ Hash ] options The options. # # @since 2.0.0 def initialize(address, listeners, options = {}) @description = Description.new(address, {}) @inspector = Description::Inspector.new(listeners) @options = options.freeze @connection = Connection.new(address, options) @last_round_trip_time = nil @last_scan = nil @mutex = Mutex.new end # Runs the server monitor. Refreshing happens on a separate thread per # server. # # @example Run the monitor. # monitor.run # # @return [ Thread ] The thread the monitor runs on. # # @since 2.0.0 def run! @thread = Thread.new(heartbeat_frequency) do |i| loop do sleep(i) scan! end end end # Stops the server monitor. Kills the thread so it doesn't continue # taking memory and sending commands to the connection. # # @example Stop the monitor. # monitor.stop! # # @return [ Boolean ] Is the Thread stopped? # # @since 2.0.0 def stop! connection.disconnect! && @thread.kill && @thread.stop? end # Restarts the server monitor unless the current thread is alive. # # @example Restart the monitor. # monitor.restart! # # @return [ Thread ] The thread the monitor runs on. # # @since 2.1.0 def restart! @thread.alive? ? @thread : run! 
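# (Note on the private helpers below: the round trip time is tracked as an
# exponentially weighted moving average with RTT_WEIGHT_FACTOR = 0.2, so a
# new sample of 10ms against a previous average of 50ms gives
# 0.2 * 10 + 0.8 * 50 = 42ms; the first sample simply seeds the average.)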
end private def average_round_trip_time(start) new_rtt = Time.now - start RTT_WEIGHT_FACTOR * new_rtt + (1 - RTT_WEIGHT_FACTOR) * (@last_round_trip_time || new_rtt) end def calculate_average_round_trip_time(start) @last_round_trip_time = average_round_trip_time(start) end def ismaster @mutex.synchronize do start = Time.now begin return connection.ismaster, calculate_average_round_trip_time(start) rescue Exception => e log_debug(e.message) return {}, calculate_average_round_trip_time(start) end end end def throttle_scan_frequency! if @last_scan difference = (Time.now - @last_scan) throttle_time = (MIN_SCAN_FREQUENCY - difference) sleep(throttle_time) if throttle_time > 0 end @last_scan = Time.now end end end end mongo-2.5.1/lib/mongo/server/connection_pool.rb0000644000004100000410000000741313257253113021605 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/server/connection_pool/queue' module Mongo class Server # Represents a connection pool for server connections. # # @since 2.0.0 class ConnectionPool include Loggable extend Forwardable # @return [ Hash ] options The pool options. attr_reader :options def_delegators :queue, :close_stale_sockets! # Check a connection back into the pool. Will pull the connection from a # thread local stack that should contain it after it was checked out. # # @example Checkin the thread's connection to the pool. # pool.checkin # # @since 2.0.0 def checkin(connection) queue.enqueue(connection) end # Check a connection out from the pool. If a connection exists on the same # thread it will get that connection, otherwise it will dequeue a # connection from the queue and pin it to this thread. # # @example Check a connection out from the pool. # pool.checkout # # @return [ Mongo::Pool::Connection ] The checked out connection. # # @since 2.0.0 def checkout queue.dequeue end # Disconnect the connection pool. # # @example Disconnect the connection pool. # pool.disconnect! # # @return [ true ] true. # # @since 2.1.0 def disconnect! queue.disconnect! end # Create the new connection pool. # # @example Create the new connection pool. # Pool.new(timeout: 0.5) do # Connection.new # end # # @note A block must be passed to set up the connections on initialization. # # @param [ Hash ] options The connection pool options. # # @since 2.0.0 def initialize(options = {}, &block) @options = options.freeze @queue = Queue.new(options, &block) end # Get a pretty printed string inspection for the pool. # # @example Inspect the pool. # pool.inspect # # @return [ String ] The pool inspection. # # @since 2.0.0 def inspect "#" end # Yield the block to a connection, while handling checkin/checkout logic. # # @example Execute with a connection. # pool.with_connection do |connection| # connection.read # end # # @return [ Object ] The result of the block. 
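#
# A sketch of what the block form wraps (illustrative only):
#
# @example Manual checkout/checkin equivalent to with_connection (sketch).
#   connection = pool.checkout
#   begin
#     connection.dispatch([ message ])
#   ensure
#     pool.checkin(connection)
#   end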
# # @since 2.0.0 def with_connection connection = checkout yield(connection) ensure checkin(connection) if connection end protected attr_reader :queue private class << self # Get a connection pool for the provided server. # # @example Get a connection pool. # Mongo::Pool.get(server) # # @param [ Mongo::Server ] server The server. # # @return [ Mongo::Pool ] The connection pool. # # @since 2.0.0 def get(server) ConnectionPool.new(server.options) do Connection.new(server, server.options) end end end end end end mongo-2.5.1/lib/mongo/server/monitor/0000755000004100000410000000000013257253113017552 5ustar www-datawww-datamongo-2.5.1/lib/mongo/server/monitor/connection.rb0000644000004100000410000001464513257253113022250 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server class Monitor # This class models the monitor connections and their behavior. # # @since 2.0.0 class Connection include Retryable include Connectable include Loggable # The command used for determining server status. # # @since 2.2.0 ISMASTER = { :ismaster => 1 }.freeze # The command used for determining server status formatted for an OP_MSG (server versions >= 3.6). # # @since 2.5.0 ISMASTER_OP_MSG = { :ismaster => 1, '$db' => Database::ADMIN }.freeze # The constant for the ismaster command. # # @since 2.2.0 ISMASTER_MESSAGE = Protocol::Query.new(Database::ADMIN, Database::COMMAND, ISMASTER, :limit => -1) # The constant for the ismaster command as an OP_MSG (server versions >= 3.6). # # @since 2.5.0 ISMASTER_OP_MSG_MESSAGE = Protocol::Msg.new([:none], {}, ISMASTER_OP_MSG) # The raw bytes for the ismaster message. # # @since 2.2.0 ISMASTER_BYTES = ISMASTER_MESSAGE.serialize.to_s.freeze # The raw bytes for the ismaster OP_MSG message (server versions >= 3.6). # # @since 2.5.0 ISMASTER_OP_MSG_BYTES = ISMASTER_OP_MSG_MESSAGE.serialize.to_s.freeze # The default time in seconds to timeout a connection attempt. # # @since 2.1.2 # # @deprecated Please use Server::CONNECT_TIMEOUT instead. Will be removed in 3.0.0 CONNECT_TIMEOUT = 10.freeze # Key for compression algorithms in the response from the server during handshake. # # @since 2.5.0 COMPRESSION = 'compression'.freeze # Warning message that the server has no compression algorithms in common with those requested # by the client. # # @since 2.5.0 COMPRESSION_WARNING = 'The server has no compression algorithms in common with those requested. ' + 'Compression will not be used.'.freeze # The compressor, which is determined during the handshake. # # @since 2.5.0 attr_reader :compressor # Send the preserialized ismaster call. # # @example Send a preserialized ismaster message. # connection.ismaster # # @return [ BSON::Document ] The ismaster result. # # @since 2.2.0 def ismaster ensure_connected do |socket| read_with_one_retry do socket.write(ISMASTER_BYTES) Protocol::Message.deserialize(socket).documents[0] end end end # Tell the underlying socket to establish a connection to the host. # # @example Connect to the host. 
# connection.connect! # # @note This method mutates the connection class by setting a socket if # one previously did not exist. # # @return [ true ] If the connection succeeded. # # @since 2.0.0 def connect! unless socket && socket.connectable? @socket = address.socket(socket_timeout, ssl_options) address.connect_socket!(socket) handshake! end true end # Disconnect the connection. # # @example Disconnect from the host. # connection.disconnect! # # @note This method mutates the connection by setting the socket to nil # if the closing succeeded. # # @return [ true ] If the disconnect succeeded. # # @since 2.0.0 def disconnect! if socket socket.close @socket = nil end true end # Initialize a new socket connection from the client to the server. # # @api private # # @example Create the connection. # Connection.new(address) # # @note Connection must never be directly instantiated outside of a # Monitor. # # @param [ Mongo::Address ] address The address the connection is for. # @param [ Hash ] options The connection options. # # @since 2.0.0 def initialize(address, options = {}) @address = address @options = options.freeze @app_metadata = options[:app_metadata] @ssl_options = options.reject { |k, v| !k.to_s.start_with?(SSL) } @socket = nil @pid = Process.pid @compressor = nil end # Get the socket timeout. # # @example Get the socket timeout. # connection.socket_timeout # # @return [ Float ] The socket timeout in seconds. Note that the Monitor's connection # uses the connect timeout value for calling ismaster. See the Server Discovery and # Monitoring specification for details. # # @since 2.4.3 def socket_timeout @timeout ||= options[:connect_timeout] || Server::CONNECT_TIMEOUT end # @deprecated Please use :socket_timeout instead. Will be removed in 3.0.0 alias :timeout :socket_timeout private def set_compressor!(reply) server_compressors = reply[COMPRESSION] if options[:compressors] if intersection = (server_compressors & options[:compressors]) @compressor = intersection[0] else log_warn(COMPRESSION_WARNING) end end end def handshake! if @app_metadata socket.write(@app_metadata.ismaster_bytes) reply = Protocol::Message.deserialize(socket, Mongo::Protocol::Message::MAX_MESSAGE_SIZE).documents[0] set_compressor!(reply) reply end end end end end end mongo-2.5.1/lib/mongo/server/description.rb0000644000004100000410000004321013257253113020733 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/server/description/features' require 'mongo/server/description/inspector' module Mongo class Server # Represents a description of the server, populated by the result of the # ismaster command. # # @since 2.0.0 class Description # Constant for reading arbiter info from config. # # @since 2.0.0 ARBITER = 'arbiterOnly'.freeze # Constant for reading arbiters info from config. # # @since 2.0.0 ARBITERS = 'arbiters'.freeze # Constant for reading hidden info from config. # # @since 2.0.0 HIDDEN = 'hidden'.freeze # Constant for reading hosts info from config. 
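# (These string keys mirror fields of the ismaster reply; for a replica
# set member the config hash typically looks roughly like
# { 'ismaster' => true, 'setName' => 'rs0',
#   'hosts' => [ 'a.example:27017', 'b.example:27017' ],
#   'maxWireVersion' => 6 }, and the reader methods below simply index
# into it. Values shown are illustrative.)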
# # @since 2.0.0 HOSTS = 'hosts'.freeze # Constant for the key for the message value. # # @since 2.0.0 MESSAGE = 'msg'.freeze # Constant for the message that indicates a sharded cluster. # # @since 2.0.0 MONGOS_MESSAGE = 'isdbgrid'.freeze # Constant for determining ghost servers. # # @since 2.0.0 REPLICA_SET = 'isreplicaset'.freeze # Constant for reading max bson size info from config. # # @since 2.0.0 MAX_BSON_OBJECT_SIZE = 'maxBsonObjectSize'.freeze # Constant for reading max message size info from config. # # @since 2.0.0 MAX_MESSAGE_BYTES = 'maxMessageSizeBytes'.freeze # Constant for the max wire version. # # @since 2.0.0 MAX_WIRE_VERSION = 'maxWireVersion'.freeze # Constant for min wire version. # # @since 2.0.0 MIN_WIRE_VERSION = 'minWireVersion'.freeze # Constant for reading max write batch size. # # @since 2.0.0 MAX_WRITE_BATCH_SIZE = 'maxWriteBatchSize'.freeze # Constant for the lastWrite subdocument. # # @since 2.4.0 LAST_WRITE = 'lastWrite'.freeze # Constant for the lastWriteDate field in the lastWrite subdocument. # # @since 2.4.0 LAST_WRITE_DATE = 'lastWriteDate'.freeze # Constant for reading the me field. # # @since 2.1.0 ME = 'me'.freeze # Default max write batch size. # # @since 2.0.0 DEFAULT_MAX_WRITE_BATCH_SIZE = 1000.freeze # The legacy wire protocol version. # # @since 2.0.0 LEGACY_WIRE_VERSION = 0.freeze # Constant for reading passive info from config. # # @since 2.0.0 PASSIVE = 'passive'.freeze # Constant for reading the passive server list. # # @since 2.0.0 PASSIVES = 'passives'.freeze # Constant for reading primary info from config. # # @since 2.0.0 PRIMARY = 'ismaster'.freeze # Constant for reading primary host field from config. # # @since 2.5.0 PRIMARY_HOST = 'primary'.freeze # Constant for reading secondary info from config. # # @since 2.0.0 SECONDARY = 'secondary'.freeze # Constant for reading replica set name info from config. # # @since 2.0.0 SET_NAME = 'setName'.freeze # Constant for reading tags info from config. # # @since 2.0.0 TAGS = 'tags'.freeze # Constant for reading electionId info from config. # # @since 2.1.0 ELECTION_ID = 'electionId'.freeze # Constant for reading setVersion info from config. # # @since 2.2.2 SET_VERSION = 'setVersion'.freeze # Constant for reading localTime info from config. # # @since 2.1.0 LOCAL_TIME = 'localTime'.freeze # Constant for reading operationTime info from config. # # @since 2.5.0 OPERATION_TIME = 'operationTime'.freeze # Constant for reading logicalSessionTimeoutMinutes info from config. # # @since 2.5.0 LOGICAL_SESSION_TIMEOUT_MINUTES = 'logicalSessionTimeoutMinutes'.freeze # Fields to exclude when comparing two descriptions. # # @since 2.0.6 EXCLUDE_FOR_COMPARISON = [ LOCAL_TIME, LAST_WRITE, OPERATION_TIME, Operation::CLUSTER_TIME ].freeze # @return [ Address ] address The server's address. attr_reader :address # @return [ Hash ] The actual result from the ismaster command. attr_reader :config # @return [ Features ] features The features for the server. attr_reader :features # @return [ Float ] The moving average time the ismaster call took to complete. attr_reader :average_round_trip_time # Will return true if the server is an arbiter. # # @example Is the server an arbiter? # description.arbiter? # # @return [ true, false ] If the server is an arbiter. # # @since 2.0.0 def arbiter? !!config[ARBITER] && !replica_set_name.nil? end # Get a list of all arbiters in the replica set. # # @example Get the arbiters in the replica set. # description.arbiters # # @return [ Array ] The arbiters in the set. 
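# (Illustrative: host strings are normalised to lower case, so a config of
# { 'arbiters' => [ 'Node-A.example:27017' ] } yields
# [ 'node-a.example:27017' ] here.)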
# # @since 2.0.0 def arbiters @arbiters ||= (config[ARBITERS] || []).map { |s| s.downcase } end # Is the server a ghost in a replica set? # # @example Is the server a ghost? # description.ghost? # # @return [ true, false ] If the server is a ghost. # # @since 2.0.0 def ghost? !!config[REPLICA_SET] end # Will return true if the server is hidden. # # @example Is the server hidden? # description.hidden? # # @return [ true, false ] If the server is hidden. # # @since 2.0.0 def hidden? !!config[HIDDEN] end # Get a list of all servers in the replica set. # # @example Get the servers in the replica set. # description.hosts # # @return [ Array ] The servers in the set. # # @since 2.0.0 def hosts @hosts ||= (config[HOSTS] || []).map { |s| s.downcase } end # Instantiate the new server description from the result of the ismaster # command. # # @example Instantiate the new description. # Description.new(address, { 'ismaster' => true }, 0.5) # # @param [ Address ] address The server address. # @param [ Hash ] config The result of the ismaster command. # @param [ Float ] average_round_trip_time The moving average time (sec) the ismaster # call took to complete. # # @since 2.0.0 def initialize(address, config = {}, average_round_trip_time = 0) @address = address @config = config @features = Features.new(wire_versions, me || @address.to_s) @average_round_trip_time = average_round_trip_time end # Inspect the server description. # # @example Inspect the server description # description.inspect # # @return [ String ] The inspection. # # @since 2.0.0 def inspect "#" end # Get the max BSON object size for this server version. # # @example Get the max BSON object size. # description.max_bson_object_size # # @return [ Integer ] The maximum object size in bytes. # # @since 2.0.0 def max_bson_object_size config[MAX_BSON_OBJECT_SIZE] end # Get the max message size for this server version. # # @example Get the max message size. # description.max_message_size # # @return [ Integer ] The maximum message size in bytes. # # @since 2.0.0 def max_message_size config[MAX_MESSAGE_BYTES] end # Get the maximum batch size for writes. # # @example Get the max batch size. # description.max_write_batch_size # # @return [ Integer ] The max batch size. # # @since 2.0.0 def max_write_batch_size config[MAX_WRITE_BATCH_SIZE] || DEFAULT_MAX_WRITE_BATCH_SIZE end # Get the maximum wire version. # # @example Get the max wire version. # description.max_wire_version # # @return [ Integer ] The max wire version supported. # # @since 2.0.0 def max_wire_version config[MAX_WIRE_VERSION] || LEGACY_WIRE_VERSION end # Get the minimum wire version. # # @example Get the min wire version. # description.min_wire_version # # @return [ Integer ] The min wire version supported. # # @since 2.0.0 def min_wire_version config[MIN_WIRE_VERSION] || LEGACY_WIRE_VERSION end # Get the me field value. # # @example Get the me field value. # description.me # # @return [ String ] The me field. # # @since 2.1.0 def me config[ME] end # Get the tags configured for the server. # # @example Get the tags. # description.tags # # @return [ Hash ] The tags of the server. # # @since 2.0.0 def tags config[TAGS] || {} end # Get the electionId from the config. # # @example Get the electionId. # description.election_id # # @return [ BSON::ObjectId ] The election id. # # @since 2.1.0 def election_id config[ELECTION_ID] end # Get the setVersion from the config. # # @example Get the setVersion. # description.set_version # # @return [ Integer ] The set version. 
# # @since 2.2.2 def set_version config[SET_VERSION] end # Get the lastWriteDate from the lastWrite subdocument in the config. # # @example Get the lastWriteDate value. # description.last_write_date # # @return [ Time ] The last write date. # # @since 2.4.0 def last_write_date config[LAST_WRITE][LAST_WRITE_DATE] if config[LAST_WRITE] end # Get the logicalSessionTimeoutMinutes from the config. # # @example Get the logicalSessionTimeoutMinutes value in minutes. # description.logical_session_timeout # # @return [ Integer, nil ] The logical session timeout in minutes. # # @since 2.5.0 def logical_session_timeout config[LOGICAL_SESSION_TIMEOUT_MINUTES] if config[LOGICAL_SESSION_TIMEOUT_MINUTES] end # Is the server a mongos? # # @example Is the server a mongos? # description.mongos? # # @return [ true, false ] If the server is a mongos. # # @since 2.0.0 def mongos? config[MESSAGE] == MONGOS_MESSAGE end # Is the description of type other. # # @example Is the description of type other. # description.other? # # @return [ true, false ] If the description is other. # # @since 2.0.0 def other? (!primary? && !secondary? && !passive? && !arbiter?) || (hidden? && !replica_set_name.nil?) end # Will return true if the server is passive. # # @example Is the server passive? # description.passive? # # @return [ true, false ] If the server is passive. # # @since 2.0.0 def passive? !!config[PASSIVE] end # Get a list of the passive servers in the cluster. # # @example Get the passives. # description.passives # # @return [ Array ] The list of passives. # # @since 2.0.0 def passives @passives ||= (config[PASSIVES] || []).map { |s| s.downcase } end # Will return true if the server is a primary. # # @example Is the server a primary? # description.primary? # # @return [ true, false ] If the server is a primary. # # @since 2.0.0 def primary? !!config[PRIMARY] && (config[PRIMARY_HOST].nil? || config[PRIMARY_HOST] == address.to_s) && !replica_set_name.nil? end # Get the name of the replica set the server belongs to, returns nil if # none. # # @example Get the replica set name. # description.replica_set_name # # @return [ String, nil ] The name of the replica set. # # @since 2.0.0 def replica_set_name config[SET_NAME] end # Get a list of all servers known to the cluster. # # @example Get all servers. # description.servers # # @return [ Array ] The list of all servers. # # @since 2.0.0 def servers hosts + arbiters + passives end # Will return true if the server is a secondary. # # @example Is the server a secondary? # description.secondary? # # @return [ true, false ] If the server is a secondary. # # @since 2.0.0 def secondary? !!config[SECONDARY] && !replica_set_name.nil? end # Returns the server type as a symbol. # # @example Get the server type. # description.server_type # # @return [ Symbol ] The server type. # # @since 2.4.0 def server_type return :arbiter if arbiter? return :ghost if ghost? return :sharded if mongos? return :primary if primary? return :secondary if secondary? return :standalone if standalone? :unknown end # Is this server a standalone server? # # @example Is the server standalone? # description.standalone? # # @return [ true, false ] If the server is standalone. # # @since 2.0.0 def standalone? replica_set_name.nil? && !mongos? && !ghost? && !unknown? end # Is the server description currently unknown? # # @example Is the server description unknown? # description.unknown? # # @return [ true, false ] If the server description is unknown. # # @since 2.0.0 def unknown? config.empty? 
|| (config[Operation::Result::OK] && config[Operation::Result::OK] != 1) end # A result from another server's ismaster command before this server has # refreshed causes the need for this description to become unknown before # the next refresh. # # @example Force an unknown state. # description.unknown! # # @return [ true ] Always true. # # @since 2.0.0 def unknown! @config = {} and true end # Get the range of supported wire versions for the server. # # @example Get the wire version range. # description.wire_versions # # @return [ Range ] The wire version range. # # @since 2.0.0 def wire_versions min_wire_version..max_wire_version end # Is this description from the given server. # # @example Check if the description is from a given server. # description.is_server?(server) # # @return [ true, false ] If the description is from the server. # # @since 2.0.6 def is_server?(server) address == server.address end # Is a server included in this description's list of servers. # # @example Check if a server is in the description list of servers. # description.lists_server?(server) # # @return [ true, false ] If a server is in the description's list # of servers. # # @since 2.0.6 def lists_server?(server) servers.include?(server.address.to_s) end # Does this description correspond to a replica set member. # # @example Check if the description is from a replica set member. # description.replica_set_member? # # @return [ true, false ] If the description is from a replica set # member. # # @since 2.0.6 def replica_set_member? !(standalone? || mongos?) end # Check if there is a mismatch between the address host and the me field. # # @example Check if there is a mismatch. # description.me_mismatch? # # @return [ true, false ] If there is a mismatch between the me field and the address host. # # @since 2.0.6 def me_mismatch? !!(address.to_s != me if me) end # Check equality of two descriptions. # # @example Check description equality. # description == other # # @param [ Object ] other The other description. # # @return [ true, false ] Whether the objects are equal. # # @since 2.0.6 def ==(other) return false if self.class != other.class return false if unknown? || other.unknown? compare_config(other) end alias_method :eql?, :== private def compare_config(other) config.keys.all? do |k| config[k] == other.config[k] || EXCLUDE_FOR_COMPARISON.include?(k) end end end end end mongo-2.5.1/lib/mongo/server/connection_pool/0000755000004100000410000000000013257253113021253 5ustar www-datawww-datamongo-2.5.1/lib/mongo/server/connection_pool/queue.rb0000644000004100000410000001617013257253113022731 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server class ConnectionPool # A LIFO queue of connections to be used by the connection pool. This is # based on mperham's connection pool. # # @since 2.0.0 class Queue extend Forwardable # Size of the queue delegates to the wrapped queue. def_delegators :queue, :size # The default max size for the queue. 
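# --- Illustrative example (added for exposition; not part of the original gem source) ---
# Sketch of the classification and reset behaviour defined above: a description with an
# empty config is :unknown, a reply with no setName and no mongos marker classifies as
# :standalone, and #unknown! discards the config so the next ismaster refresh
# re-classifies the server.
require 'mongo'

address = Mongo::Address.new('127.0.0.1:27017')

blank = Mongo::Server::Description.new(address)
blank.unknown?     #=> true
blank.server_type  #=> :unknown

standalone = Mongo::Server::Description.new(address, { 'ismaster' => true, 'ok' => 1 })
standalone.standalone?  #=> true (no replica set name, not a mongos, not a ghost)
standalone.server_type  #=> :standalone

standalone.unknown!     #=> true; the config is discarded
standalone.unknown?     #=> true
# --- end illustrative example ---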
MAX_SIZE = 5.freeze # The default min size for the queue. MIN_SIZE = 1.freeze # The default timeout, in seconds, to wait for a connection. WAIT_TIMEOUT = 1.freeze # @return [ Array ] queue The underlying array of connections. attr_reader :queue # @return [ Mutex ] mutex The mutex used for synchronization. attr_reader :mutex # @return [ Hash ] options The options. attr_reader :options # @return [ ConditionVariable ] resource The resource. attr_reader :resource # Dequeue a connection from the queue, waiting for the provided timeout # for an item if none is in the queue. # # @example Dequeue a connection. # queue.dequeue # # @return [ Mongo::Pool::Connection ] The next connection. # # @since 2.0.0 def dequeue mutex.synchronize do dequeue_connection end end # Disconnect all connections in the queue. # # @example Disconnect all connections. # queue.disconnect! # # @return [ true ] Always true. # # @since 2.1.0 def disconnect! mutex.synchronize do queue.each{ |connection| connection.disconnect! } true end end # Enqueue a connection in the queue. # # @example Enqueue a connection. # queue.enqueue(connection) # # @param [ Mongo::Pool::Connection ] connection The connection. # # @since 2.0.0 def enqueue(connection) mutex.synchronize do queue.unshift(connection.record_checkin!) resource.broadcast end end # Initialize the new queue. Will yield the block the number of times for # the initial size of the queue. # # @example Create the queue. # Mongo::Pool::Queue.new(max_pool_size: 5) { Connection.new } # # @param [ Hash ] options The options. # # @option options [ Integer ] :max_pool_size The maximum size. # @option options [ Integer ] :min_pool_size The minimum size. # @option options [ Float ] :wait_queue_timeout The time to wait, in # seconds, for a free connection. # # @since 2.0.0 def initialize(options = {}, &block) @block = block @connections = 0 @options = options @queue = Array.new(min_size) { create_connection } @mutex = Mutex.new @resource = ConditionVariable.new end # Get a pretty printed string inspection for the queue. # # @example Inspect the queue. # queue.inspect # # @return [ String ] The queue inspection. # # @since 2.0.0 def inspect "#" end # Get the maximum size of the queue. # # @example Get the max size. # queue.max_size # # @return [ Integer ] The maximum size of the queue. # # @since 2.0.0 def max_size @max_size ||= options[:max_pool_size] || MAX_SIZE end # Get the minimum size of the queue. # # @example Get the min size. # queue.min_size # # @return [ Integer ] The minimum size of the queue. # # @since 2.0.0 def min_size @min_size ||= options[:min_pool_size] || MIN_SIZE end # The time to wait, in seconds, for a connection to become available. # # @example Get the wait timeout. # queue.wait_timeout # # @return [ Float ] The queue wait timeout. # # @since 2.0.0 def wait_timeout @wait_timeout ||= options[:wait_queue_timeout] || WAIT_TIMEOUT end # The maximum seconds a socket can remain idle since it has been checked in to the pool. # # @example Get the max idle time. # queue.max_idle_time # # @return [ Float ] The max socket idle time in seconds. # # @since 2.5.0 def max_idle_time @max_idle_time ||= options[:max_idle_time] end # Close sockets that have been open for longer than the max idle time, if the # option is set. # # @example Close the stale sockets # queue.close_stale_sockets! # # @since 2.5.0 def close_stale_sockets! 
return unless max_idle_time to_refresh = [] queue.each do |connection| if last_checkin = connection.last_checkin if (Time.now - last_checkin) > max_idle_time to_refresh << connection end end end mutex.synchronize do num_checked_out = @connections - queue.size min_size_delta = [(min_size - num_checked_out), 0].max to_refresh.each do |connection| if queue.include?(connection) connection.disconnect! if queue.index(connection) < min_size_delta begin; connection.connect!; rescue; end end end end end end private def dequeue_connection deadline = Time.now + wait_timeout loop do return queue.shift unless queue.empty? connection = create_connection return connection if connection wait_for_next!(deadline) end end def create_connection if @connections < max_size @connections += 1 @block.call end end def wait_for_next!(deadline) wait = deadline - Time.now if wait <= 0 raise Timeout::Error.new("Timed out attempting to dequeue connection after #{wait_timeout} sec.") end resource.wait(mutex, wait) end end end end end mongo-2.5.1/lib/mongo/server/description/0000755000004100000410000000000013257253113020406 5ustar www-datawww-datamongo-2.5.1/lib/mongo/server/description/inspector/0000755000004100000410000000000013257253113022414 5ustar www-datawww-datamongo-2.5.1/lib/mongo/server/description/inspector/description_changed.rb0000644000004100000410000000350113257253113026734 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server class Description class Inspector # Handles inspecting the result of an ismaster command for servers # added to the cluster. # # @since 2.0.0 class DescriptionChanged include Event::Publisher # Instantiate the server added inspection. # # @example Instantiate the inspection. # ServerAdded.new(listeners) # # @param [ Event::Listeners ] event_listeners The event listeners. # # @since 2.0.0 def initialize(event_listeners) @event_listeners = event_listeners end # Run the server added inspection. # # @example Run the inspection. # ServerAdded.run(description, {}) # # @param [ Description ] description The server description. # @param [ Description ] updated The updated description. # # @since 2.0.0 def run(description, updated) unless (description.unknown? && updated.unknown?) || (description == updated) publish(Event::DESCRIPTION_CHANGED, description, updated) end end end end end end end mongo-2.5.1/lib/mongo/server/description/inspector/member_discovered.rb0000644000004100000410000000370213257253113026421 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
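# --- Illustrative sketch (added for exposition; not part of the original gem source) ---
# Exercising the connection pool Queue above with a stand-in connection object.
# FakeConnection is hypothetical and only implements the small surface the Queue
# touches (#record_checkin!, #last_checkin, #connect!, #disconnect!); the real pool
# stores Mongo::Server::Connection objects.
require 'mongo'

class FakeConnection
  attr_reader :last_checkin

  def record_checkin!
    @last_checkin = Time.now
    self
  end

  def connect!;    self; end
  def disconnect!; true; end
end

queue = Mongo::Server::ConnectionPool::Queue.new(min_pool_size: 1, max_pool_size: 2) do
  FakeConnection.new
end

conn = queue.dequeue   # waits up to wait_timeout (1 second by default) when the pool is exhausted
queue.enqueue(conn)    # checks the connection back in and wakes any waiting thread
queue.size             #=> 1
# --- end illustrative sketch ---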
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server class Description class Inspector # Handles inspecting the result of an ismaster command to check if this # a server is a member of a known topology. # # @since 2.4.0 class MemberDiscovered include Event::Publisher # Instantiate the member discovered inspection. # # @example Instantiate the inspection. # MemberDiscovered.new(listeners) # # @param [ Event::Listeners ] event_listeners The event listeners. # # @since 2.4.0 def initialize(event_listeners) @event_listeners = event_listeners end # Run the member discovered inspection. # # @example Run the inspection. # MemberDiscovered.run(description, {}) # # @param [ Description ] description The server description. # @param [ Description ] updated The updated description. # # @since 2.4.0 def run(description, updated) if (!description.primary? && updated.primary?) || (!description.mongos? && updated.mongos?) || (description.unknown? && !updated.unknown?) publish(Event::MEMBER_DISCOVERED, description, updated) end end end end end end end mongo-2.5.1/lib/mongo/server/description/inspector/standalone_discovered.rb0000644000004100000410000000343313257253113027303 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server class Description class Inspector # Handles notifying the cluster that a standalone was discovered. # # @since 2.0.6 class StandaloneDiscovered include Event::Publisher # Instantiate the standalone discovered inspection. # # @example Instantiate the inspection. # StandaloneDiscovered.new(listeners) # # @param [ Event::Listeners ] event_listeners The event listeners. # # @since 2.0.6 def initialize(event_listeners) @event_listeners = event_listeners end # Run the standalone discovered inspection. # # @example Run the inspection. # StandaloneDiscovered.run(description, {}) # # @param [ Description ] description The server description. # @param [ Description ] updated The updated description. # # @since 2.0.6 def run(description, updated) if !description.standalone? && updated.standalone? publish(Event::STANDALONE_DISCOVERED, updated) end end end end end end end mongo-2.5.1/lib/mongo/server/description/inspector/primary_elected.rb0000644000004100000410000000362213257253113026114 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
module Mongo class Server class Description class Inspector # Handles inspecting the result of an ismaster command to check if this # server was elected primary. # # @since 2.0.0 # # @deprecated. Will be removed in 3.0 class PrimaryElected include Event::Publisher # Instantiate the primary elected inspection. # # @example Instantiate the inspection. # PrimaryElected.new(listeners) # # @param [ Event::Listeners ] event_listeners The event listeners. # # @since 2.0.0 def initialize(event_listeners) @event_listeners = event_listeners end # Run the primary elected inspection. # # @example Run the inspection. # PrimaryElected.run(description, {}) # # @param [ Description ] description The server description. # @param [ Description ] updated The updated description. # # @since 2.0.0 def run(description, updated) if (!description.primary? && updated.primary?) || (!description.mongos? && updated.mongos?) publish(Event::PRIMARY_ELECTED, updated) end end end end end end end mongo-2.5.1/lib/mongo/server/description/features.rb0000644000004100000410000001015613257253113022554 0ustar www-datawww-data# Copyright (C) 2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server class Description # Defines behaviour around what features a specific server supports. # # @since 2.0.0 class Features # List of features and the wire protocol version they appear in. # # @since 2.0.0 MAPPINGS = { :array_filters => 6, :op_msg => 6, :sessions => 6, :collation => 5, :max_staleness => 5, :find_command => 4, :list_collections => 3, :list_indexes => 3, :scram_sha_1 => 3, :write_command => 2, :users_info => 2 }.freeze # Error message if the server is too old for this version of the driver. # # @since 2.5.0 SERVER_TOO_OLD = "Server at (%s) reports wire version (%s), but this version of the Ruby driver " + "requires at least (%s)." # Error message if the driver is too old for the version of the server. # # @since 2.5.0 DRIVER_TOO_OLD = "Server at (%s) requires wire version (%s), but this version of the Ruby driver " + "only supports up to (%s)." # The wire protocol versions that this version of the driver supports. # # @since 2.0.0 DRIVER_WIRE_VERSIONS = (2..6).freeze # Create the methods for each mapping to tell if they are supported. # # @since 2.0.0 MAPPINGS.each do |name, version| # Determine whether or not the feature is enabled. # # @example Is a feature enabled? # features.list_collections_enabled? # # @return [ true, false ] If the feature is enabled. # # @since 2.0.0 define_method("#{name}_enabled?") do server_wire_versions.include?(MAPPINGS[name]) end end # @return [ Range ] server_wire_versions The server's supported wire # versions. attr_reader :server_wire_versions # Initialize the features. # # @example Initialize the features. # Features.new(0..3) # # @param [ Range ] server_wire_versions The server supported wire # versions. 
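# --- Illustrative example (added for exposition; not part of the original gem source) ---
# The generated *_enabled? predicates above simply test whether the feature's wire
# version from MAPPINGS falls inside the server's advertised range, and
# #check_driver_support! raises only when that range does not overlap
# DRIVER_WIRE_VERSIONS (2..6). The addresses are example values.
require 'mongo'

features = Mongo::Server::Description::Features.new(0..6, '127.0.0.1:27017')
features.sessions_enabled?     #=> true  (sessions appear at wire version 6)
features.collation_enabled?    #=> true  (collation appears at wire version 5)
features.check_driver_support! # no error: 0..6 overlaps 2..6

old_server = Mongo::Server::Description::Features.new(0..1, '127.0.0.1:27018')
begin
  old_server.check_driver_support!
rescue Mongo::Error::UnsupportedFeatures => e
  e.message # explains that the server is too old for this driver version
end
# --- end illustrative example ---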
# # @since 2.0.0 def initialize(server_wire_versions, address = nil) @server_wire_versions = server_wire_versions @address = address end # Check that there is an overlap between the driver supported wire version range # and the server wire version range. # # @example Verify the wire version overlap. # features.check_driver_support! # # @raise [ Error::UnsupportedFeatures ] If the wire version range is not covered # by the driver. # # @since 2.5.1 def check_driver_support! if DRIVER_WIRE_VERSIONS.min > @server_wire_versions.max raise Error::UnsupportedFeatures.new(SERVER_TOO_OLD % [@address, @server_wire_versions.max, DRIVER_WIRE_VERSIONS.min]) elsif DRIVER_WIRE_VERSIONS.max < @server_wire_versions.min raise Error::UnsupportedFeatures.new(DRIVER_TOO_OLD % [@address, @server_wire_versions.min, DRIVER_WIRE_VERSIONS.max]) end end end end end end mongo-2.5.1/lib/mongo/server/description/inspector.rb0000644000004100000410000000533213257253113022744 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/server/description/inspector/member_discovered' # @deprecated. Will be removed in 3.0 require 'mongo/server/description/inspector/primary_elected' require 'mongo/server/description/inspector/description_changed' require 'mongo/server/description/inspector/standalone_discovered' module Mongo class Server class Description # Handles inspection of an updated server description to determine if # events should be fired. # # @since 2.0.0 class Inspector # Static list of inspections that are performed on the result of an # ismaster command in order to generate the appropriate events for the # changes. # # @since 2.0.0 INSPECTORS = [ Inspector::StandaloneDiscovered, Inspector::DescriptionChanged, Inspector::MemberDiscovered ].freeze # @return [ Array ] inspectors The description inspectors. attr_reader :inspectors # Create the new inspector. # # @example Create the new inspector. # Inspector.new(listeners) # # @param [ Event::Listeners ] listeners The event listeners. # # @since 2.0.0 def initialize(listeners) @inspectors = INSPECTORS.map do |inspector| inspector.new(listeners) end end # Run the server description inspector. # # @example Run the inspector. # inspector.run(description, { 'ismaster' => true }) # # @param [ Description ] description The old description. # @param [ Hash ] ismaster The updated ismaster. # @param [ Float ] average_round_trip_time The moving average round trip time (sec). # # @return [ Description ] The new description. # # @since 2.0.0 def run(description, ismaster, average_round_trip_time) new_description = Description.new(description.address, ismaster, average_round_trip_time) inspectors.each do |inspector| inspector.run(description, new_description) end new_description end end end end end mongo-2.5.1/lib/mongo/server/connectable.rb0000644000004100000410000000543113257253113020670 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. 
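# --- Illustrative example (added for exposition; not part of the original gem source) ---
# Sketch of the inspection flow defined above: the Inspector builds a new Description
# from the latest ismaster reply, lets each registered inspection compare the old and
# new descriptions, and returns the new one; inspections publish their events to the
# supplied listeners. No listeners are registered here, so the publishes are no-ops.
require 'mongo'

listeners = Mongo::Event::Listeners.new
inspector = Mongo::Server::Description::Inspector.new(listeners)

address  = Mongo::Address.new('127.0.0.1:27017')
old_desc = Mongo::Server::Description.new(address) # unknown until the first reply

new_desc = inspector.run(old_desc, { 'ismaster' => true, 'ok' => 1 }, 0.004)
new_desc.standalone? #=> true; the standalone/member/description-changed inspections fire
# --- end illustrative example ---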
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Server # This provides common behaviour for connection objects. # # @since 2.0.0 module Connectable # The ssl option prefix. # # @since 2.1.0 SSL = 'ssl'.freeze # The default time in seconds to timeout an operation executed on a socket. # # @since 2.0.0 # # @deprecated Timeouts on Ruby sockets aren't effective so this default option is # no longer used. # Will be removed in driver version 3.0. TIMEOUT = 5.freeze # @return [ Mongo::Address ] address The address to connect to. attr_reader :address # @return [ Hash ] options The passed in options. attr_reader :options # @return [ Integer ] pid The process id when the connection was created. attr_reader :pid # Determine if the server is connectable. This will check not only if the # connection exists, but if messages can send to it successfully. # # @example Is the server connectable? # connection.connectable? # # @return [ true, false ] If the connection is connectable. # # @since 2.1.0 def connectable? begin; ping; rescue; false; end end # Determine if the connection is currently connected. # # @example Is the connection connected? # connection.connected? # # @return [ true, false ] If connected. # # @deprecated Use #connectable? instead def connected? !!@socket && @socket.alive? end protected attr_reader :socket private def ssl_options @ssl_options[:ssl] == true ? @ssl_options : {} end def ensure_connected ensure_same_process! begin connect! result = yield socket success = true result ensure success or disconnect! end end def ensure_same_process! if pid != Process.pid disconnect! @pid = Process.pid end end def read(request_id = nil) ensure_connected do |socket| Protocol::Message.deserialize(socket, max_message_size, request_id) end end end end end mongo-2.5.1/lib/mongo/database.rb0000644000004100000410000001640513257253113016654 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/database/view' module Mongo # Represents a database on the db server and operations that can execute on # it at this level. # # @since 2.0.0 class Database extend Forwardable # The admin database name. # # @since 2.0.0 ADMIN = 'admin'.freeze # The "collection" that database commands operate against. # # @since 2.0.0 COMMAND = '$cmd'.freeze # The default database options. # # @since 2.0.0 DEFAULT_OPTIONS = Options::Redacted.new(:database => ADMIN).freeze # Database name field constant. # # @since 2.1.0 NAME = 'name'.freeze # Databases constant. 
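# --- Illustrative example (added for exposition; not part of the original gem source) ---
# Typical use of the Database API defined in this class: collection access via #[],
# listing collection names, and running a command. Assumes a mongod listening on
# 127.0.0.1:27017 with a 'test' database; the collection names shown are examples.
require 'mongo'

client   = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')
database = client.database

users = database[:users]    # Mongo::Collection; created lazily on first write
database.collection_names   #=> e.g. ["users", "events"]
database.command(ismaster: 1).first['ismaster'] #=> true
# --- end illustrative example ---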
# # @since 2.1.0 DATABASES = 'databases'.freeze # The name of the collection that holds all the collection names. # # @since 2.0.0 NAMESPACES = 'system.namespaces'.freeze # @return [ Client ] client The database client. attr_reader :client # @return [ String ] name The name of the database. attr_reader :name # @return [ Hash ] options The options. attr_reader :options # Get cluster, read preference, and write concern from client. def_delegators :@client, :cluster, :read_preference, :server_selector, :write_concern # @return [ Mongo::Server ] Get the primary server from the cluster. def_delegators :cluster, :next_primary # Check equality of the database object against another. Will simply check # if the names are the same. # # @example Check database equality. # database == other # # @param [ Object ] other The object to check against. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Database) name == other.name end # Get a collection in this database by the provided name. # # @example Get a collection. # database[:users] # # @param [ String, Symbol ] collection_name The name of the collection. # @param [ Hash ] options The options to the collection. # # @return [ Mongo::Collection ] The collection object. # # @since 2.0.0 def [](collection_name, options = {}) Collection.new(self, collection_name, options) end alias_method :collection, :[] # Get all the names of the non system collections in the database. # # @example Get the collection names. # database.collection_names # # @return [ Array ] The names of all non-system collections. # # @since 2.0.0 def collection_names(options = {}) View.new(self).collection_names(options) end # Get info on all the collections in the database. # # @example Get info on each collection. # database.list_collections # # @return [ Array ] Info for each collection in the database. # # @since 2.0.5 def list_collections View.new(self).list_collections end # Get all the collections that belong to this database. # # @example Get all the collections. # database.collections # # @return [ Array ] All the collections. # # @since 2.0.0 def collections collection_names.map { |name| collection(name) } end # Execute a command on the database. # # @example Execute a command. # database.command(:ismaster => 1) # # @param [ Hash ] operation The command to execute. # @param [ Hash ] opts The command options. # # @option opts :read [ Hash ] The read preference for this command. # # @return [ Hash ] The result of the command execution. def command(operation, opts = {}) preference = ServerSelector.get(opts[:read] || ServerSelector::PRIMARY) server = preference.select_server(cluster) client.send(:with_session, opts) do |session| Operation::Commands::Command.new({ :selector => operation.dup, :db_name => name, :read => preference, :session => session }).execute(server) end end # Drop the database and all its associated information. # # @example Drop the database. # database.drop # # @param [ Hash ] options The options for the operation. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Result ] The result of the command. # # @since 2.0.0 def drop(options = {}) operation = { :dropDatabase => 1 } client.send(:with_session, options) do |session| Operation::Commands::DropDatabase.new({ selector: operation, db_name: name, write_concern: write_concern, session: session }).execute(next_primary) end end # Instantiate a new database object. # # @example Instantiate the database. 
# Mongo::Database.new(client, :test) # # @param [ Mongo::Client ] client The driver client. # @param [ String, Symbol ] name The name of the database. # @param [ Hash ] options The options. # # @raise [ Mongo::Database::InvalidName ] If the name is nil. # # @since 2.0.0 def initialize(client, name, options = {}) raise Error::InvalidDatabaseName.new unless name @client = client @name = name.to_s.freeze @options = options.freeze end # Get a pretty printed string inspection for the database. # # @example Inspect the database. # database.inspect # # @return [ String ] The database inspection. # # @since 2.0.0 def inspect "#" end # Get the Grid "filesystem" for this database. # # @example Get the GridFS. # database.fs # # @return [ Grid::FSBucket ] The GridFS for the database. # # @since 2.0.0 def fs(options = {}) Grid::FSBucket.new(self, options) end # Get the user view for this database. # # @example Get the user view. # database.users # # @return [ View::User ] The user view. # # @since 2.0.0 def users Auth::User::View.new(self) end # Create a database for the provided client, for use when we don't want the # client's original database instance to be the same. # # @api private # # @example Create a database for the client. # Database.create(client) # # @param [ Client ] client The client to create on. # # @return [ Database ] The database. # # @since 2.0.0 def self.create(client) database = Database.new(client, client.options[:database], client.options) client.instance_variable_set(:@database, database) end end end mongo-2.5.1/lib/mongo/server_selector.rb0000644000004100000410000000454513257253113020320 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/server_selector/selectable' require 'mongo/server_selector/nearest' require 'mongo/server_selector/primary' require 'mongo/server_selector/primary_preferred' require 'mongo/server_selector/secondary' require 'mongo/server_selector/secondary_preferred' module Mongo # Functionality for getting an object able to select a server, given a preference. # # @since 2.0.0 module ServerSelector extend self # The max latency in seconds between the closest server and other servers # considered for selection. # # @since 2.0.0 LOCAL_THRESHOLD = 0.015.freeze # How long to block for server selection before throwing an exception. # # @since 2.0.0 SERVER_SELECTION_TIMEOUT = 30.freeze # The smallest allowed max staleness value, in seconds. # # @since 2.4.0 SMALLEST_MAX_STALENESS_SECONDS = 90 # Primary read preference. # # @since 2.1.0 PRIMARY = Options::Redacted.new(mode: :primary).freeze # Hash lookup for the selector classes based off the symbols # provided in configuration. # # @since 2.0.0 PREFERENCES = { nearest: Nearest, primary: Primary, primary_preferred: PrimaryPreferred, secondary: Secondary, secondary_preferred: SecondaryPreferred }.freeze # Create a server selector object. # # @example Get a server selector object for selecting a secondary with # specific tag sets. 
# Mongo::ServerSelector.get(:mode => :secondary, :tag_sets => [{'dc' => 'nyc'}]) # # @param [ Hash ] preference The server preference. # # @since 2.0.0 def get(preference = {}) return preference if PREFERENCES.values.include?(preference.class) PREFERENCES.fetch(preference[:mode] || :primary).new(preference) end end end mongo-2.5.1/lib/mongo/logger.rb0000644000004100000410000000400413257253113016357 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'logger' module Mongo # Provides ability to log messages. # # @since 2.0.0 class Logger class << self # Get the wrapped logger. If none was set will return a default debug # level logger. # # @example Get the wrapped logger. # Mongo::Logger.logger # # @return [ ::Logger ] The wrapped logger. # # @since 2.0.0 def logger @logger ||= default_logger end # Set the logger. # # @example Set the wrapped logger. # Mongo::Logger.logger = logger # # @param [ ::Logger ] other The logger to set. # # @return [ ::Logger ] The wrapped logger. # # @since 2.0.0 def logger=(other) @logger = other end # Get the global logger level. # # @example Get the global logging level. # Mongo::Logger.level # # @return [ Integer ] The log level. # # @since 2.0.0 def level logger.level end # Set the global logger level. # # @example Set the global logging level. # Mongo::Logger.level == Logger::DEBUG # # @return [ Integer ] The log level. # # @since 2.0.0 def level=(level) logger.level = level end private def default_logger logger = ::Logger.new($stdout) logger.level = ::Logger::DEBUG logger end end end end mongo-2.5.1/lib/mongo/client.rb0000644000004100000410000004601613257253113016367 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo # The client is the entry point to the driver and is the main object that # will be interacted with. # # @since 2.0.0 class Client extend Forwardable include Loggable # The options that do not affect the behaviour of a cluster and its # subcomponents. # # @since 2.1.0 CRUD_OPTIONS = [ :database, :read, :write ].freeze # Valid client options. 
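# --- Illustrative example (added for exposition; not part of the original gem source) ---
# Two small usage sketches for the pieces above: resolving a read preference hash into
# a selector object via ServerSelector.get, and redirecting driver logging through the
# Logger wrapper (which otherwise defaults to DEBUG on $stdout).
require 'mongo'
require 'logger'

selector = Mongo::ServerSelector.get(mode: :secondary_preferred, tag_sets: [{ 'dc' => 'nyc' }])
selector.class #=> Mongo::ServerSelector::SecondaryPreferred

Mongo::Logger.logger       = ::Logger.new($stdout)
Mongo::Logger.logger.level = ::Logger::WARN # quieten the default DEBUG level
# --- end illustrative example ---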
# # @since 2.1.2 VALID_OPTIONS = [ :app_name, :auth_mech, :auth_mech_properties, :auth_source, :connect, :connect_timeout, :compressors, :database, :heartbeat_frequency, :id_generator, :local_threshold, :logger, :max_idle_time, :max_pool_size, :max_read_retries, :min_pool_size, :monitoring, :password, :platform, :read, :read_retry_interval, :replica_set, :retry_writes, :server_selection_timeout, :socket_timeout, :ssl, :ssl_ca_cert, :ssl_ca_cert_string, :ssl_ca_cert_object, :ssl_cert, :ssl_cert_string, :ssl_cert_object, :ssl_key, :ssl_key_string, :ssl_key_object, :ssl_key_pass_phrase, :ssl_verify, :truncate_logs, :user, :wait_queue_timeout, :write, :zlib_compression_level ].freeze # The compression algorithms supported by the driver. # # @since 2.5.0 VALID_COMPRESSORS = [ Mongo::Protocol::Compressed::ZLIB ].freeze # @return [ Mongo::Cluster ] cluster The cluster of servers for the client. attr_reader :cluster # @return [ Mongo::Database ] database The database the client is operating on. attr_reader :database # @return [ Hash ] options The configuration options. attr_reader :options # Delegate command and collections execution to the current database. def_delegators :@database, :command, :collections # Delegate subscription to monitoring. def_delegators :@monitoring, :subscribe, :unsubscribe # Determine if this client is equivalent to another object. # # @example Check client equality. # client == other # # @param [ Object ] other The object to compare to. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Client) cluster == other.cluster && options == other.options end alias_method :eql?, :== # Get a collection object for the provided collection name. # # @example Get the collection. # client[:users] # # @param [ String, Symbol ] collection_name The name of the collection. # @param [ Hash ] options The options to the collection. # # @return [ Mongo::Collection ] The collection. # # @since 2.0.0 def [](collection_name, options = {}) database[collection_name, options] end # Get the hash value of the client. # # @example Get the client hash value. # client.hash # # @return [ Integer ] The client hash value. # # @since 2.0.0 def hash [cluster, options].hash end # Instantiate a new driver client. # # @example Instantiate a single server or mongos client. # Mongo::Client.new([ '127.0.0.1:27017' ]) # # @example Instantiate a client for a replica set. # Mongo::Client.new([ '127.0.0.1:27017', '127.0.0.1:27021' ]) # # @param [ Array, String ] addresses_or_uri The array of server addresses in the # form of host:port or a MongoDB URI connection string. # @param [ Hash ] options The options to be used by the client. # # @option options [ Symbol ] :auth_mech The authentication mechanism to # use. One of :mongodb_cr, :mongodb_x509, :plain, :scram # @option options [ String ] :auth_source The source to authenticate from. # @option options [ Symbol ] :connect The connection method to use. This # forces the cluster to behave in the specified way instead of # auto-discovering. One of :direct, :replica_set, :sharded # @option options [ String ] :database The database to connect to. # @option options [ Hash ] :auth_mech_properties # @option options [ Float ] :heartbeat_frequency The number of seconds for # the server monitor to refresh it's description via ismaster. # @option options [ Integer ] :local_threshold The local threshold boundary # in seconds for selecting a near server for an operation. 
# @option options [ Integer ] :server_selection_timeout The timeout in seconds # for selecting a server for an operation. # @option options [ String ] :password The user's password. # @option options [ Integer ] :max_idle_time The maximum seconds a socket can remain idle # since it has been checked in to the pool. # @option options [ Integer ] :max_pool_size The maximum size of the # connection pool. # @option options [ Integer ] :min_pool_size The minimum size of the # connection pool. # @option options [ Float ] :wait_queue_timeout The time to wait, in # seconds, in the connection pool for a connection to be checked in. # @option options [ Float ] :connect_timeout The timeout, in seconds, to # attempt a connection. # @option options [ Array ] :compressors A list of potential compressors to use, in order of preference. # The driver chooses the first compressor that is also supported by the server. Currently the driver only # supports 'zlib'. # @option options [ Hash ] :read The read preference options. They consist of a # mode specified as a symbol, an array of hashes known as tag_sets, # and local_threshold. # :mode can be one of :secondary, :secondary_preferred, :primary, # :primary_preferred, :nearest. # @option options [ Symbol ] :replica_set The name of the replica set to # connect to. Servers not in this replica set will be ignored. # @option options [ true, false ] :ssl Whether to use SSL. # @option options [ String ] :ssl_cert The certificate file used to identify # the connection against MongoDB. This option, if present, takes precedence # over the values of :ssl_cert_string and :ssl_cert_object # @option options [ String ] :ssl_cert_string A string containing the PEM-encoded # certificate used to identify the connection against MongoDB. This option, if present, # takes precedence over the value of :ssl_cert_object # @option options [ OpenSSL::X509::Certificate ] :ssl_cert_object The OpenSSL::X509::Certificate # used to identify the connection against MongoDB # @option options [ String ] :ssl_key The private keyfile used to identify the # connection against MongoDB. Note that even if the key is stored in the same # file as the certificate, both need to be explicitly specified. This option, # if present, takes precedence over the values of :ssl_key_string and :ssl_key_object # @option options [ String ] :ssl_key_string A string containing the PEM-encoded private key # used to identify the connection against MongoDB. This parameter, if present, # takes precedence over the value of option :ssl_key_object # @option options [ OpenSSL::PKey ] :ssl_key_object The private key used to identify the # connection against MongoDB # @option options [ String ] :ssl_key_pass_phrase A passphrase for the private key. # @option options [ true, false ] :ssl_verify Whether or not to do peer certification # validation. # @option options [ String ] :ssl_ca_cert The file containing a set of concatenated # certification authority certifications used to validate certs passed from the # other end of the connection. One of :ssl_ca_cert, :ssl_ca_cert_string or # :ssl_ca_cert_object (in order of priority) is required for :ssl_verify. # @option options [ String ] :ssl_ca_cert_string A string containing a set of concatenated # certification authority certifications used to validate certs passed from the # other end of the connection. One of :ssl_ca_cert, :ssl_ca_cert_string or # :ssl_ca_cert_object (in order of priority) is required for :ssl_verify. 
# @option options [ Array ] :ssl_ca_cert_object An array of # OpenSSL::X509::Certificate representing the certification authority certifications used # to validate certs passed from the other end of the connection. One of :ssl_ca_cert, # :ssl_ca_cert_string or :ssl_ca_cert_object (in order of priority) is required for :ssl_verify. # @option options [ Float ] :socket_timeout The timeout, in seconds, to # execute operations on a socket. # @option options [ String ] :user The user name. # @option options [ Hash ] :write The write concern options. Can be :w => # Integer|String, :fsync => Boolean, :j => Boolean. # @option options [ true, false ] :monitoring Initializes a client without # any default monitoring if false is provided. # @option options [ Logger ] :logger A custom logger if desired. # @option options [ true, false ] :truncate_logs Whether to truncate the # logs at the default 250 characters. # @option options [ Integer ] :max_read_retries The maximum number of read # retries on mongos query failures. # @option options [ Float ] :read_retry_interval The interval, in seconds, # in which reads on a mongos are retried. # @option options [ Object ] :id_generator A custom object to generate ids # for documents. Must respond to #generate. # @option options [ String, Symbol ] :app_name Application name that is printed to the # mongod logs upon establishing a connection in server versions >= 3.4. # @option options [ String ] :platform Platform information to include in the # metadata printed to the mongod logs upon establishing a connection in server versions >= 3.4. # @option options [ Integer ] :zlib_compression_level The Zlib compression level to use, if using compression. # See Ruby's Zlib module for valid levels. # @option options [ true, false ] :retry_writes Retry writes once when connected to a replica set # or sharded cluster versions 3.6 and up. # # @since 2.0.0 def initialize(addresses_or_uri, options = Options::Redacted.new) @monitoring = Monitoring.new(options) if addresses_or_uri.is_a?(::String) create_from_uri(addresses_or_uri, validate_options!(options)) else create_from_addresses(addresses_or_uri, validate_options!(options)) end yield(self) if block_given? end # Get an inspection of the client as a string. # # @example Inspect the client. # client.inspect # # @return [ String ] The inspection string. # # @since 2.0.0 def inspect "#" end # Get the server selector. It either uses the read preference defined in the client options # or defaults to a Primary server selector. # # @example Get the server selector. # client.server_selector # # @return [ Mongo::ServerSelector ] The server selector using the user-defined read preference # or a Primary server selector default. # # @since 2.5.0 def server_selector @server_selector ||= ServerSelector.get(read_preference || ServerSelector::PRIMARY) end # Get the read preference from the options passed to the client. # # @example Get the read preference. # client.read_preference # # @return [ BSON::Document ] The user-defined read preference. # # @since 2.0.0 def read_preference @read_preference ||= options[:read] end # Use the database with the provided name. This will switch the current # database the client is operating on. # # @example Use the provided database. # client.use(:users) # # @param [ String, Symbol ] name The name of the database to use. # # @return [ Mongo::Client ] The new client with new database. 
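# --- Illustrative example (added for exposition; not part of the original gem source) ---
# Constructing a client with a few of the options documented above and inspecting the
# derived read preference and server selector. Assumes a mongod listening on
# 127.0.0.1:27017; constructing the client starts background monitoring of that address.
require 'mongo'

client = Mongo::Client.new(
  ['127.0.0.1:27017'],
  database:      'test',
  read:          { mode: :primary_preferred },
  write:         { w: 1 },
  max_pool_size: 10
)

client.read_preference       #=> {:mode=>:primary_preferred}
client.server_selector.class #=> Mongo::ServerSelector::PrimaryPreferred
# --- end illustrative example ---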
# # @since 2.0.0 def use(name) with(database: name) end # Provides a new client with the passed options merged over the existing # options of this client. Useful for one-offs to change specific options # without altering the original client. # # @example Get a client with changed options. # client.with(:read => { :mode => :primary_preferred }) # # @param [ Hash ] new_options The new options to use. # # @return [ Mongo::Client ] A new client instance. # # @since 2.0.0 def with(new_options = Options::Redacted.new) clone.tap do |client| opts = validate_options!(new_options) client.options.update(opts) Database.create(client) # We can't use the same cluster if some options that would affect it # have changed. if cluster_modifying?(opts) Cluster.create(client) end end end # Get the write concern for this client. If no option was provided, then a # default single server acknowledgement will be used. # # @example Get the client write concern. # client.write_concern # # @return [ Mongo::WriteConcern ] The write concern. # # @since 2.0.0 def write_concern @write_concern ||= WriteConcern.get(options[:write]) end # Close all connections. # # @example Disconnect the client. # client.close # # @return [ true ] Always true. # # @since 2.1.0 def close @cluster.disconnect! and true end # Reconnect the client. # # @example Reconnect the client. # client.reconnect # # @return [ true ] Always true. # # @since 2.1.0 def reconnect @cluster.reconnect! and true end # Get the names of all databases. # # @example Get the database names. # client.database_names # # @param [ Hash ] filter The filter criteria for getting a list of databases. # @param [ Hash ] opts The command options. # # @return [ Array ] The names of the databases. # # @since 2.0.5 def database_names(filter = {}, opts = {}) list_databases(filter, true, opts).collect{ |info| info[Database::NAME] } end # Get info for each database. # # @example Get the info for each database. # client.list_databases # # @param [ Hash ] filter The filter criteria for getting a list of databases. # @param [ true, false ] name_only Whether to only return each database name without full metadata. # @param [ Hash ] opts The command options. # # @return [ Array ] The info for each database. # # @since 2.0.5 def list_databases(filter = {}, name_only = false, opts = {}) cmd = { listDatabases: 1 } cmd[:nameOnly] = !!name_only cmd[:filter] = filter unless filter.empty? use(Database::ADMIN).command(cmd, opts).first[Database::DATABASES] end # Returns a list of Mongo::Database objects. # # @example Get a list of Mongo::Database objects. # client.list_mongo_databases # # @param [ Hash ] filter The filter criteria for getting a list of databases. # @param [ Hash ] opts The command options. # # @return [ Array ] The list of database objects. # # @since 2.5.0 def list_mongo_databases(filter = {}, opts = {}) database_names(filter, opts).collect do |name| Database.new(self, name, options) end end # Start a session. # # @example Start a session. # client.start_session(causal_consistency: true) # # @param [ Hash ] options The session options. # # @note A Session cannot be used by multiple threads at once; session objects are not # thread-safe. # # @return [ Session ] The session. 
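# --- Illustrative example (added for exposition; not part of the original gem source) ---
# Deriving new clients and starting an explicit session, as documented above. #use and
# #with return new client objects rather than mutating the original; #start_session
# requires a deployment that supports sessions (MongoDB 3.6+), and #database_names
# issues listDatabases against the admin database. The database list is an example.
require 'mongo'

client = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')

admin = client.use(:admin)            # same options, admin database
w2    = client.with(write: { w: 2 })  # merged options, new client instance
client.database_names                 #=> e.g. ["admin", "local", "test"]

session = client.start_session(causal_consistency: true)
session.class #=> Mongo::Session
# --- end illustrative example ---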
# # @since 2.5.0 def start_session(options = {}) cluster.send(:get_session, options.merge(implicit: false)) || (raise Error::InvalidSession.new(Session::SESSIONS_NOT_SUPPORTED)) end private def get_session(options = {}) cluster.send(:get_session, options) end def with_session(options = {}, &block) cluster.send(:with_session, options, &block) end def create_from_addresses(addresses, opts = Options::Redacted.new) @options = Database::DEFAULT_OPTIONS.merge(opts).freeze @cluster = Cluster.new(addresses, @monitoring, options) @database = Database.new(self, options[:database], options) end def create_from_uri(connection_string, opts = Options::Redacted.new) uri = URI.get(connection_string, opts) @options = validate_options!(Database::DEFAULT_OPTIONS.merge(uri.client_options.merge(opts))).freeze @cluster = Cluster.new(uri.servers, @monitoring, options) @database = Database.new(self, options[:database], options) end def initialize_copy(original) @options = original.options.dup @monitoring = Monitoring.new(@options) @database = nil @read_preference = nil @write_concern = nil end def cluster_modifying?(new_options) cluster_options = new_options.reject do |name| CRUD_OPTIONS.include?(name.to_sym) end cluster_options.any? do |name, value| options[name] != value end end def validate_options!(opts = Options::Redacted.new) return Options::Redacted.new unless opts opts.each.inject(Options::Redacted.new) do |_options, (k, v)| key = k.to_sym if VALID_OPTIONS.include?(key) validate_max_min_pool_size!(key, opts) if key == :compressors compressors = valid_compressors(v) _options[key] = compressors unless compressors.empty? else _options[key] = v end else log_warn("Unsupported client option '#{k}'. It will be ignored.") end _options end end def valid_compressors(compressors) compressors.select do |compressor| if !VALID_COMPRESSORS.include?(compressor) log_warn("Unsupported compressor '#{compressor}' in list '#{compressors}'. " + "This compressor will not be used.") false else true end end end def validate_max_min_pool_size!(option, opts) if option == :min_pool_size && opts[:min_pool_size] max = opts[:max_pool_size] || Server::ConnectionPool::Queue::MAX_SIZE raise Error::InvalidMinPoolSize.new(opts[:min_pool_size], max) unless opts[:min_pool_size] <= max end true end end end mongo-2.5.1/lib/mongo/grid.rb0000644000004100000410000000123613257253113016031 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/grid/file' require 'mongo/grid/fs_bucket' require 'mongo/grid/stream' mongo-2.5.1/lib/mongo/socket/0000755000004100000410000000000013257253113016045 5ustar www-datawww-datamongo-2.5.1/lib/mongo/socket/ssl.rb0000644000004100000410000001343413257253113017200 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
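# --- Illustrative example (added for exposition; not part of the original gem source) ---
# The client can also be built from a MongoDB URI string (handled by create_from_uri
# above); URI options are merged with the explicit options hash, and unsupported
# options or compressors are logged and dropped by validate_options!/valid_compressors.
# The URI below is an example value.
require 'mongo'

client = Mongo::Client.new(
  'mongodb://127.0.0.1:27017/test?replicaSet=rs0&readPreference=secondaryPreferred',
  compressors: ['zlib'],        # only zlib is supported; anything else is warned about and ignored
  zlib_compression_level: 6
)
client.options[:replica_set] #=> "rs0"
# --- end illustrative example ---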
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'openssl' module Mongo class Socket # Wrapper for SSL sockets. # # @since 2.0.0 class SSL < Socket include OpenSSL # @return [ SSLContext ] context The ssl context. attr_reader :context # @return [ String ] host The host to connect to. attr_reader :host # @return [ String ] host_name The original host name. attr_reader :host_name # @return [ Hash ] The ssl options. attr_reader :options # @return [ Integer ] port The port to connect to. attr_reader :port # @return [ Float ] timeout The socket timeout. attr_reader :timeout # Establishes a socket connection. # # @example Connect the socket. # sock.connect! # # @note This method mutates the object by setting the socket # internally. # # @return [ SSL ] The connected socket instance. # # @since 2.0.0 def connect!(connect_timeout = nil) Timeout.timeout(connect_timeout, Error::SocketTimeoutError) do handle_errors { @tcp_socket.connect(::Socket.pack_sockaddr_in(port, host)) } @socket = OpenSSL::SSL::SSLSocket.new(@tcp_socket, context) @socket.hostname = @host_name unless BSON::Environment.jruby? @socket.sync_close = true handle_errors { @socket.connect } verify_certificate!(@socket) self end end # Initializes a new SSL socket. # # @example Create the SSL socket. # SSL.new('::1', 27017, 30) # # @param [ String ] host The hostname or IP address. # @param [ Integer ] port The port number. # @param [ Float ] timeout The socket timeout value. # @param [ Integer ] family The socket family. # @param [ Hash ] options The ssl options. # # @since 2.0.0 def initialize(host, port, host_name, timeout, family, options = {}) @host, @port, @host_name, @timeout, @options = host, port, host_name, timeout, options @context = create_context(options) @family = family @tcp_socket = ::Socket.new(family, SOCK_STREAM, 0) @tcp_socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) set_socket_options(@tcp_socket) end # Read a single byte from the socket. # # @example Read a single byte. # socket.readbyte # # @return [ Object ] The read byte. # # @since 2.0.0 def readbyte handle_errors do byte = socket.read(1).bytes.to_a[0] byte.nil? ? raise(EOFError) : byte end end # This socket can only be used if the ssl socket (@socket) has been created. # # @example Is the socket connectable? # socket.connectable? # # @return [ true, false ] If the socket is connectable. # # @since 2.2.5 def connectable? !!@socket end private def create_context(options) context = OpenSSL::SSL::SSLContext.new set_cert(context, options) set_key(context, options) set_cert_verification(context, options) unless options[:ssl_verify] == false context end def set_cert(context, options) if options[:ssl_cert] context.cert = OpenSSL::X509::Certificate.new(File.open(options[:ssl_cert])) elsif options[:ssl_cert_string] context.cert = OpenSSL::X509::Certificate.new(options[:ssl_cert_string]) elsif options[:ssl_cert_object] context.cert = options[:ssl_cert_object] end end def set_key(context, options) passphrase = options[:ssl_key_pass_phrase] if options[:ssl_key] context.key = passphrase ? 
OpenSSL::PKey.read(File.open(options[:ssl_key]), passphrase) : OpenSSL::PKey.read(File.open(options[:ssl_key])) elsif options[:ssl_key_string] context.key = passphrase ? OpenSSL::PKey.read(options[:ssl_key_string], passphrase) : OpenSSL::PKey.read(options[:ssl_key_string]) elsif options[:ssl_key_object] context.key = options[:ssl_key_object] end end def set_cert_verification(context, options) context.verify_mode = OpenSSL::SSL::VERIFY_PEER cert_store = OpenSSL::X509::Store.new if options[:ssl_ca_cert] cert_store.add_cert(OpenSSL::X509::Certificate.new(File.open(options[:ssl_ca_cert]))) elsif options[:ssl_ca_cert_string] cert_store.add_cert(OpenSSL::X509::Certificate.new(options[:ssl_ca_cert_string])) elsif options[:ssl_ca_cert_object] raise TypeError("Option :ssl_ca_cert_object should be an array of OpenSSL::X509:Certificate objects") unless options[:ssl_ca_cert_object].is_a? Array options[:ssl_ca_cert_object].each {|cert| cert_store.add_cert(cert)} else cert_store.set_default_paths end context.cert_store = cert_store end def verify_certificate!(socket) if context.verify_mode == OpenSSL::SSL::VERIFY_PEER unless OpenSSL::SSL.verify_certificate_identity(socket.peer_cert, host_name) raise Error::SocketError, 'SSL handshake failed due to a hostname mismatch.' end end end end end end mongo-2.5.1/lib/mongo/socket/tcp.rb0000644000004100000410000000461113257253113017162 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Socket # Wrapper for TCP sockets. # # @since 2.0.0 class TCP < Socket # @return [ String ] host The host to connect to. attr_reader :host # @return [ Integer ] port The port to connect to. attr_reader :port # @return [ Float ] timeout The socket timeout. attr_reader :timeout # Establishes a socket connection. # # @example Connect the socket. # sock.connect! # # @note This method mutates the object by setting the socket # internally. # # @return [ TCP ] The connected socket instance. # # @since 2.0.0 def connect!(connect_timeout = nil) Timeout.timeout(connect_timeout, Error::SocketTimeoutError) do socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) handle_errors { socket.connect(::Socket.pack_sockaddr_in(port, host)) } self end end # Initializes a new TCP socket. # # @example Create the TCP socket. # TCP.new('::1', 27017, 30, Socket::PF_INET) # TCP.new('127.0.0.1', 27017, 30, Socket::PF_INET) # # @param [ String ] host The hostname or IP address. # @param [ Integer ] port The port number. # @param [ Float ] timeout The socket timeout value. # @param [ Integer ] family The socket family. # # @since 2.0.0 def initialize(host, port, timeout, family) @host, @port, @timeout = host, port, timeout super(family) end # This object does not wrap another socket so it's always connectable. # # @example Is the socket connectable? # socket.connectable? # # @return [ true, false ] If the socket is connectable. # # @since 2.2.5 def connectable? 
true end end end end mongo-2.5.1/lib/mongo/socket/unix.rb0000644000004100000410000000370413257253113017361 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Socket # Wrapper for Unix sockets. # # @since 2.0.0 class Unix < Socket # @return [ String ] path The path to connect to. attr_reader :path # @return [ Float ] timeout The socket timeout. attr_reader :timeout # Establishes a socket connection. # # @example Connect the socket. # sock.connect! # # @note This method mutates the object by setting the socket # internally. # # @return [ Unix ] The connected socket instance. # # @since 2.0.0 def connect!(connect_timeout = nil) self end # Initializes a new Unix socket. # # @example Create the Unix socket. # Unix.new('/path/to.sock', 5) # # @param [ String ] path The path. # @param [ Float ] timeout The socket timeout value. # # @since 2.0.0 def initialize(path, timeout) @path, @timeout = path, timeout @socket = ::UNIXSocket.new(path) set_socket_options(@socket) end # This socket can only be used if the unix socket (@socket) has been created. # # @example Is the socket connectable? # socket.connectable? # # @return [ true, false ] If the socket is connectable. # # @since 2.2.5 def connectable? !!@socket end end end end mongo-2.5.1/lib/mongo/event.rb0000644000004100000410000000264513257253113016232 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/event/listeners' require 'mongo/event/publisher' require 'mongo/event/subscriber' require 'mongo/event/primary_elected' require 'mongo/event/member_discovered' require 'mongo/event/description_changed' require 'mongo/event/standalone_discovered' module Mongo module Event # When a standalone is discovered. # # @since 2.0.6 STANDALONE_DISCOVERED = 'standalone_discovered'.freeze # When a server is elected primary. # # @since 2.0.0 # # @deprecated. Will be removed in 3.0 PRIMARY_ELECTED = 'primary_elected'.freeze # When a server is discovered to be a member of a topology. # # @since 2.4.0 MEMBER_DISCOVERED = 'member_discovered'.freeze # When a server is to be removed from a cluster. # # @since 2.0.6 DESCRIPTION_CHANGED = 'description_changed'.freeze end end mongo-2.5.1/lib/mongo/loggable.rb0000644000004100000410000000527213257253113016664 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
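# A short, illustrative sketch for the Unix domain socket wrapper defined
# above; the socket path is a placeholder:
#
#   socket = Mongo::Socket::Unix.new('/tmp/mongodb-27017.sock', 5)
#   socket.connect!      # a no-op: the connection is established in #initialize
#   socket.connectable?  # => true once the underlying ::UNIXSocket exists
#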
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo # Allows objects to easily log operations. # # @since 2.0.0 module Loggable # The standard MongoDB log prefix. # # @since 2.0.0 PREFIX = 'MONGODB'.freeze # Convenience method to log debug messages with the standard prefix. # # @example Log a debug message. # log_debug('Message') # # @param [ String ] message The message to log. # # @since 2.0.0 def log_debug(message) logger.debug(format_message(message)) if logger.debug? end # Convenience method to log error messages with the standard prefix. # # @example Log a error message. # log_error('Message') # # @param [ String ] message The message to log. # # @since 2.0.0 def log_error(message) logger.error(format_message(message)) if logger.error? end # Convenience method to log fatal messages with the standard prefix. # # @example Log a fatal message. # log_fatal('Message') # # @param [ String ] message The message to log. # # @since 2.0.0 def log_fatal(message) logger.fatal(format_message(message)) if logger.fatal? end # Convenience method to log info messages with the standard prefix. # # @example Log a info message. # log_info('Message') # # @param [ String ] message The message to log. # # @since 2.0.0 def log_info(message) logger.info(format_message(message)) if logger.info? end # Convenience method to log warn messages with the standard prefix. # # @example Log a warn message. # log_warn('Message') # # @param [ String ] message The message to log. # # @since 2.0.0 def log_warn(message) logger.warn(format_message(message)) if logger.warn? end # Get the logger instance. # # @example Get the logger instance. # loggable.logger # # @return [ Logger ] The logger. # # @since 2.1.0 def logger ((options && options[:logger]) || Logger.logger) end private def format_message(message) format("%s | %s".freeze, PREFIX, message) end end end mongo-2.5.1/lib/mongo/version.rb0000644000004100000410000000126213257253113016570 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo # The current version of the driver. # # @since 2.0.0 VERSION = '2.5.1'.freeze end mongo-2.5.1/lib/mongo/auth.rb0000644000004100000410000000542013257253113016044 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
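# An illustrative sketch of mixing in the Loggable module defined above;
# ExampleOperation is an invented class used only for demonstration:
#
#   class ExampleOperation
#     include Mongo::Loggable
#
#     # Loggable#logger checks options[:logger] first, then Mongo::Logger.logger.
#     def options
#       { logger: ::Logger.new($stdout) }
#     end
#
#     def run
#       log_debug('starting') # emits "MONGODB | starting" at debug level
#     end
#   end
#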
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/auth/cr' require 'mongo/auth/ldap' require 'mongo/auth/scram' require 'mongo/auth/user' require 'mongo/auth/x509' require 'mongo/auth/roles' module Mongo # This namespace contains all authentication related behaviour. # # @since 2.0.0 module Auth extend self # The external database name. # # @since 2.0.0 EXTERNAL = '$external'.freeze # Constant for the nonce command. # # @since 2.0.0 GET_NONCE = { getnonce: 1 }.freeze # Constant for the nonce field. # # @since 2.0.0 NONCE = 'nonce'.freeze # Map the symbols parsed from the URI connection string to strategies. # # @since 2.0.0 SOURCES = { mongodb_cr: CR, mongodb_x509: X509, plain: LDAP, scram: SCRAM } # Get the authorization strategy for the provided auth mechanism. # # @example Get the strategy. # Auth.get(user) # # @param [ Auth::User ] user The user object. # # @return [ CR, X509, LDAP, Kerberos ] The auth strategy. # # @since 2.0.0 def get(user) mechanism = user.mechanism raise InvalidMechanism.new(mechanism) if !SOURCES.has_key?(mechanism) SOURCES[mechanism].new(user) end # Raised when trying to get an invalid authorization mechanism. # # @since 2.0.0 class InvalidMechanism < RuntimeError # Instantiate the new error. # # @example Instantiate the error. # Mongo::Auth::InvalidMechanism.new(:test) # # @param [ Symbol ] mechanism The provided mechanism. # # @since 2.0.0 def initialize(mechanism) super("#{mechanism.inspect} is invalid, please use mongodb_cr, mongodb_x509, gssapi or plain.") end end # Raised when a user is not authorized on a database. # # @since 2.0.0 class Unauthorized < RuntimeError # Instantiate the new error. # # @example Instantiate the error. # Mongo::Auth::Unauthorized.new(user) # # @param [ Mongo::Auth::User ] user The unauthorized user. # # @since 2.0.0 def initialize(user) super("User #{user.name} is not authorized to access #{user.database}.") end end end end mongo-2.5.1/lib/mongo/grid/0000755000004100000410000000000013257253113015502 5ustar www-datawww-datamongo-2.5.1/lib/mongo/grid/fs_bucket.rb0000644000004100000410000003732513257253113020006 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Grid # Represents a view of the GridFS in the database. # # @since 2.0.0 class FSBucket extend Forwardable # The default root prefix. # # @since 2.0.0 DEFAULT_ROOT = 'fs'.freeze # The specification for the chunks collection index. # # @since 2.0.0 CHUNKS_INDEX = { :files_id => 1, :n => 1 }.freeze # The specification for the files collection index. 
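# A hedged sketch of the Auth.get factory defined above; the credential
# values and the option keys (:user, :password, :auth_mech) are assumptions
# to be checked against Mongo::Auth::User's documentation:
#
#   user = Mongo::Auth::User.new(user: 'alice', password: 'secret', auth_mech: :scram)
#   Mongo::Auth.get(user)  # => a Mongo::Auth::SCRAM strategy for this user
#   # A user whose mechanism is not a key of Auth::SOURCES raises
#   # Mongo::Auth::InvalidMechanism.
#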
# # @since 2.1.0 FILES_INDEX = { filename: 1, uploadDate: 1 }.freeze # @return [ Collection ] chunks_collection The chunks collection. # # @since 2.0.0 attr_reader :chunks_collection # @return [ Database ] database The database. # # @since 2.0.0 attr_reader :database # @return [ Collection ] files_collection The files collection. # # @since 2.0.0 attr_reader :files_collection # @return [ Hash ] options The FSBucket options. # # @since 2.1.0 attr_reader :options # Get client from the database. # # @since 2.1.0 def_delegators :database, :client # Find files collection documents matching a given selector. # # @example Find files collection documents by a filename. # fs.find(filename: 'file.txt') # # @param [ Hash ] selector The selector to use in the find. # @param [ Hash ] options The options for the find. # # @option options [ Integer ] :batch_size The number of documents returned in each batch # of results from MongoDB. # @option options [ Integer ] :limit The max number of docs to return from the query. # @option options [ true, false ] :no_cursor_timeout The server normally times out idle # cursors after an inactivity period (10 minutes) to prevent excess memory use. # Set this option to prevent that. # @option options [ Integer ] :skip The number of docs to skip before returning results. # @option options [ Hash ] :sort The key and direction pairs by which the result set # will be sorted. # # @return [ CollectionView ] The collection view. # # @since 2.1.0 def find(selector = nil, options = {}) opts = options.merge(read: read_preference) if read_preference files_collection.find(selector, opts || options) end # Find a file in the GridFS. # # @example Find a file by its id. # fs.find_one(_id: id) # # @example Find a file by its filename. # fs.find_one(filename: 'test.txt') # # @param [ Hash ] selector The selector. # # @return [ Grid::File ] The file. # # @since 2.0.0 # # @deprecated Please use #find instead with a limit of -1. # Will be removed in version 3.0. def find_one(selector = nil) file_info = files_collection.find(selector).first return nil unless file_info chunks = chunks_collection.find(:files_id => file_info[:_id]).sort(:n => 1) Grid::File.new(chunks.to_a, Options::Mapper.transform(file_info, Grid::File::Info::MAPPINGS.invert)) end # Insert a single file into the GridFS. # # @example Insert a single file. # fs.insert_one(file) # # @param [ Grid::File ] file The file to insert. # # @return [ BSON::ObjectId ] The file id. # # @since 2.0.0 # # @deprecated Please use #upload_from_stream or #open_upload_stream instead. # Will be removed in version 3.0. def insert_one(file) @indexes ||= ensure_indexes! chunks_collection.insert_many(file.chunks) files_collection.insert_one(file.info) file.id end # Create the GridFS. # # @example Create the GridFS. # Grid::FSBucket.new(database) # # @param [ Database ] database The database the files reside in. # @param [ Hash ] options The GridFS options. # # @option options [ String ] :fs_name The prefix for the files and chunks # collections. # @option options [ String ] :bucket_name The prefix for the files and chunks # collections. # @option options [ Integer ] :chunk_size Override the default chunk # size. # @option options [ String ] :write The write concern. # @option options [ String ] :read The read preference. 
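# A brief usage sketch for the query helpers above; `database` is assumed to
# be an existing Mongo::Database and 'report.txt' a previously stored file:
#
#   fs = Mongo::Grid::FSBucket.new(database, bucket_name: 'assets')
#   fs.find({ filename: 'report.txt' }, limit: 10).each do |doc|
#     puts "#{doc[:_id]} #{doc[:length]} bytes"
#   end
#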
# # @since 2.0.0 def initialize(database, options = {}) @database = database @options = options @chunks_collection = database[chunks_name] @files_collection = database[files_name] end # Get the prefix for the GridFS # # @example Get the prefix. # fs.prefix # # @return [ String ] The GridFS prefix. # # @since 2.0.0 def prefix @options[:fs_name] || @options[:bucket_name]|| DEFAULT_ROOT end # Remove a single file from the GridFS. # # @example Remove a file from the GridFS. # fs.delete_one(file) # # @param [ Grid::File ] file The file to remove. # # @return [ Result ] The result of the remove. # # @since 2.0.0 def delete_one(file) delete(file.id) end # Remove a single file, identified by its id from the GridFS. # # @example Remove a file from the GridFS. # fs.delete(id) # # @param [ BSON::ObjectId, Object ] id The id of the file to remove. # # @return [ Result ] The result of the remove. # # @raise [ Error::FileNotFound ] If the file is not found. # # @since 2.1.0 def delete(id) result = files_collection.find({ :_id => id }, @options).delete_one chunks_collection.find({ :files_id => id }, @options).delete_many raise Error::FileNotFound.new(id, :id) if result.n == 0 result end # Opens a stream from which a file can be downloaded, specified by id. # # @example Open a stream from which a file can be downloaded. # fs.open_download_stream(id) # # @param [ BSON::ObjectId, Object ] id The id of the file to read. # # @return [ Stream::Read ] The stream to read from. # # @yieldparam [ Hash ] The read stream. # # @since 2.1.0 def open_download_stream(id) read_stream(id).tap do |stream| if block_given? yield stream stream.close end end end # Downloads the contents of the file specified by id and writes them to # the destination io object. # # @example Download the file and write it to the io object. # fs.download_to_stream(id, io) # # @param [ BSON::ObjectId, Object ] id The id of the file to read. # @param [ IO ] io The io object to write to. # # @since 2.1.0 def download_to_stream(id, io) open_download_stream(id) do |stream| stream.each do |chunk| io << chunk end end end # Opens a stream from which the application can read the contents of the stored file # specified by filename and the revision in options. # # Revision numbers are defined as follows: # 0 = the original stored file # 1 = the first revision # 2 = the second revision # etc… # -2 = the second most recent revision # -1 = the most recent revision # # @example Open a stream to download the most recent revision. # fs.open_download_stream_by_name('some-file.txt') # # # @example Open a stream to download the original file. # fs.open_download_stream_by_name('some-file.txt', revision: 0) # # @example Open a stream to download the second revision of the stored file. # fs.open_download_stream_by_name('some-file.txt', revision: 2) # # @param [ String ] filename The file's name. # @param [ Hash ] opts Options for the download. # # @option opts [ Integer ] :revision The revision number of the file to download. # Defaults to -1, the most recent version. # # @return [ Stream::Read ] The stream to read from. # # @raise [ Error::FileNotFound ] If the file is not found. # @raise [ Error::InvalidFileRevision ] If the requested revision is not found for the file. # # @yieldparam [ Hash ] The read stream. 
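# An illustrative sketch for the download helpers above; `fs` is assumed to
# be an FSBucket and `id` the _id of a stored file:
#
#   io = StringIO.new
#   fs.download_to_stream(id, io)   # appends every chunk's data to io
#
#   fs.open_download_stream(id) do |stream|
#     stream.each { |chunk_data| $stdout.write(chunk_data) }
#   end                             # the stream is closed when the block returns
#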
# # @since 2.1.0 def open_download_stream_by_name(filename, opts = {}, &block) revision = opts.fetch(:revision, -1) if revision < 0 skip = revision.abs - 1 sort = { 'uploadDate' => Mongo::Index::DESCENDING } else skip = revision sort = { 'uploadDate' => Mongo::Index::ASCENDING } end file_doc = files_collection.find({ filename: filename} , projection: { _id: 1 }, sort: sort, skip: skip, limit: -1).first unless file_doc raise Error::FileNotFound.new(filename, :filename) unless opts[:revision] raise Error::InvalidFileRevision.new(filename, opts[:revision]) end open_download_stream(file_doc[:_id], &block) end # Downloads the contents of the stored file specified by filename and by the # revision in options and writes the contents to the destination io object. # # Revision numbers are defined as follows: # 0 = the original stored file # 1 = the first revision # 2 = the second revision # etc… # -2 = the second most recent revision # -1 = the most recent revision # # @example Download the most recent revision. # fs.download_to_stream_by_name('some-file.txt', io) # # # @example Download the original file. # fs.download_to_stream_by_name('some-file.txt', io, revision: 0) # # @example Download the second revision of the stored file. # fs.download_to_stream_by_name('some-file.txt', io, revision: 2) # # @param [ String ] filename The file's name. # @param [ IO ] io The io object to write to. # @param [ Hash ] opts Options for the download. # # @option opts [ Integer ] :revision The revision number of the file to download. # Defaults to -1, the most recent version. # # @raise [ Error::FileNotFound ] If the file is not found. # @raise [ Error::InvalidFileRevision ] If the requested revision is not found for the file. # # @since 2.1.0 def download_to_stream_by_name(filename, io, opts = {}) download_to_stream(open_download_stream_by_name(filename, opts).file_id, io) end # Opens an upload stream to GridFS to which the contents of a user file came be written. # # @example Open a stream to which the contents of a file came be written. # fs.open_upload_stream('a-file.txt') # # @param [ String ] filename The filename of the file to upload. # @param [ Hash ] opts The options for the write stream. # # @option opts [ Object ] :file_id An optional unique file id. An ObjectId is generated otherwise. # @option opts [ Integer ] :chunk_size Override the default chunk size. # @option opts [ Hash ] :write The write concern. # @option opts [ Hash ] :metadata User data for the 'metadata' field of the files # collection document. # @option opts [ String ] :content_type The content type of the file. # Deprecated, please use the metadata document instead. # @option opts [ Array ] :aliases A list of aliases. # Deprecated, please use the metadata document instead. # # @return [ Stream::Write ] The write stream. # # @yieldparam [ Hash ] The write stream. # # @since 2.1.0 def open_upload_stream(filename, opts = {}) write_stream(filename, opts).tap do |stream| if block_given? yield stream stream.close end end end # Uploads a user file to a GridFS bucket. # Reads the contents of the user file from the source stream and uploads it as chunks in the # chunks collection. After all the chunks have been uploaded, it creates a files collection # document for the filename in the files collection. # # @example Upload a file to the GridFS bucket. # fs.upload_from_stream('a-file.txt', file) # # @param [ String ] filename The filename of the file to upload. # @param [ IO ] io The source io stream to upload from. 
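# A short sketch of the revision-aware helpers and the upload stream above;
# filenames, metadata and io objects are placeholders:
#
#   fs.download_to_stream_by_name('report.txt', io)               # most recent revision
#   fs.download_to_stream_by_name('report.txt', io, revision: 0)  # original upload
#
#   fs.open_upload_stream('report.txt', metadata: { owner: 'ops' }) do |stream|
#     stream.write(StringIO.new('hello gridfs'))
#   end
#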
# @param [ Hash ] opts The options for the write stream. # # @option opts [ Object ] :file_id An optional unique file id. An ObjectId is generated otherwise. # @option opts [ Integer ] :chunk_size Override the default chunk size. # @option opts [ Hash ] :write The write concern. # @option opts [ Hash ] :metadata User data for the 'metadata' field of the files # collection document. # @option opts [ String ] :content_type The content type of the file. Deprecated, please # use the metadata document instead. # @option opts [ Array ] :aliases A list of aliases. Deprecated, please use the # metadata document instead. # # @return [ BSON::ObjectId ] The ObjectId file id. # # @since 2.1.0 def upload_from_stream(filename, io, opts = {}) open_upload_stream(filename, opts) do |stream| begin stream.write(io) rescue IOError begin stream.abort rescue Error::OperationFailure end raise end end.file_id end # Get the read preference. # # @example Get the read preference. # fs.read_preference # # @return [ Mongo::ServerSelector ] The read preference. # # @since 2.1.0 def read_preference @read_preference ||= options[:read] || database.read_preference end # Get the write concern. # # @example Get the write concern. # stream.write_concern # # @return [ Mongo::WriteConcern ] The write concern. # # @since 2.1.0 def write_concern @write_concern ||= @options[:write] ? WriteConcern.get(@options[:write]) : database.write_concern end private def read_stream(id) Stream.get(self, Stream::READ_MODE, { file_id: id }.merge!(options)) end def write_stream(filename, opts) Stream.get(self, Stream::WRITE_MODE, { filename: filename }.merge!(options).merge!(opts)) end def chunks_name "#{prefix}.#{Grid::File::Chunk::COLLECTION}" end def files_name "#{prefix}.#{Grid::File::Info::COLLECTION}" end def ensure_indexes! if files_collection.find({}, limit: 1, projection: { _id: 1 }).first.nil? chunks_collection.indexes.create_one(FSBucket::CHUNKS_INDEX, :unique => true) files_collection.indexes.create_one(FSBucket::FILES_INDEX) end end end end end mongo-2.5.1/lib/mongo/grid/stream/0000755000004100000410000000000013257253113016775 5ustar www-datawww-datamongo-2.5.1/lib/mongo/grid/stream/read.rb0000644000004100000410000001441713257253113020244 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Grid class FSBucket module Stream # A stream that reads files from the FSBucket. # # @since 2.1.0 class Read include Enumerable # @return [ FSBucket ] fs The fs bucket from which this stream reads. # # @since 2.1.0 attr_reader :fs # @return [ Hash ] options The stream options. # # @since 2.1.0 attr_reader :options # @return [ BSON::ObjectId, Object ] file_id The id of the file being read. # # @since 2.1.0 attr_reader :file_id # Create a stream for reading files from the FSBucket. # # @example Create the stream. # Stream::Read.new(fs, options) # # @param [ FSBucket ] fs The GridFS bucket object. # @param [ Hash ] options The read stream options. 
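# An illustrative sketch for upload_from_stream above; the local path is a
# placeholder:
#
#   file_id = ::File.open('/tmp/report.txt', 'rb') do |io|
#     fs.upload_from_stream('report.txt', io, chunk_size: 255 * 1024)
#   end
#   # If reading the source io raises IOError, the chunks written so far are
#   # removed via Stream::Write#abort before the error is re-raised.
#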
# # @since 2.1.0 def initialize(fs, options) @fs = fs @options = options.dup @file_id = @options.delete(:file_id) @open = true end # Iterate through chunk data streamed from the FSBucket. # # @example Iterate through the chunk data. # stream.each do |data| # buffer << data # end # # @return [ Enumerator ] The enumerator. # # @raise [ Error::MissingFileChunk ] If a chunk is found out of sequence. # # @yieldparam [ Hash ] Each chunk of file data. # # @since 2.1.0 def each ensure_readable! num_chunks = (file_info.length + file_info.chunk_size - 1) / file_info.chunk_size view.each_with_index.reduce(0) do |length_read, (doc, index)| chunk = Grid::File::Chunk.new(doc) validate!(index, num_chunks, chunk, length_read) data = chunk.data.data yield data length_read += data.size end if block_given? view.to_enum end # Read all file data. # # @example Read the file data. # stream.read # # @return [ String ] The file data. # # @raise [ Error::MissingFileChunk ] If a chunk is found out of sequence. # # @since 2.1.0 def read to_a.join end # Close the read stream. # # @example Close the stream. # stream.close # # @return [ BSON::ObjectId, Object ] The file id. # # @raise [ Error::ClosedStream ] If the stream is already closed. # # @since 2.1.0 def close ensure_open! view.close_query @open = false file_id end # Is the stream closed. # # @example Is the stream closd. # stream.closed? # # @return [ true, false ] Whether the stream is closed. # # @since 2.1.0 def closed? !@open end # Get the read preference used when streaming. # # @example Get the read preference. # stream.read_preference # # @return [ Mongo::ServerSelector ] The read preference. # # @since 2.1.0 def read_preference @read_preference ||= options[:read] || fs.read_preference end # Get the files collection file information document for the file being read. # # @example Get the file info document. # stream.file_info # # @return [ Hash ] The file info document. # # @since 2.1.0 def file_info doc = fs.files_collection.find(_id: file_id).first if doc @file_info ||= File::Info.new(Options::Mapper.transform(doc, File::Info::MAPPINGS.invert)) end end private def ensure_open! raise Error::ClosedStream.new if closed? end def ensure_file_info! raise Error::FileNotFound.new(file_id, :id) unless file_info end def ensure_readable! ensure_open! ensure_file_info! end def view @view ||= (opts = options.merge(read: read_preference) if read_preference fs.chunks_collection.find({ :files_id => file_id }, opts || options).sort(:n => 1)) end def validate!(index, num_chunks, chunk, length_read) validate_n!(index, chunk) validate_length!(index, num_chunks, chunk, length_read) end def raise_unexpected_chunk_length!(chunk) close raise Error::UnexpectedChunkLength.new(file_info.chunk_size, chunk) end def validate_length!(index, num_chunks, chunk, length_read) if num_chunks > 0 && chunk.data.data.size > 0 raise Error::ExtraFileChunk.new unless index < num_chunks if index == num_chunks - 1 unless chunk.data.data.size + length_read == file_info.length raise_unexpected_chunk_length!(chunk) end elsif chunk.data.data.size != file_info.chunk_size raise_unexpected_chunk_length!(chunk) end end end def validate_n!(index, chunk) unless index == chunk.n close raise Error::MissingFileChunk.new(index, chunk) end end end end end end end mongo-2.5.1/lib/mongo/grid/stream/write.rb0000644000004100000410000001343513257253113020462 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
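# A minimal sketch for the read stream defined above; `fs` and `id` are
# assumed to be an FSBucket and an existing file id:
#
#   stream = Mongo::Grid::FSBucket::Stream.get(fs, :r, file_id: id)
#   stream.file_info.length  # total length recorded in the files collection
#   data = stream.read       # all chunks, validated and joined in order
#   stream.close             # => id; further use raises Error::ClosedStream
#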
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Grid class FSBucket module Stream # A stream that writes files to the FSBucket. # # @since 2.1.0 class Write # @return [ FSBucket ] fs The fs bucket to which this stream writes. # # @since 2.1.0 attr_reader :fs # @return [ Object ] file_id The id of the file being uploaded. # # @since 2.1.0 attr_reader :file_id # @return [ String ] filename The name of the file being uploaded. # # @since 2.1.0 attr_reader :filename # @return [ Hash ] options The write stream options. # # @since 2.1.0 attr_reader :options # Create a stream for writing files to the FSBucket. # # @example Create the stream. # Stream::Write.new(fs, options) # # @param [ FSBucket ] fs The GridFS bucket object. # @param [ Hash ] options The write stream options. # # @option opts [ Object ] :file_id The file id. An ObjectId is generated otherwise. # @option opts [ Integer ] :chunk_size Override the default chunk size. # @option opts [ Hash ] :write The write concern. # @option opts [ Hash ] :metadata User data for the 'metadata' field of the files collection document. # @option opts [ String ] :content_type The content type of the file. # Deprecated, please use the metadata document instead. # @option opts [ Array ] :aliases A list of aliases. # Deprecated, please use the metadata document instead. # # @since 2.1.0 def initialize(fs, options) @fs = fs @length = 0 @n = 0 @file_id = options[:file_id] || BSON::ObjectId.new @options = options @filename = @options[:filename] @open = true end # Write to the GridFS bucket from the source stream. # # @example Write to GridFS. # stream.write(io) # # @param [ IO ] io The source io stream to upload from. # # @return [ Stream::Write ] self The write stream itself. # # @since 2.1.0 def write(io) ensure_open! @indexes ||= ensure_indexes! @length += io.size chunks = File::Chunk.split(io, file_info, @n) @n += chunks.size chunks_collection.insert_many(chunks) unless chunks.empty? self end # Close the write stream. # # @example Close the stream. # stream.close # # @return [ BSON::ObjectId, Object ] The file id. # # @raise [ Error::ClosedStream ] If the stream is already closed. # # @since 2.1.0 def close ensure_open! update_length files_collection.insert_one(file_info, @options) @open = false file_id end # Get the write concern used when uploading. # # @example Get the write concern. # stream.write_concern # # @return [ Mongo::WriteConcern ] The write concern. # # @since 2.1.0 def write_concern @write_concern ||= @options[:write] ? WriteConcern.get(@options[:write]) : fs.write_concern end # Is the stream closed. # # @example Is the stream closed. # stream.closed? # # @return [ true, false ] Whether the stream is closed. # # @since 2.1.0 def closed? !@open end # Abort the upload by deleting all chunks already inserted. # # @example Abort the write operation. # stream.abort # # @return [ true ] True if the operation was aborted and the stream is closed. 
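# A brief sketch for the write stream defined above; `fs` is assumed to be
# an FSBucket and the contents are placeholders:
#
#   stream = fs.open_upload_stream('notes.txt')
#   stream.write(StringIO.new('first part'))
#   stream.write(StringIO.new('second part'))  # chunk numbering continues across writes
#   file_id = stream.close                      # inserts the files collection document
#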
# # @since 2.1.0 def abort fs.chunks_collection.find({ :files_id => file_id }, @options).delete_many @open = false || true end private def chunks_collection with_write_concern(fs.chunks_collection) end def files_collection with_write_concern(fs.files_collection) end def with_write_concern(collection) if write_concern.nil? || (collection.write_concern && collection.write_concern.options == write_concern.options) collection else collection.with(write: write_concern.options) end end def update_length file_info.document[:length] = @length end def file_info doc = { length: @length, _id: file_id, filename: filename } @file_info ||= File::Info.new(options.merge(doc)) end def ensure_indexes! fs.send(:ensure_indexes!) end def ensure_open! raise Error::ClosedStream.new if closed? end end end end end end mongo-2.5.1/lib/mongo/grid/file/0000755000004100000410000000000013257253113016421 5ustar www-datawww-datamongo-2.5.1/lib/mongo/grid/file/info.rb0000644000004100000410000001521213257253113017702 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Grid class File # Encapsulates behaviour around GridFS files collection file document. # # @since 2.0.0 # # @deprecated Please use the 'stream' API on a FSBucket instead. # Will be removed in driver version 3.0. class Info # Name of the files collection. # # @since 2.0.0 COLLECTION = 'files'.freeze # Mappings of user supplied fields to db specification. # # @since 2.0.0 MAPPINGS = { :chunk_size => :chunkSize, :content_type => :contentType, :filename => :filename, :_id => :_id, :md5 => :md5, :length => :length, :metadata => :metadata, :upload_date => :uploadDate, :aliases => :aliases }.freeze # Default content type for stored files. # # @since 2.0.0 DEFAULT_CONTENT_TYPE = 'binary/octet-stream'.freeze # @return [ BSON::Document ] document The files collection document. attr_reader :document # Is this file information document equal to another? # # @example Check file information document equality. # file_info == other # # @param [ Object ] other The object to check against. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Info) document == other.document end # Get the BSON type for a files information document. # # @example Get the BSON type. # file_info.bson_type # # @return [ Integer ] The BSON type. # # @since 2.0.0 def bson_type BSON::Hash::BSON_TYPE end # Get the file chunk size. # # @example Get the chunk size. # file_info.chunk_size # # @return [ Integer ] The chunksize in bytes. # # @since 2.0.0 def chunk_size document[:chunkSize] end # Get the file information content type. # # @example Get the content type. # file_info.content_type # # @return [ String ] The content type. # # @since 2.0.0 def content_type document[:contentType] end # Get the filename from the file information. # # @example Get the filename. # file_info.filename # # @return [ String ] The filename. 
def filename document[:filename] end # Get the file id from the file information. # # @example Get the file id. # file_info.id # # @return [ BSON::ObjectId ] The file id. # # @since 2.0.0 def id document[:_id] end # Create the new file information document. # # @example Create the new file information document. # Info.new(:filename => 'test.txt') # # @param [ BSON::Document ] document The document to create from. # # @since 2.0.0 def initialize(document) @document = default_document.merge(Options::Mapper.transform(document, MAPPINGS)) @client_md5 = Digest::MD5.new end # Get a readable inspection for the object. # # @example Inspect the file information. # file_info.inspect # # @return [ String ] The nice inspection. # # @since 2.0.0 def inspect "#" end # Get the length of the document in bytes. # # @example Get the file length from the file information document. # file_info.length # # @return [ Integer ] The file length. # # @since 2.0.0 def length document[:length] end alias :size :length # Get the additional metadata from the file information document. # # @example Get additional metadata. # file_info.metadata # # @return [ String ] The additional metadata from file information document. # # @since 2.0.0 def metadata document[:metadata] end # Get the md5 hash. # # @example Get the md5 hash. # file_info.md5 # # @return [ String ] The md5 hash as a string. # # @since 2.0.0 def md5 document[:md5] || @client_md5 end # Convert the file information document to BSON for storage. # # @note If no md5 exists in the file information document (it was loaded # from the server and is not a new file) then we digest the md5 and set it. # # @example Convert the file information document to BSON. # file_info.to_bson # # @param [ BSON::ByteBuffer ] buffer The encoded BSON buffer to append to. # @param [ true, false ] validating_keys Whether keys should be validated when serializing. # # @return [ String ] The raw BSON data. # # @since 2.0.0 def to_bson(buffer = BSON::ByteBuffer.new, validating_keys = BSON::Config.validating_keys?) document[:md5] ||= @client_md5.hexdigest document.to_bson(buffer) end # Get the upload date. # # @example Get the upload date. # file_info.upload_date # # @return [ Time ] The upload date. # # @since 2.0.0 def upload_date document[:uploadDate] end private def default_document BSON::Document.new( :_id => BSON::ObjectId.new, :chunkSize => Chunk::DEFAULT_SIZE, :uploadDate => Time.now.utc, :contentType => DEFAULT_CONTENT_TYPE ) end end end end end mongo-2.5.1/lib/mongo/grid/file/chunk.rb0000644000004100000410000001214313257253113020057 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'stringio' module Mongo module Grid class File # Encapsulates behaviour around GridFS chunks of file data. # # @since 2.0.0 class Chunk # Name of the chunks collection. # # @since 2.0.0 COLLECTION = 'chunks'.freeze # Default size for chunks of data. 
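# An illustrative sketch for the file information document above; the field
# values are placeholders:
#
#   info = Mongo::Grid::File::Info.new(filename: 'report.txt',
#                                      content_type: 'text/plain',
#                                      chunk_size: 1024)
#   info.document[:chunkSize]  # => 1024, user keys are translated via MAPPINGS
#   info.content_type          # => 'text/plain'
#   info.id                    # a generated BSON::ObjectId from default_document
#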
# # @since 2.0.0 DEFAULT_SIZE = (255 * 1024).freeze # @return [ BSON::Document ] document The document to store for the # chunk. attr_reader :document # Check chunk equality. # # @example Check chunk equality. # chunk == other # # @param [ Object ] other The object ot compare to. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Chunk) document == other.document end # Get the BSON type for a chunk document. # # @example Get the BSON type. # chunk.bson_type # # @return [ Integer ] The BSON type. # # @since 2.0.0 def bson_type BSON::Hash::BSON_TYPE end # Get the chunk data. # # @example Get the chunk data. # chunk.data # # @return [ BSON::Binary ] The chunk data. # # @since 2.0.0 def data document[:data] end # Get the chunk id. # # @example Get the chunk id. # chunk.id # # @return [ BSON::ObjectId ] The chunk id. # # @since 2.0.0 def id document[:_id] end # Get the files id. # # @example Get the files id. # chunk.files_id # # @return [ BSON::ObjectId ] The files id. # # @since 2.0.0 def files_id document[:files_id] end # Get the chunk position. # # @example Get the chunk position. # chunk.n # # @return [ Integer ] The chunk position. # # @since 2.0.0 def n document[:n] end # Create the new chunk. # # @example Create the chunk. # Chunk.new(document) # # @param [ BSON::Document ] document The document to create the chunk # from. # # @since 2.0.0 def initialize(document) @document = BSON::Document.new(:_id => BSON::ObjectId.new).merge(document) end # Conver the chunk to BSON for storage. # # @example Convert the chunk to BSON. # chunk.to_bson # # @param [ BSON::ByteBuffer ] buffer The encoded BSON buffer to append to. # @param [ true, false ] validating_keys Whether keys should be validated when serializing. # # @return [ String ] The raw BSON data. # # @since 2.0.0 def to_bson(buffer = BSON::ByteBuffer.new, validating_keys = BSON::Config.validating_keys?) document.to_bson(buffer) end class << self # Takes an array of chunks and assembles them back into the full # piece of raw data. # # @example Assemble the chunks. # Chunk.assemble(chunks) # # @param [ Array ] chunks The chunks. # # @return [ String ] The assembled data. # # @since 2.0.0 def assemble(chunks) chunks.reduce(''){ |data, chunk| data << chunk.data.data } end # Split the provided data into multiple chunks. # # @example Split the data into chunks. # Chunks.split(data) # # @param [ String, IO ] io The raw bytes. # @param [ File::Info ] file_info The files collection file doc. # @param [ Integer ] offset The offset. # # @return [ Array ] The chunks of the data. # # @since 2.0.0 def split(io, file_info, offset = 0) io = StringIO.new(io) if io.is_a?(String) parts = Enumerator.new { |y| y << io.read(file_info.chunk_size) until io.eof? } parts.map.with_index do |bytes, n| file_info.md5.update(bytes) Chunk.new( data: BSON::Binary.new(bytes), files_id: file_info.id, n: n + offset, ) end end end end end end end mongo-2.5.1/lib/mongo/grid/file.rb0000644000004100000410000000724313257253113016754 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
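# A small sketch of splitting and reassembling chunks with the class above;
# the data and chunk size are placeholders:
#
#   info   = Mongo::Grid::File::Info.new(filename: 'blob.bin', chunk_size: 256)
#   chunks = Mongo::Grid::File::Chunk.split('a' * 600, info)  # => 3 chunks (256, 256 and 88 bytes)
#   chunks.first.n                                            # => 0
#   Mongo::Grid::File::Chunk.assemble(chunks)                 # => the original 600-byte string
#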
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/grid/file/chunk' require 'mongo/grid/file/info' module Mongo module Grid # A representation of a file in the database. # # @since 2.0.0 # # @deprecated Please use the 'stream' API on a FSBucket instead. # Will be removed in driver version 3.0. class File extend Forwardable # Delegate to file info for convenience. def_delegators :info, :chunk_size, :content_type, :filename, :id, :md5, :upload_date # @return [ Array ] chunks The file chunks. attr_reader :chunks # @return [ File::Info ] info The file information. attr_reader :info # Check equality of files. # # @example Check the equality of files. # file == other # # @param [ Object ] other The object to check against. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(File) chunks == other.chunks && info == other.info end # Initialize the file. # # @example Create the file. # Grid::File.new(data, :filename => 'test.txt') # # @param [ IO, String, Array ] data The file object, file # contents or chunks. # @param [ BSON::Document, Hash ] options The info options. # # @option options [ String ] :filename Required name of the file. # @option options [ String ] :content_type The content type of the file. # Deprecated, please use the metadata document instead. # @option options [ String ] :metadata Optional file metadata. # @option options [ Integer ] :chunk_size Override the default chunk # size. # @option opts [ Array ] :aliases A list of aliases. # Deprecated, please use the metadata document instead. # # @since 2.0.0 def initialize(data, options = {}) options = options.merge(:length => data.size) unless options[:length] @info = Info.new(options) initialize_chunks!(data) end # Joins chunks into a string. # # @return [ String ] The raw data for the file. # # @since 2.0.0 def data @data ||= Chunk.assemble(chunks) end # Gets a pretty inspection of the file. # # @example Get the file inspection. # file.inspect # # @return [ String ] The file inspection. # # @since 2.0.0 def inspect "#" end private # @note If we have provided an array of BSON::Documents to initialize # with, we have an array of chunk documents and need to create the # chunk objects and assemble the data. If we have an IO object, then # it's the original file data and we must split it into chunks and set # the original data itself. def initialize_chunks!(value) if value.is_a?(Array) @chunks = value.map{ |doc| Chunk.new(doc) } else @chunks = Chunk.split(value, info) end end end end end mongo-2.5.1/lib/mongo/grid/stream.rb0000644000004100000410000000347413257253113017332 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
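# A short sketch for the deprecated Grid::File wrapper defined above; the
# contents and filename are placeholders:
#
#   file = Mongo::Grid::File.new('hello gridfs', filename: 'hello.txt')
#   file.chunks.size  # => 1, the string fits in a single default-sized chunk
#   file.data         # => 'hello gridfs', reassembled from the chunks
#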
# See the License for the specific language governing permissions and # limitations under the License. require 'mongo/grid/stream/read' require 'mongo/grid/stream/write' module Mongo module Grid class FSBucket # A stream that reads and writes files from/to the FSBucket. # # @since 2.1.0 module Stream extend self # The symbol for opening a read stream. # # @since 2.1.0 READ_MODE = :r # The symbol for opening a write stream. # # @since 2.1.0 WRITE_MODE = :w # Mapping from mode to stream class. # # @since 2.1.0 MODE_MAP = { READ_MODE => Read, WRITE_MODE => Write }.freeze # Get a stream for reading/writing files from/to the FSBucket. # # @example Get a stream. # FSBucket::Stream.get(fs, FSBucket::READ_MODE, options) # # @param [ FSBucket ] fs The GridFS bucket object. # @param [ FSBucket::READ_MODE, FSBucket::WRITE_MODE ] mode The stream mode. # @param [ Hash ] options The stream options. # # @return [ Stream::Read, Stream::Write ] The stream object. # # @since 2.1.0 def get(fs, mode, options = {}) MODE_MAP[mode].new(fs, options) end end end end end mongo-2.5.1/lib/mongo/error/0000755000004100000410000000000013257253113015706 5ustar www-datawww-datamongo-2.5.1/lib/mongo/error/unsupported_message_type.rb0000644000004100000410000000141313257253113023367 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised when trying to get a message type from the registry that doesn't exist. # # @since 2.5.0 class UnsupportedMessageType < Error; end end end mongo-2.5.1/lib/mongo/error/operation_failure.rb0000644000004100000410000000541513257253113021747 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised when an operation fails for some reason. # # @since 2.0.0 class OperationFailure < Error extend Forwardable # These are magic error messages that could indicate a master change. # # @since 2.4.2 WRITE_RETRY_MESSAGES = [ 'no master', 'not master', 'could not contact primary', 'Not primary' ].freeze # These are magic error messages that could indicate a cluster # reconfiguration behind a mongos. We cannot check error codes as they # change between versions, for example 15988 which has 2 completely # different meanings between 2.4 and 3.0. 
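# An illustrative sketch for the Stream factory defined above; `fs` and `id`
# are assumed to be an FSBucket and an existing file id:
#
#   Mongo::Grid::FSBucket::Stream.get(fs, :r, file_id: id)        # => Stream::Read
#   Mongo::Grid::FSBucket::Stream.get(fs, :w, filename: 'a.txt')  # => Stream::Write
#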
# # @since 2.1.1 RETRY_MESSAGES = WRITE_RETRY_MESSAGES + [ 'transport error', 'socket exception', "can't connect", 'connect failed', 'error querying', 'could not get last error', 'connection attempt failed', 'interrupted at shutdown', 'unknown replica set', 'dbclient error communicating with server' ].freeze def_delegators :@result, :operation_time # Can the read operation that caused the error be retried? # # @example Is the error retryable? # error.retryable? # # @return [ true, false ] If the error is retryable. # # @since 2.1.1 def retryable? RETRY_MESSAGES.any?{ |m| message.include?(m) } end # Can the write operation that caused the error be retried? # # @example Is the error retryable for writes? # error.write_retryable? # # @return [ true, false ] If the error is retryable. # # @since 2.4.2 def write_retryable? WRITE_RETRY_MESSAGES.any? { |m| message.include?(m) } end # Create the operation failure. # # @example Create the error object # OperationFailure.new(message, result) # # param [ String ] message The error message. # param [ Operation::Result ] result The result object. # # @since 2.5.0 def initialize(message = nil, result = nil) @result = result super(message) end end end end mongo-2.5.1/lib/mongo/error/invalid_min_pool_size.rb0000644000004100000410000000214313257253113022607 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception that is raised when trying to create a client with an invalid # min_pool_size option. # # @since 2.4.2 class InvalidMinPoolSize < Error # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidMinPoolSize.new(10, 5) # # @since 2.4.2 def initialize(min, max) super("Invalid min pool size: #{min}. Please ensure that it is less than the max size: #{max}. ") end end end end mongo-2.5.1/lib/mongo/error/missing_file_chunk.rb0000644000004100000410000000243013257253113022072 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if the next chunk when reading from a GridFSBucket does not have the # expected sequence number (n). # # @since 2.1.0 class MissingFileChunk < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::MissingFileChunk.new(expected_n, chunk) # # @param [ Integer ] expected_n The expected index value. # @param [ Grid::File::Chunk ] chunk The chunk read from GridFS. 
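# A hedged sketch of how the retryability predicates above might be used by
# calling code; `collection` is an assumed Mongo::Collection and the retry
# loop is an example policy, not behaviour built into this class:
#
#   begin
#     collection.insert_one(name: 'test')
#   rescue Mongo::Error::OperationFailure => e
#     retry if e.write_retryable?  # e.g. 'not master' reported during a failover
#     raise
#   end
#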
# # @since 2.1.0 def initialize(expected_n, chunk) super("Unexpected chunk in sequence. Expected next chunk to have index #{expected_n} but it has index #{chunk.n}") end end end end mongo-2.5.1/lib/mongo/error/invalid_bulk_operation.rb0000644000004100000410000000224213257253113022756 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception raised if an non-existent operation type is used. # # @since 2.0.0 class InvalidBulkOperation < Error # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidBulkOperation.new(name) # # @param [ String ] type The bulk operation type. # @param [ Hash ] operation The bulk operation. # # @since 2.0.0 def initialize(type, operation) super("Invalid document format for bulk #{type} operation: #{operation}.") end end end end mongo-2.5.1/lib/mongo/error/socket_error.rb0000644000004100000410000000132413257253113020734 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised when a socket has an error. # # @since 2.0.0 class SocketError < Error; end end end mongo-2.5.1/lib/mongo/error/invalid_txt_record.rb0000644000004100000410000000163013257253113022116 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # This exception is raised when the URI Parser's query returns too many # TXT records or the record specifies invalid options. # # @example Instantiate the exception. # Mongo::Error::InvalidTXTRecord.new(message) # # @since 2.5.0 class InvalidTXTRecord < Error; end end end mongo-2.5.1/lib/mongo/error/invalid_document.rb0000644000004100000410000000207013257253113021556 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception raised if the object is not a valid document. # # @since 2.0.0 class InvalidDocument < Error # The error message. # # @since 2.0.0 MESSAGE = 'Invalid document provided.'.freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidDocument.new # # @since 2.0.0 def initialize super(MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/unchangeable_collection_option.rb0000644000004100000410000000244713257253113024461 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if a new collection is created from an existing one and options other than the # changeable ones are provided. # # @since 2.1.0 class UnchangeableCollectionOption < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::UnchangeableCollectionOption.new(option) # # @param [ String, Symbol ] option The option that was attempted to be changed. # # @since 2.1.0 def initialize(option) super("The option #{option} cannot be set on a new collection instance." + " The options that can be updated are #{Collection::CHANGEABLE_OPTIONS}") end end end end mongo-2.5.1/lib/mongo/error/invalid_database_name.rb0000644000004100000410000000217713257253113022514 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception that is raised when trying to create a database with no name. # # @since 2.0.0 class InvalidDatabaseName < Error # The message is constant. # # @since 2.0.0 MESSAGE = 'nil is an invalid database name. Please provide a string or symbol.'.freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidDatabaseName.new # # @since 2.0.0 def initialize super(MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/invalid_replacement_document.rb0000644000004100000410000000214013257253113024133 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception raised if the object is not a valid replacement document. # # @since 2.0.0 class InvalidReplacementDocument < Error # The error message. # # @since 2.0.0 MESSAGE = 'Invalid replacement document provided'.freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidReplacementDoc.new # # @since 2.0.0 def initialize super(MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/parser.rb0000644000004100000410000000505013257253113017527 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Class for parsing the various forms that errors can come in from MongoDB # command responses. # # @since 2.0.0 class Parser # @return [ BSON::Document ] document The returned document. attr_reader :document # @return [ String ] message The error message parsed from the document. attr_reader :message # @return [ Array ] replies The message replies. attr_reader :replies # Create the new parser with the returned document. # # @example Create the new parser. # Parser.new({ 'errmsg' => 'failed' }) # # @param [ BSON::Document ] document The returned document. # # @since 2.0.0 def initialize(document, replies = nil) @document = document || {} @replies = replies parse! end private def parse! @message = "" parse_single(@message, ERR) parse_single(@message, ERROR) parse_single(@message, ERRMSG) parse_multiple(@message, WRITE_ERRORS) parse_single(@message, ERRMSG, document[WRITE_CONCERN_ERROR]) if document[WRITE_CONCERN_ERROR] parse_flag(@message) end def parse_single(message, key, doc = document) if error = doc[key] append(message ,"#{error} (#{doc[CODE]})") end end def parse_multiple(message, key) if errors = document[key] errors.each do |error| parse_single(message, ERRMSG, error) end end end def parse_flag(message) if replies && replies.first && (replies.first.respond_to?(:cursor_not_found?)) && replies.first.cursor_not_found? append(message, CURSOR_NOT_FOUND) end end def append(message, error) if message.length > 1 message.concat(", #{error}") else message.concat(error) end end end end end mongo-2.5.1/lib/mongo/error/bulk_write_error.rb0000644000004100000410000000231413257253113021613 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
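# A rough usage sketch for the Mongo::Error::Parser class defined above: the parser
# folds any err / error / errmsg / writeErrors / write concern error fields of a
# command reply into a single message string. The reply document below is an
# illustrative hash, not a captured server response, and assumes the driver's CODE
# key maps to the server's 'code' field.
#
#   parser = Mongo::Error::Parser.new({ 'ok' => 0, 'errmsg' => 'not master', 'code' => 10107 })
#   parser.message
#   # => "not master (10107)"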
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception raised if there are write errors upon executing the bulk # operation. # # @since 2.0.0 class BulkWriteError < Error # @return [ BSON::Document ] result The error result. attr_reader :result # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::BulkWriteFailure.new(response) # # @param [ Hash ] result A processed response from the server # reporting results of the operation. # # @since 2.0.0 def initialize(result) @result = result end end end end mongo-2.5.1/lib/mongo/error/invalid_signature.rb0000644000004100000410000000305113257253113021741 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # This exception is raised when the server verifier does not match the # expected signature on the client. # # @since 2.0.0 class InvalidSignature < Error # @return [ String ] verifier The server verifier string. attr_reader :verifier # @return [ String ] server_signature The expected server signature. attr_reader :server_signature # Create the new exception. # # @example Create the new exception. # InvalidSignature.new(verifier, server_signature) # # @param [ String ] verifier The verifier returned from the server. # @param [ String ] server_signature The expected value from the # server. # # @since 2.0.0 def initialize(verifier, server_signature) @verifier = verifier @server_signature = server_signature super("Expected server verifier '#{verifier}' to match '#{server_signature}'.") end end end end mongo-2.5.1/lib/mongo/error/invalid_file_revision.rb0000644000004100000410000000223613257253113022601 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if the requested file revision is not found. # # @since 2.1.0 class InvalidFileRevision < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::InvalidFileRevision.new('some-file.txt', 3) # # @param [ String ] filename The name of the file. # @param [ Integer ] revision The requested revision. 
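# A rough GridFS sketch of where Mongo::Error::InvalidFileRevision can surface when
# a requested revision does not exist. The bucket, filename and revision number are
# illustrative, and `client` is assumed to be a connected Mongo::Client.
#
#   fs = client.database.fs
#   begin
#     fs.open_download_stream_by_name('report.txt', revision: 3)
#   rescue Mongo::Error::InvalidFileRevision => e
#     e.message # => "No revision 3 found for file 'report.txt'."
#   end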
# # @since 2.1.0 def initialize(filename, revision) super("No revision #{revision} found for file '#{filename}'.") end end end end mongo-2.5.1/lib/mongo/error/invalid_bulk_operation_type.rb0000644000004100000410000000212213257253113024014 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception raised if an non-existent operation type is used. # # @since 2.0.0 class InvalidBulkOperationType < Error # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidBulkOperationType.new(type) # # @param [ String ] type The attempted operation type. # # @since 2.0.0 def initialize(type) super("Invalid bulk operation type: #{type}.") end end end end mongo-2.5.1/lib/mongo/error/closed_stream.rb0000644000004100000410000000202213257253113021053 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if the Grid::FSBucket::Stream object is closed and an operation is attempted. # # @since 2.1.0 class ClosedStream < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::ClosedStream.new # # @since 2.1.0 def initialize super("The stream is closed and cannot be written to or read from.") end end end end mongo-2.5.1/lib/mongo/error/socket_timeout_error.rb0000644000004100000410000000134313257253113022503 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised when a socket connection times out. # # @since 2.0.0 class SocketTimeoutError < Error; end end end mongo-2.5.1/lib/mongo/error/invalid_session.rb0000644000004100000410000000203213257253113021421 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
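# A rough sketch of how Mongo::Error::InvalidBulkOperationType is typically raised:
# handing #bulk_write an operation keyed by a name the driver does not recognize.
# The collection name and bogus operation are illustrative; `client` is assumed to
# be a connected Mongo::Client.
#
#   begin
#     client[:users].bulk_write([ { bogus_op: { filter: { name: 'x' } } } ])
#   rescue Mongo::Error::InvalidBulkOperationType => e
#     e.message # => "Invalid bulk operation type: bogus_op."
#   end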
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # This exception is raised when a session is attempted to be used and it is invalid. # # @since 2.5.0 class InvalidSession < Error # Create the new exception. # # @example Create the new exception. # InvalidSession.new(message) # # @param [ String ] message The error message. # # @since 2.5.0 def initialize(message) super(message) end end end end mongo-2.5.1/lib/mongo/error/max_bson_size.rb0000644000004100000410000000227113257253113021075 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception that is raised when trying to serialize a document that # exceeds max BSON object size. # # @since 2.0.0 class MaxBSONSize < Error # The message is constant. # # @since 2.0.0 MESSAGE = "Document exceeds allowed max BSON size.".freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::MaxBSONSize.new(max) # # @since 2.0.0 def initialize(max_size = nil) super(max_size ? MESSAGE + " The max is #{max_size}." : MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/unsupported_features.rb0000644000004100000410000000141713257253113022524 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised when the driver does not support the complete set of server # features. # # @since 2.0.0 class UnsupportedFeatures < Error end end end mongo-2.5.1/lib/mongo/error/no_server_available.rb0000644000004100000410000000252513257253113022241 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
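# A rough sketch of how the NoServerAvailable error defined below typically
# surfaces: no reachable server satisfies the selector before the server selection
# timeout elapses. The unreachable address and one-second timeout are illustrative.
#
#   client = Mongo::Client.new([ '127.0.0.1:27099' ], server_selection_timeout: 1)
#   begin
#     client[:users].find.first
#   rescue Mongo::Error::NoServerAvailable => e
#     e.message # reports the preference, server_selection_timeout and local_threshold
#   end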
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if there are no servers available matching the preference. # # @since 2.0.0 class NoServerAvailable < Error # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::NoServerAvailable.new(server_selector) # # @param [ Hash ] server_selector The server preference that could not be # satisfied. # # @since 2.0.0 def initialize(server_selector) super("No server is available matching preference: #{server_selector.inspect} " + "using server_selection_timeout=#{server_selector.server_selection_timeout} " + "and local_threshold=#{server_selector.local_threshold}") end end end end mongo-2.5.1/lib/mongo/error/file_not_found.rb0000644000004100000410000000226313257253113021230 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if a file is deleted from a GridFS but it is not found. # # @since 2.1.0 class FileNotFound < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::FileNotFound.new(id, :id) # # @param [ Object ] value The property value used to find the file. # @param [ String, Symbol ] property The name of the property used to find the file. # # @since 2.1.0 def initialize(value, property) super("File with #{property} '#{value}' not found.") end end end end mongo-2.5.1/lib/mongo/error/unsupported_array_filters.rb0000644000004100000410000000366513257253113023563 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if the array filters option is specified for an operation but the server # selected does not support array filters. # # @since 2.5.0 class UnsupportedArrayFilters < Error # The default error message describing that array filters are not supported. # # @return [ String ] A default message describing that array filters are not supported by the server. # # @since 2.5.0 DEFAULT_MESSAGE = "The array_filters option is not a supported feature of the server handling this operation. " + "Operation results may be unexpected.".freeze # The error message describing that array filters cannot be used when write concern is unacknowledged. # # @return [ String ] A message describing that array filters cannot be used when write concern is unacknowledged. 
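# A rough sketch of an update that sets the array_filters option; if the selected
# server does not support array filters (pre-3.6), the driver raises this
# UnsupportedArrayFilters error. The collection, fields and filter are illustrative,
# and `client` is assumed to be a connected Mongo::Client.
#
#   client[:students].update_one(
#     { name: 'x' },
#     { '$set' => { 'grades.$[g]' => 100 } },
#     array_filters: [ { 'g' => { '$lt' => 60 } } ]
#   )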
# # @since 2.5.0 UNACKNOWLEDGED_WRITES_MESSAGE = "The array_filters option cannot be specified when using unacknowledged writes. " + "Either remove the array_filters option or use acknowledged writes (w >= 1).".freeze # Create the new exception. # # @example Create the new exception. # Mongo::Error::UnsupportedArrayFilters.new # # @since 2.5.0 def initialize(message = nil) super(message || DEFAULT_MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/extra_file_chunk.rb0000644000004100000410000000170013257253113021543 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if an extra chunk is found. # # @since 2.1.0 class ExtraFileChunk < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::ExtraFileChunk.new # # @since 2.1.0 def initialize super("Extra file chunk found.") end end end end mongo-2.5.1/lib/mongo/error/invalid_update_document.rb0000644000004100000410000000212113257253113023115 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception raised if the object is not a valid update document. # # @since 2.0.0 class InvalidUpdateDocument < Error # The error message. # # @since 2.0.0 MESSAGE = 'Invalid update document provided'.freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidUpdateDocument.new # # @since 2.0.0 def initialize super(MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/invalid_file.rb0000644000004100000410000000233613257253113020664 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if the file md5 and server md5 do not match when acknowledging # GridFS writes. # # @since 2.0.0 class InvalidFile < Error # Create the new exception. # # @example Create the new exception. 
# Mongo::Error::InvalidFile.new(file_md5, server_md5) # # @param [ String ] client_md5 The client side file md5. # @param [ String ] server_md5 The server side file md5. # # @since 2.0.0 def initialize(client_md5, server_md5) super("File MD5 on client side is #{client_md5} but the server reported #{server_md5}.") end end end end mongo-2.5.1/lib/mongo/error/unexpected_chunk_length.rb0000644000004100000410000000252113257253113023130 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if the next chunk when reading from a GridFSBucket does not have the # expected length. # # @since 2.1.0 class UnexpectedChunkLength < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::UnexpectedChunkLength.new(expected_len, chunk) # # @param [ Integer ] expected_len The expected length. # @param [ Grid::File::Chunk ] chunk The chunk read from GridFS. # # @since 2.1.0 def initialize(expected_len, chunk) super("Unexpected chunk length. Chunk has length #{chunk.data.data.size} but expected length " + "#{expected_len} or for it to be the last chunk in the sequence.") end end end end mongo-2.5.1/lib/mongo/error/no_srv_records.rb0000644000004100000410000000153613257253113021267 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # This exception is raised when the URI Parser's DNS query returns no SRV records. # # @example Instantiate the exception. # Mongo::Error::NoSRVRecords.new(message) # # @since 2.5.0 class NoSRVRecords < Error; end end end mongo-2.5.1/lib/mongo/error/invalid_collection_name.rb0000644000004100000410000000220213257253113023070 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception that is raised when trying to create a collection with no name. # # @since 2.0.0 class InvalidCollectionName < Error # The message is constant. 
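# A rough sketch of how this InvalidCollectionName error is raised: asking a client
# or database for a collection with a nil name. `client` is assumed to be a
# connected Mongo::Client.
#
#   begin
#     client[nil]
#   rescue Mongo::Error::InvalidCollectionName => e
#     e.message # => "nil is an invalid collection name. Please provide a string or symbol."
#   end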
# # @since 2.0.0 MESSAGE = 'nil is an invalid collection name. Please provide a string or symbol.'.freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Collection::InvalidName.new # # @since 2.0.0 def initialize super(MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/invalid_nonce.rb0000644000004100000410000000257713257253113021056 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # This exception is raised when the server nonce returned does not # match the client nonce sent to it. # # @since 2.0.0 class InvalidNonce < Error # @return [ String ] nonce The client nonce. attr_reader :nonce # @return [ String ] rnonce The server nonce. attr_reader :rnonce # Instantiate the new exception. # # @example Create the exception. # InvalidNonce.new(nonce, rnonce) # # @param [ String ] nonce The client nonce. # @param [ String ] rnonce The server nonce. # # @since 2.0.0 def initialize(nonce, rnonce) @nonce = nonce @rnonce = rnonce super("Expected server rnonce '#{rnonce}' to start with client nonce '#{nonce}'.") end end end end mongo-2.5.1/lib/mongo/error/max_message_size.rb0000644000004100000410000000237513257253113021565 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception that is raised when trying to send a message that exceeds max # message size. # # @since 2.0.0 class MaxMessageSize < Error # The message is constant. # # @since 2.0.0 MESSAGE = "Message exceeds allowed max message size.".freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::MaxMessageSize.new(max) # # @param [ Integer ] max_size The maximum message size. # # @since 2.0.0 def initialize(max_size = nil) super(max_size ? MESSAGE + " The max is #{max_size}." : MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/unknown_payload_type.rb0000644000004100000410000000235113257253113022505 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if an unknown payload type is encountered when an OP_MSG is created or read. # # @since 2.5.0 class UnknownPayloadType < Error # The error message. # # @since 2.5.0 MESSAGE = 'Unknown payload type (%s) encountered when creating or reading an OP_MSG wire protocol message.' # Create the new exception. # # @example Create the new exception. # Mongo::Error::UnknownPayloadType.new(byte) # # @param [ String ] byte The unknown payload type. # # @since 2.5.0 def initialize(byte) super(MESSAGE % byte.inspect) end end end end mongo-2.5.1/lib/mongo/error/mismatched_domain.rb0000644000004100000410000000166013257253113021703 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # This exception is raised when the URI Parser's DNS query returns SRV record(s) # whose parent domain does not match the hostname used for the query. # # @example Instantiate the exception. # Mongo::Error::MismatchedDomain.new(message) # # @since 2.5.0 class MismatchedDomain < Error; end end end mongo-2.5.1/lib/mongo/error/missing_resume_token.rb0000644000004100000410000000215413257253113022466 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if a change stream document is returned without a resume token. # # @since 2.5.0 class MissingResumeToken < Error # The error message. # # @since 2.5.0 MESSAGE = 'Cannot provide resume functionality when the resume token is missing'.freeze # Create the new exception. # # @example Create the new exception. # Mongo::Error::MissingResumeToken.new # # @since 2.5.0 def initialize super(MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/invalid_server_preference.rb0000644000004100000410000000423313257253113023447 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
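# A rough sketch of how the InvalidServerPreference error defined below can be
# raised: combining tag sets with the :primary read preference, which does not
# support tags. The tag set is illustrative.
#
#   Mongo::ServerSelector.get(mode: :primary, tag_sets: [ { 'dc' => 'nyc' } ])
#   # => raises Mongo::Error::InvalidServerPreference (NO_TAG_SUPPORT)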
module Mongo class Error # Raised when an invalid server preference is provided. # # @since 2.0.0 class InvalidServerPreference < Error # Error message when tags are specified for a read preference that cannot support them. # # @since 2.4.0 NO_TAG_SUPPORT = 'This read preference cannot be combined with tags.'.freeze # Error message when a max staleness is specified for a read preference that cannot support it. # # @since 2.4.0 NO_MAX_STALENESS_SUPPORT = 'max_staleness cannot be set for this read preference.'.freeze # Error message for when the max staleness is not at least twice the heartbeat frequency. # # @since 2.4.0 INVALID_MAX_STALENESS = "`max_staleness` value is too small. It must be at least " + "`ServerSelector::SMALLEST_MAX_STALENESS_SECONDS` and (the cluster's heartbeat_frequency " + "setting + `Cluster::IDLE_WRITE_PERIOD_SECONDS`).".freeze # Error message when max staleness cannot be used because one or more servers has version < 3.4. # # @since 2.4.0 NO_MAX_STALENESS_WITH_LEGACY_SERVER = 'max_staleness can only be set for a cluster in which ' + 'each server is at least version 3.4.'.freeze # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidServerPreference.new # # @param [ String ] message The error message. # # @since 2.0.0 def initialize(message) super(message) end end end end mongo-2.5.1/lib/mongo/error/need_primary_server.rb0000644000004100000410000000135513257253113022303 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised when a primary server is needed but not found. # # @since 2.0.0 class NeedPrimaryServer < Error; end end end mongo-2.5.1/lib/mongo/error/unsupported_collation.rb0000644000004100000410000000354413257253113022675 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if a collation is specified for an operation but the server selected does not # support collations. # # @since 2.4.0 class UnsupportedCollation < Error # The default error message describing that collations is not supported. # # @return [ String ] A default message describing that collations is not supported by the server. # # @since 2.4.0 DEFAULT_MESSAGE = "Collations is not a supported feature of the server handling this operation. " + "Operation results may be unexpected." # The error message describing that collations cannot be used when write concern is unacknowledged. 
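# A rough sketch of the unacknowledged-write case described by the message below:
# specifying a collation together with w: 0 raises this UnsupportedCollation error.
# The collection, filter and locale are illustrative, and `client` is assumed to be
# a connected Mongo::Client.
#
#   unacknowledged = client[:users, write: { w: 0 }]
#   unacknowledged.delete_one({ name: 'x' }, collation: { locale: 'en_US', strength: 2 })
#   # => raises Mongo::Error::UnsupportedCollation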
# # @return [ String ] A message describing that collations cannot be used when write concern is unacknowledged. # # @since 2.4.0 UNACKNOWLEDGED_WRITES_MESSAGE = "A collation cannot be specified when using unacknowledged writes. " + "Either remove the collation option or use acknowledged writes (w >= 1)." # Create the new exception. # # @example Create the new exception. # Mongo::Error::UnsupportedCollation.new # # @since 2.4.0 def initialize(message = nil) super(message || DEFAULT_MESSAGE) end end end end mongo-2.5.1/lib/mongo/error/multi_index_drop.rb0000644000004100000410000000203213257253113021575 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception raised if '*' is passed to drop_one on indexes. # # @since 2.0.0 class MultiIndexDrop < Error # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::MultiIndexDrop.new # # @since 2.0.0 def initialize super("Passing '*' to #drop_one would cause all indexes to be dropped. Please use #drop_all") end end end end mongo-2.5.1/lib/mongo/error/invalid_application_name.rb0000644000004100000410000000241313257253113023244 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # This exception is raised when the metadata document sent to the server # at the time of a connection handshake is invalid. # # @since 2.4.0 class InvalidApplicationName < Error # Instantiate the new exception. # # @example Create the exception. # InvalidApplicationName.new(app_name, 128) # # @param [ String ] app_name The application name option. # @param [ Integer ] max_size The max byte size of the application name. # # @since 2.4.0 def initialize(app_name, max_size) super("The provided application name '#{app_name}' cannot exceed #{max_size} bytes.") end end end end mongo-2.5.1/lib/mongo/error/unexpected_response.rb0000644000004100000410000000251613257253113022321 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
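# A rough sketch of how the MultiIndexDrop error defined above is raised: '*' would
# drop every index on the collection, so #drop_one rejects it. The collection name
# is illustrative, and `client` is assumed to be a connected Mongo::Client.
#
#   client[:users].indexes.drop_one('*')
#   # => raises Mongo::Error::MultiIndexDrop; use indexes.drop_all to drop them all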
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised if the response read from the socket does not match the latest query. # # @since 2.2.6 class UnexpectedResponse < Error # Create the new exception. # # @example Create the new exception. # Mongo::Error::UnexpectedResponse.new(expected_response_to, response_to) # # @param [ Integer ] expected_response_to The last request id sent. # @param [ Integer ] response_to The actual response_to of the reply. # # @since 2.2.6 def initialize(expected_response_to, response_to) super("Unexpected response. Got response for request ID #{response_to} " + "but expected response for request ID #{expected_response_to}") end end end end mongo-2.5.1/lib/mongo/error/invalid_write_concern.rb0000644000004100000410000000220113257253113022575 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Raised when an invalid write concern is provided. # # @since 2.2.0 class InvalidWriteConcern < Error # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidWriteConcern.new # # @since 2.2.0 def initialize super('Invalid write concern options. If w is an Integer, it must be greater than or equal to 0. ' + 'If w is 0, it cannot be combined with a true value for fsync or j (journal).') end end end end mongo-2.5.1/lib/mongo/error/invalid_uri.rb0000644000004100000410000000243513257253113020544 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Error # Exception that is raised when trying to parse a URI that does not match # the specification. # # @since 2.0.0 class InvalidURI < Error # Instantiate the new exception. # # @example Instantiate the exception. # Mongo::Error::InvalidURI.new(uri, details, format) # # @since 2.0.0 def initialize(uri, details, format = nil) message = "Bad URI: #{uri}\n" + "#{details}\n" message += "MongoDB URI must be in the following format: #{format}\n" if format message += "Please see the following URL for more information: #{Mongo::URI::HELP}\n" super(message) end end end end mongo-2.5.1/lib/mongo/write_concern.rb0000644000004100000410000000615713257253113017754 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/write_concern/normalizable' require 'mongo/write_concern/acknowledged' require 'mongo/write_concern/unacknowledged' module Mongo # Base module for all write concern specific behaviour. # # @since 2.0.0 module WriteConcern extend self # The number of servers write concern. # # @since 2.0.0 W = :w.freeze # The journal write concern. # # @since 2.0.0 J = :j.freeze # The file sync write concern. # # @since 2.0.0 FSYNC = :fsync.freeze # The wtimeout write concern. # # @since 2.0.0 WTIMEOUT = :wtimeout.freeze # The GLE command name. # # @since 2.0.0 GET_LAST_ERROR = :getlasterror.freeze # The default write concern is to acknowledge on a single server. # # @since 2.0.0 DEFAULT = { }.freeze # Get a write concern mode for the provided options. # # @example Get a write concern mode. # Mongo::WriteConcern.get(:w => 1) # # @param [ Hash ] options The options to instantiate with. # # @option options :w [ Integer, String ] The number of servers or the # custom mode to acknowledge. # @option options :j [ true, false ] Whether to acknowledge a write to # the journal. # @option options :fsync [ true, false ] Should the write be synced to # disc. # @option options :wtimeout [ Integer ] The number of milliseconds to # wait for acknowledgement before raising an error. # # @return [ Unacknowledged, Acknowledged ] The appropriate concern. # # @raise [ Error::InvalidWriteConcern ] If the write concern is invalid. # # @since 2.0.0 def get(options) return options if options.is_a?(Unacknowledged) || options.is_a?(Acknowledged) if options validate!(options) if unacknowledged?(options) Unacknowledged.new(options) else Acknowledged.new(options) end end end private def validate!(options) if options[W] if options[W] == 0 && (options[J] || options[FSYNC]) raise Mongo::Error::InvalidWriteConcern.new elsif options[W].is_a?(Integer) && options[W] < 0 raise Mongo::Error::InvalidWriteConcern.new end end end # Determine if the options are for an unacknowledged write concern. # # @api private # # @param [ Hash ] options The options to check. # # @return [ true, false ] If the options are unacknowledged. # # @since 2.0.0 def unacknowledged?(options) options[W] == 0 end end end mongo-2.5.1/lib/mongo/session/0000755000004100000410000000000013257253113016240 5ustar www-datawww-datamongo-2.5.1/lib/mongo/session/session_pool.rb0000644000004100000410000000730013257253113021301 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Session # A pool of server sessions. # # @api private # # @since 2.5.0 class SessionPool # Create a SessionPool. 
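# A rough sketch of Mongo::WriteConcern.get as defined above: options normalize to
# an Acknowledged or Unacknowledged concern, and invalid combinations raise
# Mongo::Error::InvalidWriteConcern. The option values are illustrative.
#
#   Mongo::WriteConcern.get(w: 2, wtimeout: 5000) # => an Acknowledged write concern
#   Mongo::WriteConcern.get(w: 0)                 # => an Unacknowledged write concern
#   Mongo::WriteConcern.get(w: 0, j: true)        # raises Mongo::Error::InvalidWriteConcern
#   Mongo::WriteConcern.get(w: -1)                # raises Mongo::Error::InvalidWriteConcern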
# # @example # SessionPool.create(cluster) # # @param [ Mongo::Cluster ] cluster The cluster that will be associated with this # session pool. # # @since 2.5.0 def self.create(cluster) pool = new(cluster) cluster.instance_variable_set(:@session_pool, pool) end # Initialize a SessionPool. # # @example # SessionPool.new(cluster) # # @param [ Mongo::Cluster ] cluster The cluster that will be associated with this # session pool. # # @since 2.5.0 def initialize(cluster) @queue = [] @mutex = Mutex.new @cluster = cluster end # Get a formatted string for use in inspection. # # @example Inspect the session pool object. # session_pool.inspect # # @return [ String ] The session pool inspection. # # @since 2.5.0 def inspect "#" end # Checkout a server session from the pool. # # @example Checkout a session. # pool.checkout # # @return [ ServerSession ] The server session. # # @since 2.5.0 def checkout @mutex.synchronize do loop do if @queue.empty? return ServerSession.new else session = @queue.shift unless about_to_expire?(session) return session end end end end end # Checkin a server session to the pool. # # @example Checkin a session. # pool.checkin(session) # # @param [ Session::ServerSession ] session The session to checkin. # # @since 2.5.0 def checkin(session) @mutex.synchronize do prune! unless about_to_expire?(session) @queue.unshift(session) end end end # End all sessions in the pool by sending the endSessions command to the server. # # @example End all sessions. # pool.end_sessions # # @since 2.5.0 def end_sessions while !@queue.empty? server = ServerSelector.get(mode: :primary_preferred).select_server(@cluster) Operation::Commands::Command.new( :selector => {endSessions: @queue.shift(10_000).collect { |s| s.session_id }}, :db_name => Database::ADMIN).execute(server) end rescue end private def about_to_expire?(session) if @cluster.logical_session_timeout idle_time_minutes = (Time.now - session.last_use) / 60 (idle_time_minutes + 1) >= @cluster.logical_session_timeout end end def prune! while !@queue.empty? if about_to_expire?(@queue[-1]) @queue.pop else break end end end end end end mongo-2.5.1/lib/mongo/session/server_session.rb0000644000004100000410000000521113257253113021635 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Session # An object representing the server-side session. # # @api private # # @since 2.5.0 class ServerSession # Regex for removing dashes from the UUID string. # # @since 2.5.0 DASH_REGEX = /\-/.freeze # Pack directive for the UUID. # # @since 2.5.0 UUID_PACK = 'H*'.freeze # The last time the server session was used. # # @since 2.5.0 attr_reader :last_use # Initialize a ServerSession. # # @example # ServerSession.new # # @since 2.5.0 def initialize set_last_use! session_id @txn_num = -1 end # Update the last_use attribute of the server session to now. # # @example Set the last use field to now. # server_session.set_last_use! # # @return [ Time ] The last time the session was used. 
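# A rough sketch of the SessionPool API defined above. It is marked @api private, so
# applications normally obtain sessions through Client#start_session instead;
# `cluster` is assumed to be an existing Mongo::Cluster.
#
#   pool = Mongo::Session::SessionPool.create(cluster)
#   server_session = pool.checkout  # reuses a pooled session unless it is about to expire
#   # ... use the server session for an operation ...
#   pool.checkin(server_session)    # returns it to the front of the queue
#   pool.end_sessions               # sends endSessions to the server for pooled sessions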
# # @since 2.5.0 def set_last_use! @last_use = Time.now end # The session id of this server session. # # @example Get the session id. # server_session.session_id # # @return [ BSON::Document ] The session id. # # @since 2.5.0 def session_id @session_id ||= (bytes = [SecureRandom.uuid.gsub(DASH_REGEX, '')].pack(UUID_PACK) BSON::Document.new(id: BSON::Binary.new(bytes, :uuid))) end # Increment and return the next transaction number. # # @example Get the next transaction number. # server_session.next_txn_num # # @return [ Integer ] The next transaction number. # # @since 2.5.0 def next_txn_num @txn_num += 1 end # Get a formatted string for use in inspection. # # @example Inspect the session object. # session.inspect # # @return [ String ] The session inspection. # # @since 2.5.0 def inspect "#" end end end end mongo-2.5.1/lib/mongo/session.rb0000644000004100000410000002063613257253113016574 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/session/session_pool' require 'mongo/session/server_session' module Mongo # A logical session representing a set of sequential operations executed # by an application that are related in some way. # # @since 2.5.0 class Session extend Forwardable # Get the options for this session. # # @since 2.5.0 attr_reader :options # Get the cluster through which this session was created. # # @since 2.5.1 attr_reader :cluster # The cluster time for this session. # # @since 2.5.0 attr_reader :cluster_time # The latest seen operation time for this session. # # @since 2.5.0 attr_reader :operation_time # Error message indicating that the session was retrieved from a client with a different cluster than that of the # client through which it is currently being used. # # @since 2.5.0 MISMATCHED_CLUSTER_ERROR_MSG = 'The configuration of the client used to create this session does not match that ' + 'of the client owning this operation. Please only use this session for operations through its parent ' + 'client.'.freeze # Error message describing that the session cannot be used because it has already been ended. # # @since 2.5.0 SESSION_ENDED_ERROR_MSG = 'This session has ended and cannot be used. Please create a new one.'.freeze # Error message describing that sessions are not supported by the server version. # # @since 2.5.0 SESSIONS_NOT_SUPPORTED = 'Sessions are not supported by the connected servers.'.freeze # Initialize a Session. # # @example # Session.new(server_session, cluster, options) # # @param [ ServerSession ] server_session The server session this session is associated with. # @param [ Cluster ] cluster The cluster through which this session is created. # @param [ Hash ] options The options for this session. # # @since 2.5.0 def initialize(server_session, cluster, options = {}) @server_session = server_session @cluster = cluster @options = options.dup.freeze @cluster_time = nil end # Get a formatted string for use in inspection. # # @example Inspect the session object. 
# session.inspect # # @return [ String ] The session inspection. # # @since 2.5.0 def inspect "#" end # End this session. # # @example # session.end_session # # @return [ nil ] Always nil. # # @since 2.5.0 def end_session if !ended? && @cluster @cluster.session_pool.checkin(@server_session) end ensure @server_session = nil end # Whether this session has ended. # # @example # session.ended? # # @return [ true, false ] Whether the session has ended. # # @since 2.5.0 def ended? @server_session.nil? end # Add this session's id to a command document. # # @example # session.add_id!(cmd) # # @return [ Hash, BSON::Document ] The command document. # # @since 2.5.0 def add_id!(command) command.merge!(lsid: session_id) end # Validate the session. # # @example # session.validate!(cluster) # # @param [ Cluster ] cluster The cluster the session is attempted to be used with. # # @return [ nil ] nil if the session is valid. # # @raise [ Mongo::Error::InvalidSession ] Raise error if the session is not valid. # # @since 2.5.0 def validate!(cluster) check_matching_cluster!(cluster) check_if_ended! self end # Process a response from the server that used this session. # # @example Process a response from the server. # session.process(result) # # @param [ Operation::Result ] result The result from the operation. # # @return [ Operation::Result ] The result. # # @since 2.5.0 def process(result) unless implicit? set_operation_time(result) set_cluster_time(result) end @server_session.set_last_use! result end # Advance the cached cluster time document for this session. # # @example Advance the cluster time. # session.advance_cluster_time(doc) # # @param [ BSON::Document, Hash ] new_cluster_time The new cluster time. # # @return [ BSON::Document, Hash ] The new cluster time. # # @since 2.5.0 def advance_cluster_time(new_cluster_time) if @cluster_time @cluster_time = [ @cluster_time, new_cluster_time ].max_by { |doc| doc[Cluster::CLUSTER_TIME] } else @cluster_time = new_cluster_time end end # Advance the cached operation time for this session. # # @example Advance the operation time. # session.advance_operation_time(timestamp) # # @param [ BSON::Timestamp ] new_operation_time The new operation time. # # @return [ BSON::Timestamp ] The max operation time, considering the current and new times. # # @since 2.5.0 def advance_operation_time(new_operation_time) if @operation_time @operation_time = [ @operation_time, new_operation_time ].max else @operation_time = new_operation_time end end # Will writes executed with this session be retried. # # @example Will writes be retried. # session.retry_writes? # # @return [ true, false ] If writes will be retried. # # @note Retryable writes are only available on server versions at least 3.6 and with # sharded clusters or replica sets. # # @since 2.5.0 def retry_writes? !!cluster.options[:retry_writes] && (cluster.replica_set? || cluster.sharded?) end # Get the session id. # # @example Get the session id. # session.session_id # # @return [ BSON::Document ] The session id. # # @since 2.5.0 def session_id @server_session.session_id if @server_session end # Increment and return the next transaction number. # # @example Get the next transaction number. # session.next_txn_num # # @return [ Integer ] The next transaction number. # # @since 2.5.0 def next_txn_num @server_session.next_txn_num if @server_session end # Is this session an implicit one (not user-created). # # @example Is the session implicit? # session.implicit? # # @return [ true, false ] Whether this session is implicit. 
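# A rough end-to-end sketch of an explicit session built on the Session API above.
# It assumes a connected Mongo::Client in `client` against a deployment that
# supports sessions (3.6+); the collection and document are illustrative.
#
#   session = client.start_session(causal_consistency: true)
#   client[:users].insert_one({ name: 'x' }, session: session)
#   client[:users].find({ name: 'x' }, session: session).first
#   session.end_session  # checks the server session back into the pool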
# # @since 2.5.1 def implicit? @implicit_session ||= !!(@options.key?(:implicit) && @options[:implicit] == true) end private def causal_consistency_doc(read_concern) if operation_time && causal_consistency? (read_concern || {}).merge(:afterClusterTime => operation_time) else read_concern end end def causal_consistency? @causal_consistency ||= (if @options.key?(:causal_consistency) @options[:causal_consistency] == true else true end) end def set_operation_time(result) if result && result.operation_time @operation_time = result.operation_time end end def set_cluster_time(result) if cluster_time_doc = result.cluster_time if @cluster_time.nil? @cluster_time = cluster_time_doc elsif cluster_time_doc[Cluster::CLUSTER_TIME] > @cluster_time[Cluster::CLUSTER_TIME] @cluster_time = cluster_time_doc end end end def check_if_ended! raise Mongo::Error::InvalidSession.new(SESSION_ENDED_ERROR_MSG) if ended? end def check_matching_cluster!(cluster) if @cluster != cluster raise Mongo::Error::InvalidSession.new(MISMATCHED_CLUSTER_ERROR_MSG) end end end end mongo-2.5.1/lib/mongo/cursor/0000755000004100000410000000000013257253113016072 5ustar www-datawww-datamongo-2.5.1/lib/mongo/cursor/builder/0000755000004100000410000000000013257253113017520 5ustar www-datawww-datamongo-2.5.1/lib/mongo/cursor/builder/get_more_command.rb0000644000004100000410000000431213257253113023344 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cursor module Builder # Generates a specification for a get more command. # # @since 2.2.0 class GetMoreCommand extend Forwardable # @return [ Cursor ] cursor The cursor. attr_reader :cursor def_delegators :@cursor, :collection_name, :database, :view def_delegators :view, :batch_size # Create the new builder. # # @example Create the builder. # GetMoreCommand.new(cursor) # # @param [ Cursor ] cursor The cursor. # @param [ Session ] session The session. # # @since 2.2.0 def initialize(cursor, session = nil) @cursor = cursor @session = session end # Get the specification. # # @example Get the specification. # get_more_command.specification # # @return [ Hash ] The spec. # # @since 2.2.0 def specification { selector: get_more_command, db_name: database.name, session: @session } end private def get_more_command command = { :getMore => cursor.id, :collection => collection_name } command[:batchSize] = batch_size.abs if batch_size && batch_size != 0 # If the max_await_time_ms option is set, then we set maxTimeMS on # the get more command. if view.respond_to?(:max_await_time_ms) if view.max_await_time_ms && view.options[:await_data] command[:maxTimeMS] = view.max_await_time_ms end end command end end end end end mongo-2.5.1/lib/mongo/cursor/builder/op_kill_cursors.rb0000644000004100000410000000462613257253113023266 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
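# A rough sketch of the specification produced by Cursor::Builder::GetMoreCommand
# as defined above. The cursor id, names and batch size in the result are
# illustrative values, not captured output; `cursor` and `session` are assumed to
# exist.
#
#   builder = Mongo::Cursor::Builder::GetMoreCommand.new(cursor, session)
#   builder.specification
#   # => e.g. { selector: { :getMore => 42, :collection => 'users', :batchSize => 100 },
#   #           db_name: 'test', session: session }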
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cursor module Builder # Encapsulates behaviour around generating an OP_KILL_CURSORS specification. # # @since 2.2.0 class OpKillCursors extend Forwardable # @return [ Cursor ] cursor The cursor. attr_reader :cursor def_delegators :@cursor, :collection_name, :database # Create the new builder. # # @example Create the builder. # OpKillCursors.new(cursor) # # @param [ Cursor ] cursor The cursor. # # @since 2.2.0 def initialize(cursor) @cursor = cursor end # Get the specification. # # @example Get the specification. # op_kill_cursors.specification # # @return [ Hash ] The specification. # # @since 2.2.0 def specification { :coll_name => collection_name, :db_name => database.name, :cursor_ids => [ cursor.id ] } end class << self # Update a specification's list of cursor ids. # # @example Update a specification's list of cursor ids. # OpKillCursors.update_cursors(spec, ids) # # @return [ Hash ] The specification. # @return [ Array ] The ids to update with. # # @since 2.3.0 def update_cursors(spec, ids) spec.merge!(cursor_ids: spec[:cursor_ids] & ids) end # Get the list of cursor ids from a spec generated by this Builder. # # @example Get the list of cursor ids. # OpKillCursors.cursors(spec) # # @return [ Hash ] The specification. # # @since 2.3.0 def get_cursors_list(spec) spec[:cursor_ids] end end end end end end mongo-2.5.1/lib/mongo/cursor/builder/op_get_more.rb0000644000004100000410000000321713257253113022347 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cursor module Builder # Encapsulates behaviour around generating an OP_GET_MORE specification. # # @since 2.2.0 class OpGetMore extend Forwardable # @return [ Cursor ] cursor The cursor. attr_reader :cursor def_delegators :@cursor, :collection_name, :database, :to_return # Create the new builder. # # @example Create the builder. # OpGetMore.new(cursor) # # @param [ Cursor ] cursor The cursor. # # @since 2.2.0 def initialize(cursor) @cursor = cursor end # Get the specification. # # @example Get the specification. # op_get_more.specification # # @return [ Hash ] The specification. # # @since 2.2.0 def specification { :to_return => to_return, :cursor_id => cursor.id, :db_name => database.name, :coll_name => collection_name } end end end end end mongo-2.5.1/lib/mongo/cursor/builder/kill_cursors_command.rb0000644000004100000410000000502113257253113024254 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Cursor module Builder # Generates a specification for a kill cursors command. # # @since 2.2.0 class KillCursorsCommand extend Forwardable # @return [ Cursor ] cursor The cursor. attr_reader :cursor def_delegators :@cursor, :collection_name, :database # Create the new builder. # # @example Create the builder. # KillCursorsCommand.new(cursor) # # @param [ Cursor ] cursor The cursor. # # @since 2.2.0 def initialize(cursor) @cursor = cursor end # Get the specification. # # @example Get the specification. # kill_cursors_command.specification # # @return [ Hash ] The spec. # # @since 2.2.0 def specification { selector: kill_cursors_command, db_name: database.name } end private def kill_cursors_command { :killCursors => collection_name, :cursors => [ cursor.id ] } end class << self # Update a specification's list of cursor ids. # # @example Update a specification's list of cursor ids. # KillCursorsCommand.update_cursors(spec, ids) # # @return [ Hash ] The specification. # @return [ Array ] The ids to update with. # # @since 2.3.0 def update_cursors(spec, ids) spec[:selector].merge!(cursors: spec[:selector][:cursors] & ids) end # Get the list of cursor ids from a spec generated by this Builder. # # @example Get the list of cursor ids. # KillCursorsCommand.cursors(spec) # # @return [ Hash ] The specification. # # @since 2.3.0 def get_cursors_list(spec) spec[:selector][:cursors] end end end end end end mongo-2.5.1/lib/mongo/cursor/builder.rb0000644000004100000410000000140713257253113020047 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/cursor/builder/op_get_more' require 'mongo/cursor/builder/op_kill_cursors' require 'mongo/cursor/builder/get_more_command' require 'mongo/cursor/builder/kill_cursors_command' mongo-2.5.1/lib/mongo/collection.rb0000644000004100000410000006655413257253113017255 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
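# --- Illustrative sketch (not part of the gem source). The cursor builders
# above all produce plain specification hashes, so the class-level helpers on
# the kill-cursors builders can be exercised without a live cursor. The
# collection name and ids below are made up.
require 'mongo'

spec = {
  selector: { killCursors: 'users', cursors: [ 1, 2, 3 ] },
  db_name:  'test'
}

Mongo::Cursor::Builder::KillCursorsCommand.get_cursors_list(spec)
# => [1, 2, 3]

# Keep only the ids the server still knows about (set intersection):
Mongo::Cursor::Builder::KillCursorsCommand.update_cursors(spec, [ 2, 3, 4 ])
spec[:selector][:cursors] # => [2, 3]

# The legacy OpKillCursors builder stores ids under :cursor_ids instead:
legacy = { coll_name: 'users', db_name: 'test', cursor_ids: [ 1, 2 ] }
Mongo::Cursor::Builder::OpKillCursors.update_cursors(legacy, [ 2 ])
legacy[:cursor_ids] # => [2]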
require 'mongo/bulk_write' require 'mongo/collection/view' module Mongo # Represents a collection in the database and operations that can directly be # applied to one. # # @since 2.0.0 class Collection extend Forwardable include Retryable # The capped option. # # @since 2.1.0 CAPPED = 'capped'.freeze # The ns field constant. # # @since 2.1.0 NS = 'ns'.freeze # @return [ Mongo::Database ] The database the collection resides in. attr_reader :database # @return [ String ] The name of the collection. attr_reader :name # @return [ Hash ] The collection options. attr_reader :options # Get client, cluster, read preference, and write concern from client. def_delegators :database, :client, :cluster # Delegate to the cluster for the next primary. def_delegators :cluster, :next_primary # Options that can be updated on a new Collection instance via the #with method. # # @since 2.1.0 CHANGEABLE_OPTIONS = [ :read, :read_concern, :write ].freeze # Check if a collection is equal to another object. Will check the name and # the database for equality. # # @example Check collection equality. # collection == other # # @param [ Object ] other The object to check. # # @return [ true, false ] If the objects are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Collection) name == other.name && database == other.database && options == other.options end # Instantiate a new collection. # # @example Instantiate a new collection. # Mongo::Collection.new(database, 'test') # # @param [ Mongo::Database ] database The collection's database. # @param [ String, Symbol ] name The collection name. # @param [ Hash ] options The collection options. # # @since 2.0.0 def initialize(database, name, options = {}) raise Error::InvalidCollectionName.new unless name @database = database @name = name.to_s.freeze @options = options.freeze end # Get the read concern for this collection instance. # # @example Get the read concern. # collection.read_concern # # @return [ Hash ] The read concern. # # @since 2.2.0 def read_concern @read_concern ||= options[:read_concern] end # Get the server selector on this collection. # # @example Get the server selector. # collection.server_selector # # @return [ Mongo::ServerSelector ] The server selector. # # @since 2.0.0 def server_selector @server_selector ||= ServerSelector.get(read_preference || database.server_selector) end # Get the read preference on this collection. # # @example Get the read preference. # collection.read_preference # # @return [ Hash ] The read preference. # # @since 2.0.0 def read_preference @read_preference ||= options[:read] || database.read_preference end # Get the write concern on this collection. # # @example Get the write concern. # collection.write_concern # # @return [ Mongo::WriteConcern ] The write concern. # # @since 2.0.0 def write_concern @write_concern ||= WriteConcern.get(options[:write] || database.write_concern) end # Provides a new collection with either a new read preference or new write concern # merged over the existing read preference / write concern. # # @example Get a collection with changed read preference. # collection.with(:read => { :mode => :primary_preferred }) # # @example Get a collection with changed write concern. # collection.with(:write => { w: 3 }) # @param [ Hash ] new_options The new options to use. # # @return [ Mongo::Collection ] A new collection instance. 
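# --- Illustrative sketch (not part of the gem source). #with, documented
# above, returns a new Collection sharing the same name and database but a
# changed read preference, read concern or write concern. The URI below is
# hypothetical.
require 'mongo'

client     = Mongo::Client.new('mongodb://127.0.0.1:27017/test')
collection = client[:users]

secondary_reads = collection.with(read: { mode: :secondary_preferred })
majority_writes = collection.with(write: { w: :majority })

secondary_reads.read_preference # => { mode: :secondary_preferred }
majority_writes.options[:write] # => { w: :majority }
collection.options[:write]      # => nil; the original instance is unchanged

# Keys outside CHANGEABLE_OPTIONS are rejected:
# collection.with(max_time_ms: 100) # raises Error::UnchangeableCollectionOption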
# # @since 2.1.0 def with(new_options) new_options.keys.each do |k| raise Error::UnchangeableCollectionOption.new(k) unless CHANGEABLE_OPTIONS.include?(k) end Collection.new(database, name, options.merge(new_options)) end # Is the collection capped? # # @example Is the collection capped? # collection.capped? # # @return [ true, false ] If the collection is capped. # # @since 2.0.0 def capped? database.command(:collstats => name).documents[0][CAPPED] end # Force the collection to be created in the database. # # @example Force the collection to be created. # collection.create # # @param [ Hash ] opts The options for the create operation. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Result ] The result of the command. # # @since 2.0.0 def create(opts = {}) operation = { :create => name }.merge(options) operation.delete(:write) server = next_primary if (options[:collation] || options[Operation::COLLATION]) && !server.features.collation_enabled? raise Error::UnsupportedCollation.new end client.send(:with_session, opts) do |session| Operation::Commands::Create.new({ selector: operation, db_name: database.name, write_concern: write_concern, session: session }).execute(server) end end # Drop the collection. Will also drop all indexes associated with the # collection. # # @note An error returned if the collection doesn't exist is suppressed. # # @example Drop the collection. # collection.drop # # @param [ Hash ] opts The options for the drop operation. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Result ] The result of the command. # # @since 2.0.0 def drop(opts = {}) client.send(:with_session, opts) do |session| Operation::Commands::Drop.new({ selector: { :drop => name }, db_name: database.name, write_concern: write_concern, session: session }).execute(next_primary) end rescue Error::OperationFailure => ex raise ex unless ex.message =~ /ns not found/ false end # Find documents in the collection. # # @example Find documents in the collection by a selector. # collection.find(name: 1) # # @example Get all documents in a collection. # collection.find # # @param [ Hash ] filter The filter to use in the find. # @param [ Hash ] options The options for the find. # # @option options [ true, false ] :allow_partial_results Allows the query to get partial # results if some shards are down. # @option options [ Integer ] :batch_size The number of documents returned in each batch # of results from MongoDB. # @option options [ String ] :comment Associate a comment with the query. # @option options [ :tailable, :tailable_await ] :cursor_type The type of cursor to use. # @option options [ Integer ] :limit The max number of docs to return from the query. # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the query # to run in milliseconds. # @option options [ Hash ] :modifiers A document containing meta-operators modifying the # output or behavior of a query. # @option options [ true, false ] :no_cursor_timeout The server normally times out idle # cursors after an inactivity period (10 minutes) to prevent excess memory use. # Set this option to prevent that. # @option options [ true, false ] :oplog_replay Internal replication use only - driver # should not set. # @option options [ Hash ] :projection The fields to include or exclude from each doc # in the result set. # @option options [ Integer ] :skip The number of docs to skip before returning results. 
# @option options [ Hash ] :sort The key and direction pairs by which the result set # will be sorted. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ CollectionView ] The collection view. # # @since 2.0.0 def find(filter = nil, options = {}) View.new(self, filter || {}, options) end # Perform an aggregation on the collection. # # @example Perform an aggregation. # collection.aggregate([ { "$group" => { "_id" => "$city", "tpop" => { "$sum" => "$pop" }}} ]) # # @param [ Array ] pipeline The aggregation pipeline. # @param [ Hash ] options The aggregation options. # # @option options [ true, false ] :allow_disk_use Set to true if disk usage is allowed during # the aggregation. # @option options [ Integer ] :batch_size The number of documents to return per batch. # @option options [ Integer ] :max_time_ms The maximum amount of time in milliseconds to allow the # aggregation to run. # @option options [ true, false ] :use_cursor Indicates whether the command will request that the server # provide results using a cursor. Note that as of server version 3.6, aggregations always provide results # using a cursor and this option is therefore not valid. # @option options [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Hash ] :collation The collation to use. # @option options [ String ] :comment Associate a comment with the aggregation. # @option options [ Session ] :session The session to use. # # @return [ Aggregation ] The aggregation object. # # @since 2.1.0 def aggregate(pipeline, options = {}) View.new(self, {}).aggregate(pipeline, options) end # As of version 3.6 of the MongoDB server, a ``$changeStream`` pipeline stage is supported # in the aggregation framework. This stage allows users to request that notifications are sent for # all changes to a particular collection. # # @example Get change notifications for a given collection. # collection.watch([{ '$match' => { operationType: { '$in' => ['insert', 'replace'] } } }]) # # @param [ Array ] pipeline Optional additional filter operators. # @param [ Hash ] options The change stream options. # # @option options [ String ] :full_document Allowed values: ‘default’, ‘updateLookup’. Defaults to ‘default’. # When set to ‘updateLookup’, the change notification for partial updates will include both a delta # describing the changes to the document, as well as a copy of the entire document that was changed # from some time after the change occurred. # @option options [ BSON::Document, Hash ] :resume_after Specifies the logical starting point for the # new change stream. # @option options [ Integer ] :max_await_time_ms The maximum amount of time for the server to wait # on new documents to satisfy a change stream query. # @option options [ Integer ] :batch_size The number of documents to return per batch. # @option options [ BSON::Document, Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @note A change stream only allows 'majority' read concern. # @note This helper method is preferable to running a raw aggregation with a $changeStream stage, # for the purpose of supporting resumability. # # @return [ ChangeStream ] The change stream object. # # @since 2.5.0 def watch(pipeline = [], options = {}) View::ChangeStream.new(View.new(self, {}, options), pipeline, options) end # Get a count of matching documents in the collection. # # @example Get the count. 
# collection.count(name: 1) # # @param [ Hash ] filter A filter for matching documents. # @param [ Hash ] options The count options. # # @option options [ Hash ] :hint The index to use. # @option options [ Integer ] :limit The maximum number of documents to count. # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run. # @option options [ Integer ] :skip The number of documents to skip before counting. # @option options [ Hash ] :read The read preference options. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ Integer ] The document count. # # @since 2.1.0 def count(filter = nil, options = {}) View.new(self, filter || {}, options).count(options) end # Get a list of distinct values for a specific field. # # @example Get the distinct values. # collection.distinct('name') # # @param [ Symbol, String ] field_name The name of the field. # @param [ Hash ] filter The documents from which to retrieve the distinct values. # @param [ Hash ] options The distinct command options. # # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run. # @option options [ Hash ] :read The read preference options. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ Array ] The list of distinct values. # # @since 2.1.0 def distinct(field_name, filter = nil, options = {}) View.new(self, filter || {}, options).distinct(field_name, options) end # Get a view of all indexes for this collection. Can be iterated or has # more operations. # # @example Get the index view. # collection.indexes # # @param [ Hash ] options Options for getting a list of all indexes. # # @option options [ Session ] :session The session to use. # # @return [ View::Index ] The index view. # # @since 2.0.0 def indexes(options = {}) Index::View.new(self, options) end # Get a pretty printed string inspection for the collection. # # @example Inspect the collection. # collection.inspect # # @return [ String ] The collection inspection. # # @since 2.0.0 def inspect "#" end # Insert a single document into the collection. # # @example Insert a document into the collection. # collection.insert_one({ name: 'test' }) # # @param [ Hash ] document The document to insert. # @param [ Hash ] opts The insert options. # # @option opts [ Session ] :session The session to use for the operation. # # @return [ Result ] The database response wrapper. # # @since 2.0.0 def insert_one(document, opts = {}) client.send(:with_session, opts) do |session| write_with_retry(session, write_concern) do |server, txn_num| Operation::Write::Insert.new( :documents => [ document ], :db_name => database.name, :coll_name => name, :write_concern => write_concern, :bypass_document_validation => !!opts[:bypass_document_validation], :options => opts, :id_generator => client.options[:id_generator], :session => session, :txn_num => txn_num ).execute(server) end end end # Insert the provided documents into the collection. # # @example Insert documents into the collection. # collection.insert_many([{ name: 'test' }]) # # @param [ Array ] documents The documents to insert. # @param [ Hash ] options The insert options. # # @option options [ Session ] :session The session to use for the operation. # # @return [ Result ] The database response wrapper. 
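# --- Illustrative sketch (not part of the gem source). insert_one above is
# wrapped in write_with_retry, so on a 3.6+ replica set or sharded cluster a
# client created with retry_writes: true retries it once using the same
# transaction number. Host and database names below are hypothetical.
require 'mongo'

client = Mongo::Client.new('mongodb://127.0.0.1:27017/test', retry_writes: true)
users  = client[:users]

users.insert_one(name: 'durran')
users.insert_many([ { name: 'emily' }, { name: 'sam' } ])

users.count            # => 3 on a previously empty collection
users.distinct(:name)  # => ["durran", "emily", "sam"] (order not guaranteed)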
# # @since 2.0.0 def insert_many(documents, options = {}) inserts = documents.map{ |doc| { :insert_one => doc }} bulk_write(inserts, options) end # Execute a batch of bulk write operations. # # @example Execute a bulk write. # collection.bulk_write(operations, options) # # @param [ Array ] requests The bulk write requests. # @param [ Hash ] options The options. # # @option options [ true, false ] :ordered Whether the operations # should be executed in order. # @option options [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. # @option options [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Session ] :session The session to use for the set of operations. # # @return [ BulkWrite::Result ] The result of the operation. # # @since 2.0.0 def bulk_write(requests, options = {}) BulkWrite.new(self, requests, options).execute end # Remove a document from the collection. # # @example Remove a single document from the collection. # collection.delete_one # # @param [ Hash ] filter The filter to use. # @param [ Hash ] options The options. # # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ Result ] The response from the database. # # @since 2.1.0 def delete_one(filter = nil, options = {}) find(filter, options).delete_one(options) end # Remove documents from the collection. # # @example Remove multiple documents from the collection. # collection.delete_many # # @param [ Hash ] filter The filter to use. # @param [ Hash ] options The options. # # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ Result ] The response from the database. # # @since 2.1.0 def delete_many(filter = nil, options = {}) find(filter, options).delete_many(options) end # Execute a parallel scan on the collection view. # # Returns a list of up to cursor_count cursors that can be iterated concurrently. # As long as the collection is not modified during scanning, each document appears once # in one of the cursors' result sets. # # @example Execute a parallel collection scan. # collection.parallel_scan(2) # # @param [ Integer ] cursor_count The max number of cursors to return. # @param [ Hash ] options The parallel scan command options. # # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command # to run in milliseconds. # @option options [ Session ] :session The session to use. # # @return [ Array ] An array of cursors. # # @since 2.1 def parallel_scan(cursor_count, options = {}) find({}, options).send(:parallel_scan, cursor_count, options) end # Replaces a single document in the collection with the new document. # # @example Replace a single document. # collection.replace_one({ name: 'test' }, { name: 'test1' }) # # @param [ Hash ] filter The filter to use. # @param [ Hash ] replacement The replacement document.. # @param [ Hash ] options The options. # # @option options [ true, false ] :upsert Whether to upsert if the # document doesn't exist. # @option options [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ Result ] The response from the database. 
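# --- Illustrative sketch (not part of the gem source). The bulk_write entry
# point defined above accepts a mixed list of requests; the collection below
# is the same hypothetical one used in the previous sketch.
require 'mongo'

users = Mongo::Client.new('mongodb://127.0.0.1:27017/test')[:users]

result = users.bulk_write(
  [
    { insert_one: { _id: 1, name: 'test' } },
    { update_one: { filter: { _id: 1 }, update: { '$set' => { name: 'test1' } } } },
    { delete_one: { filter: { _id: 99 } } }
  ],
  ordered: true
)

result.inserted_count # => 1
result.modified_count # => 1
result.deleted_count  # => 0 (no document with _id 99)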
# # @since 2.1.0 def replace_one(filter, replacement, options = {}) find(filter, options).replace_one(replacement, options) end # Update documents in the collection. # # @example Update multiple documents in the collection. # collection.update_many({ name: 'test'}, '$set' => { name: 'test1' }) # # @param [ Hash ] filter The filter to use. # @param [ Hash ] update The update statement. # @param [ Hash ] options The options. # # @option options [ true, false ] :upsert Whether to upsert if the # document doesn't exist. # @option options [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Hash ] :collation The collation to use. # @option options [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # @option options [ Session ] :session The session to use. # # @return [ Result ] The response from the database. # # @since 2.1.0 def update_many(filter, update, options = {}) find(filter, options).update_many(update, options) end # Update a single document in the collection. # # @example Update a single document in the collection. # collection.update_one({ name: 'test'}, '$set' => { name: 'test1'}) # # @param [ Hash ] filter The filter to use. # @param [ Hash ] update The update statement. # @param [ Hash ] options The options. # # @option options [ true, false ] :upsert Whether to upsert if the # document doesn't exist. # @option options [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Hash ] :collation The collation to use. # @option options [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # @option options [ Session ] :session The session to use. # # @return [ Result ] The response from the database. # # @since 2.1.0 def update_one(filter, update, options = {}) find(filter, options).update_one(update, options) end # Finds a single document in the database via findAndModify and deletes # it, returning the original document. # # @example Find one document and delete it. # collection.find_one_and_delete(name: 'test') # # @param [ Hash ] filter The filter to use. # @param [ Hash ] options The options. # # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command # to run in milliseconds. # @option options [ Hash ] :projection The fields to include or exclude in the returned doc. # @option options [ Hash ] :sort The key and direction pairs by which the result set # will be sorted. # @option options [ Hash ] :write_concern The write concern options. # Defaults to the collection's write concern. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ BSON::Document, nil ] The document, if found. # # @since 2.1.0 def find_one_and_delete(filter, options = {}) find(filter, options).find_one_and_delete(options) end # Finds a single document via findAndModify and updates it, returning the original doc unless # otherwise specified. # # @example Find a document and update it, returning the original. # collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }}) # # @example Find a document and update it, returning the updated document. # collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }}, :return_document => :after) # # @param [ Hash ] filter The filter to use. # @param [ BSON::Document ] update The update statement. 
# @param [ Hash ] options The options. # # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command # to run in milliseconds. # @option options [ Hash ] :projection The fields to include or exclude in the returned doc. # @option options [ Hash ] :sort The key and direction pairs by which the result set # will be sorted. # @option options [ Symbol ] :return_document Either :before or :after. # @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist. # @option options [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Hash ] :write_concern The write concern options. # Defaults to the collection's write concern. # @option options [ Hash ] :collation The collation to use. # @option options [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # @option options [ Session ] :session The session to use. # # @return [ BSON::Document ] The document. # # @since 2.1.0 def find_one_and_update(filter, update, options = {}) find(filter, options).find_one_and_update(update, options) end # Finds a single document and replaces it, returning the original doc unless # otherwise specified. # # @example Find a document and replace it, returning the original. # collection.find_one_and_replace({ name: 'test' }, { name: 'test1' }) # # @example Find a document and replace it, returning the new document. # collection.find_one_and_replace({ name: 'test' }, { name: 'test1' }, :return_document => :after) # # @param [ Hash ] filter The filter to use. # @param [ BSON::Document ] replacement The replacement document. # @param [ Hash ] options The options. # # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command # to run in milliseconds. # @option options [ Hash ] :projection The fields to include or exclude in the returned doc. # @option options [ Hash ] :sort The key and direction pairs by which the result set # will be sorted. # @option options [ Symbol ] :return_document Either :before or :after. # @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist. # @option options [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Hash ] :write_concern The write concern options. # Defaults to the collection's write concern. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. # # @return [ BSON::Document ] The document. # # @since 2.1.0 def find_one_and_replace(filter, replacement, options = {}) find(filter, options).find_one_and_update(replacement, options) end # Get the fully qualified namespace of the collection. # # @example Get the fully qualified namespace. # collection.namespace # # @return [ String ] The collection namespace. # # @since 2.0.0 def namespace "#{database.name}.#{name}" end end end mongo-2.5.1/lib/mongo/bulk_write.rb0000644000004100000410000001656313257253113017264 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/bulk_write/result' require 'mongo/bulk_write/transformable' require 'mongo/bulk_write/validatable' require 'mongo/bulk_write/combineable' require 'mongo/bulk_write/ordered_combiner' require 'mongo/bulk_write/unordered_combiner' require 'mongo/bulk_write/result_combiner' module Mongo class BulkWrite extend Forwardable include Retryable # @return [ Mongo::Collection ] collection The collection. attr_reader :collection # @return [ Array ] requests The requests. attr_reader :requests # @return [ Hash, BSON::Document ] options The options. attr_reader :options # Delegate various methods to the collection. def_delegators :@collection, :database, :cluster, :next_primary def_delegators :database, :client # Execute the bulk write operation. # # @example Execute the bulk write. # bulk_write.execute # # @return [ Mongo::BulkWrite::Result ] The result. # # @since 2.1.0 def execute operation_id = Monitoring.next_operation_id result_combiner = ResultCombiner.new operations = op_combiner.combine client.send(:with_session, @options) do |session| operations.each do |operation| if single_statement?(operation) write_with_retry(session, write_concern) do |server, txn_num| execute_operation( operation.keys.first, operation.values.first, server, operation_id, result_combiner, session, txn_num) end else legacy_write_with_retry do |server| execute_operation( operation.keys.first, operation.values.first, server, operation_id, result_combiner, session) end end end end result_combiner.result end # Create the new bulk write operation. # # @api private # # @example Create an ordered bulk write. # Mongo::BulkWrite.new(collection, [{ insert_one: { _id: 1 }}]) # # @example Create an unordered bulk write. # Mongo::BulkWrite.new(collection, [{ insert_one: { _id: 1 }}], ordered: false) # # @example Create an ordered mixed bulk write. # Mongo::BulkWrite.new( # collection, # [ # { insert_one: { _id: 1 }}, # { update_one: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}}, # { delete_one: { filter: { _id: 2 }}} # ] # ) # # @param [ Mongo::Collection ] collection The collection. # @param [ Array ] requests The requests. # @param [ Hash, BSON::Document ] options The options. # # @since 2.1.0 def initialize(collection, requests, options = {}) @collection = collection @requests = requests @options = options || {} end # Is the bulk write ordered? # # @api private # # @example Is the bulk write ordered? # bulk_write.ordered? # # @return [ true, false ] If the bulk write is ordered. # # @since 2.1.0 def ordered? @ordered ||= options.fetch(:ordered, true) end # Get the write concern for the bulk write. # # @api private # # @example Get the write concern. # bulk_write.write_concern # # @return [ WriteConcern ] The write concern. # # @since 2.1.0 def write_concern @write_concern ||= options[:write_concern] ? 
WriteConcern.get(options[:write_concern]) : collection.write_concern end private SINGLE_STATEMENT_OPS = [ :delete_one, :update_one, :insert_one ].freeze def single_statement?(operation) SINGLE_STATEMENT_OPS.include?(operation.keys.first) end def base_spec(operation_id, session) { :db_name => database.name, :coll_name => collection.name, :write_concern => write_concern, :ordered => ordered?, :operation_id => operation_id, :bypass_document_validation => !!options[:bypass_document_validation], :options => options, :id_generator => client.options[:id_generator], :session => session } end def execute_operation(name, values, server, operation_id, combiner, session, txn_num = nil) raise Error::UnsupportedCollation.new if op_combiner.has_collation && !server.features.collation_enabled? raise Error::UnsupportedArrayFilters.new if op_combiner.has_array_filters && !server.features.array_filters_enabled? begin if values.size > server.max_write_batch_size split_execute(name, values, server, operation_id, combiner, session, txn_num) else combiner.combine!(send(name, values, server, operation_id, session, txn_num), values.size) end rescue Error::MaxBSONSize, Error::MaxMessageSize => e raise e if values.size <= 1 split_execute(name, values, server, operation_id, combiner, session, txn_num) end end def op_combiner @op_combiner ||= ordered? ? OrderedCombiner.new(requests) : UnorderedCombiner.new(requests) end def split_execute(name, values, server, operation_id, combiner, session, txn_num) execute_operation(name, values.shift(values.size / 2), server, operation_id, combiner, session, txn_num) txn_num = session.next_txn_num if txn_num execute_operation(name, values, server, operation_id, combiner, session, txn_num) end def delete_one(documents, server, operation_id, session, txn_num) Operation::Write::Bulk::Delete.new( base_spec(operation_id, session).merge(:deletes => documents, :txn_num => txn_num) ).execute(server) end def delete_many(documents, server, operation_id, session, txn_num) Operation::Write::Bulk::Delete.new( base_spec(operation_id, session).merge(:deletes => documents) ).execute(server) end def insert_one(documents, server, operation_id, session, txn_num) Operation::Write::Bulk::Insert.new( base_spec(operation_id, session).merge(:documents => documents, :txn_num => txn_num) ).execute(server) end def update_one(documents, server, operation_id, session, txn_num) Operation::Write::Bulk::Update.new( base_spec(operation_id, session).merge(:updates => documents, :txn_num => txn_num) ).execute(server) end alias :replace_one :update_one def update_many(documents, server, operation_id, session, txn_num) Operation::Write::Bulk::Update.new( base_spec(operation_id, session).merge(:updates => documents) ).execute(server) end end end mongo-2.5.1/lib/mongo/event/0000755000004100000410000000000013257253113015676 5ustar www-datawww-datamongo-2.5.1/lib/mongo/event/description_changed.rb0000644000004100000410000000404613257253113022223 0ustar www-datawww-data # Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
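# --- Aside (not part of the gem source). split_execute above recursively
# halves a batch whenever it exceeds the server's max_write_batch_size or
# trips the BSON/message size limits. A toy model of that recursion, using a
# made-up numeric limit instead of a server feature check:
def halve_until_fits(values, limit, &block)
  if values.size > limit
    # Same shape as split_execute: peel off the first half, recurse on both.
    halve_until_fits(values.shift(values.size / 2), limit, &block)
    halve_until_fits(values, limit, &block)
  else
    yield values
  end
end

halve_until_fits((1..5).to_a, 2) { |slice| p slice }
# prints [1, 2], then [3], then [4, 5]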
# See the License for the specific language governing permissions and # limitations under the License. module Mongo module Event # This handles a change in description. # # @since 2.0.6 class DescriptionChanged include Monitoring::Publishable # @return [ Mongo::Cluster ] cluster The cluster. attr_reader :cluster # @return [ Hash ] options The options. attr_reader :options # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # Initialize the new host added event handler. # # @example Create the new handler. # ServerAdded.new(cluster) # # @param [ Mongo::Cluster ] cluster The cluster to publish from. # # @since 2.0.0 def initialize(cluster) @cluster = cluster @options = cluster.options @monitoring = cluster.monitoring end # This event publishes an event to add the cluster and logs the # configuration change. # # @example Handle the event. # server_added.handle('127.0.0.1:27018') # # @param [ Server::Description ] updated The changed description. # # @since 2.0.0 def handle(previous, updated) publish_sdam_event( Monitoring::SERVER_DESCRIPTION_CHANGED, Monitoring::Event::ServerDescriptionChanged.new( updated.address, cluster.topology, previous, updated ) ) cluster.add_hosts(updated) cluster.remove_hosts(updated) end end end end mongo-2.5.1/lib/mongo/event/listeners.rb0000644000004100000410000000333713257253113020241 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Event # The queue of events getting processed in the client. # # @since 2.0.0 class Listeners # Initialize the event listeners. # # @example Initialize the event listeners. # Listeners.new # # @since 2.0.0 def initialize @listeners = {} end # Add an event listener for the provided event. # # @example Add an event listener # publisher.add_listener("my_event", listener) # # @param [ String ] event The event to listen for. # @param [ Object ] listener The event listener. # # @return [ Array ] The listeners for the event. # # @since 2.0.0 def add_listener(event, listener) listeners_for(event).push(listener) end # Get the listeners for a specific event. # # @example Get the listeners. # publisher.listeners_for("test") # # @param [ String ] event The event name. # # @return [ Array ] The listeners. # # @since 2.0.0 def listeners_for(event) @listeners[event] ||= [] end end end end mongo-2.5.1/lib/mongo/event/member_discovered.rb0000644000004100000410000000376313257253113021712 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. module Mongo module Event # This handles member discovered events for server descriptions. # # @since 2.4.0 class MemberDiscovered include Monitoring::Publishable # @return [ Mongo::Cluster ] cluster The cluster. attr_reader :cluster # @return [ Hash ] options The options. attr_reader :options # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # Initialize the new member discovered event handler. # # @example Create the new handler. # MemberDiscovered.new(cluster) # # @param [ Mongo::Cluster ] cluster The cluster to publish from. # # @since 2.0.0 def initialize(cluster) @cluster = cluster @options = cluster.options @monitoring = cluster.monitoring end # This event tells the cluster that a member of a topology is discovered. # # @example Handle the event. # member_discovered.handle(previous_description, description) # # @param [ Server::Description ] previous The previous description of the server. # @param [ Server::Description ] updated The updated description of the server. # # @since 2.4.0 def handle(previous, updated) if updated.primary? || updated.mongos? cluster.elect_primary!(updated) else cluster.member_discovered end end end end end mongo-2.5.1/lib/mongo/event/standalone_discovered.rb0000644000004100000410000000302513257253113022562 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Event # This handles when a standalone is discovered. # # @since 2.0.6 class StandaloneDiscovered # @return [ Mongo::Cluster ] cluster The cluster. attr_reader :cluster # Initialize the new standalone discovered event handler. # # @example Create the new handler. # StandaloneDiscovered.new(cluster) # # @param [ Mongo::Cluster ] cluster The cluster to publish from. # # @since 2.0.6 def initialize(cluster) @cluster = cluster end # This event tells the cluster to notify its topology that a standalone # was discovered. # # @example Handle the event. # standalone_discovered.handle(description) # # @param [ Server::Description ] description The description of the # server. # # @since 2.0.6 def handle(description) cluster.standalone_discovered end end end end mongo-2.5.1/lib/mongo/event/publisher.rb0000644000004100000410000000242613257253113020224 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
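# --- Illustrative sketch (not part of the gem source). The Listeners
# registry above and the Publisher module below cooperate as a simple
# publish/subscribe pair; HostLogger, Notifier and the 'my_event' name are
# made up for the example.
require 'mongo'

class HostLogger
  # Event handlers only need to respond to #handle.
  def handle(*args)
    puts "handled: #{args.inspect}"
  end
end

class Notifier
  include Mongo::Event::Publisher

  def initialize(event_listeners)
    @event_listeners = event_listeners
  end
end

listeners = Mongo::Event::Listeners.new
listeners.add_listener('my_event', HostLogger.new)

Notifier.new(listeners).publish('my_event', '127.0.0.1:27017')
# prints: handled: ["127.0.0.1:27017"]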
module Mongo module Event # This module is included for objects that need to publish events. # # @since 2.0.0 module Publisher # @return [ Event::Listeners ] event_listeners The listeners. attr_reader :event_listeners # Publish the provided event. # # @example Publish an event. # publisher.publish("my_event", "payload") # # @param [ String ] event The event to publish. # @param [ Array ] args The objects to pass to the listeners. # # @since 2.0.0 def publish(event, *args) event_listeners.listeners_for(event).each do |listener| listener.handle(*args) end end end end end mongo-2.5.1/lib/mongo/event/primary_elected.rb0000644000004100000410000000310513257253113021372 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Event # This handles primary elected events for server descriptions. # # @since 2.0.0 # # @deprecated. Will be removed in 3.0 class PrimaryElected # @return [ Mongo::Cluster ] cluster The cluster. attr_reader :cluster # Initialize the new primary elected event handler. # # @example Create the new handler. # PrimaryElected.new(cluster) # # @param [ Mongo::Cluster ] cluster The cluster to publish from. # # @since 2.0.0 def initialize(cluster) @cluster = cluster end # This event tells the cluster to take all previous primaries to an # unknown state. # # @example Handle the event. # primary_elected.handle(description) # # @param [ Server::Description ] description The description of the # elected server. # # @since 2.0.0 def handle(description) cluster.elect_primary!(description) end end end end mongo-2.5.1/lib/mongo/event/subscriber.rb0000644000004100000410000000232713257253113020372 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module Event # Adds convenience methods for adding listeners to event publishers. # # @since 2.0.0 module Subscriber # @return [ Event::Listeners ] event_listeners The listeners. attr_reader :event_listeners # Subscribe to the provided event. # # @example Subscribe to the event. # subscriber.subscribe_to('test', listener) # # @param [ String ] event The event. # @param [ Object ] listener The event listener. 
# # @since 2.0.0 def subscribe_to(event, listener) event_listeners.add_listener(event, listener) end end end end mongo-2.5.1/lib/mongo/write_concern/0000755000004100000410000000000013257253113017416 5ustar www-datawww-datamongo-2.5.1/lib/mongo/write_concern/unacknowledged.rb0000644000004100000410000000307113257253113022736 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module WriteConcern # An unacknowledged write concern will provide no error on write outside of # network and connection exceptions. # # @since 2.0.0 class Unacknowledged include Normalizable # The noop constant for the gle. # # @since 2.0.0 NOOP = nil # Get the gle command for an unacknowledged write. # # @example Get the gle command. # unacknowledged.get_last_error # # @return [ nil ] The noop. # # @since 2.0.0 def get_last_error NOOP end # Get a human-readable string representation of an unacknowledged write concern. # # @example Inspect the write concern. # write_concern.inspect # # @return [ String ] A string representation of an unacknowledged write concern. # # @since 2.0.0 def inspect "#" end end end end mongo-2.5.1/lib/mongo/write_concern/acknowledged.rb0000644000004100000410000000314013257253113022370 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module WriteConcern # An acknowledged write concern provides a get last error command with the # appropriate options on each write operation. # # @since 2.0.0 class Acknowledged include Normalizable # Get the get last error command for the concern. # # @example Get the gle command. # acknowledged.get_last_error # # @return [ Hash ] The gle command. # # @since 2.0.0 def get_last_error @get_last_error ||= { GET_LAST_ERROR => 1 }.merge( Options::Mapper.transform_values_to_strings(options) ) end # Get a human-readable string representation of an acknowledged write concern. # # @example Inspect the write concern. # write_concern.inspect # # @return [ String ] A string representation of an acknowledged write concern. # # @since 2.0.0 def inspect "#" end end end end mongo-2.5.1/lib/mongo/write_concern/normalizable.rb0000644000004100000410000000415413257253113022426 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module WriteConcern # Defines default behavior for write concerns and provides a factory # interface to get a proper object from options. # # @since 2.0.0 module Normalizable # @return [ Hash ] The write concern options. attr_reader :options # Instantiate a new write concern given the options. # # @api private # # @example Instantiate a new write concern mode. # Mongo::WriteConcern.new(:w => 1) # # @param [ Hash ] options The options to instantiate with. # # @option options :w [ Integer, String ] The number of servers or the # custom mode to acknowledge. # @option options :j [ true, false ] Whether to acknowledge a write to # the journal. # @option options :fsync [ true, false ] Should the write be synced to # disc. # @option options :wtimeout [ Integer ] The number of milliseconds to # wait for acknowledgement before raising an error. # # @since 2.0.0 def initialize(options) opts = Options::Mapper.transform_keys_to_symbols(options) @options = Options::Mapper.transform_values_to_strings(opts).freeze end # Is this write concern acknowledged. # # @example Whether this write concern object is acknowledged. # write_concern.acknowledged? # # @return [ true, false ] Whether this write concern is acknowledged. # # @since 2.5.0 def acknowledged? !!get_last_error end end end end mongo-2.5.1/lib/mongo/server_selector/0000755000004100000410000000000013257253113017763 5ustar www-datawww-datamongo-2.5.1/lib/mongo/server_selector/primary_preferred.rb0000644000004100000410000000624013257253113024033 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module ServerSelector # Encapsulates specifications for selecting servers, with the # primary preferred, given a list of candidates. # # @since 2.0.0 class PrimaryPreferred include Selectable # Name of the this read preference in the server's format. # # @since 2.5.0 SERVER_FORMATTED_NAME = 'primaryPreferred'.freeze # Get the name of the server mode type. # # @example Get the name of the server mode for this preference. # preference.name # # @return [ Symbol ] :primary_preferred # # @since 2.0.0 def name :primary_preferred end # Whether the slaveOk bit should be set on wire protocol messages. # I.e. whether the operation can be performed on a secondary server. # # @return [ true ] true # # @since 2.0.0 def slave_ok? true end # Whether tag sets are allowed to be defined for this server preference. # # @return [ true ] true # # @since 2.0.0 def tags_allowed? true end # Convert this server preference definition into a format appropriate # for a mongos server. 
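# --- Illustrative sketch (not part of the gem source). Normalizable above
# backs both concern classes; the WriteConcern.get factory (defined in
# write_concern.rb, outside this excerpt) picks between them based on the
# :w value.
require 'mongo'

acknowledged = Mongo::WriteConcern.get(w: 2, j: true)
acknowledged.class          # => Mongo::WriteConcern::Acknowledged
acknowledged.acknowledged?  # => true
acknowledged.get_last_error # => getlasterror document built from the options

unacknowledged = Mongo::WriteConcern.get(w: 0)
unacknowledged.class          # => Mongo::WriteConcern::Unacknowledged
unacknowledged.acknowledged?  # => false
unacknowledged.get_last_error # => nil (the NOOP above)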
# # @example Convert this server preference definition into a format # for mongos. # preference = Mongo::ServerSelector::PrimaryPreferred.new # preference.to_mongos # # @return [ Hash ] The server preference formatted for a mongos server. # # @since 2.0.0 def to_mongos @doc ||= (preference = { :mode => SERVER_FORMATTED_NAME } preference.merge!({ :tags => tag_sets }) unless tag_sets.empty? preference.merge!({ maxStalenessSeconds: max_staleness }) if max_staleness preference) end alias :to_doc :to_mongos private # Select servers taking into account any defined tag sets and # local threshold, with the primary preferred. # # @example Select servers given a list of candidates, # with the primary preferred. # preference = Mongo::ServerSelector::PrimaryPreferred.new # preference.select([candidate_1, candidate_2]) # # @return [ Array ] A list of servers matching tag sets and acceptable # latency with the primary preferred. # # @since 2.0.0 def select(candidates) primary = primary(candidates) secondaries = near_servers(secondaries(candidates)) primary.first ? primary : secondaries end def max_staleness_allowed? true end end end end mongo-2.5.1/lib/mongo/server_selector/selectable.rb0000644000004100000410000002367113257253113022424 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module ServerSelector # Provides common behavior for filtering a list of servers by server mode or tag set. # # @since 2.0.0 module Selectable # @return [ Hash ] options The options. attr_reader :options # @return [ Array ] tag_sets The tag sets used to select servers. attr_reader :tag_sets # @return [ Integer ] max_staleness The maximum replication lag, in seconds, that a # secondary can suffer and still be eligible for a read. # # @since 2.4.0 attr_reader :max_staleness # Check equality of two server selector. # # @example Check server selector equality. # preference == other # # @param [ Object ] other The other preference. # # @return [ true, false ] Whether the objects are equal. # # @since 2.0.0 def ==(other) name == other.name && tag_sets == other.tag_sets && max_staleness == other.max_staleness end # Initialize the server selector. # # @example Initialize the selector. # Mongo::ServerSelector::Secondary.new(:tag_sets => [{'dc' => 'nyc'}]) # # @example Initialize the preference with no options. # Mongo::ServerSelector::Secondary.new # # @param [ Hash ] options The server preference options. # # @option options [ Integer ] :local_threshold The local threshold boundary for # nearest selection in seconds. # # @raise [ Error::InvalidServerPreference ] If tag sets are specified # but not allowed. # # @since 2.0.0 def initialize(options = {}) @options = (options || {}).freeze @tag_sets = (options[:tag_sets] || []).freeze @max_staleness = options[:max_staleness] unless options[:max_staleness] == -1 validate! end # Inspect the server selector. # # @example Inspect the server selector. # selector.inspect # # @return [ String ] The inspection. 
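# --- Illustrative sketch (not part of the gem source). The ServerSelector.get
# factory (defined elsewhere in this gem) builds the preference objects
# defined above, and to_mongos renders the read preference document sent to
# mongos. The tag set and staleness values below are made up.
require 'mongo'

pref = Mongo::ServerSelector.get(
  mode:          :primary_preferred,
  tag_sets:      [ { 'dc' => 'nyc' } ],
  max_staleness: 120
)

pref.name       # => :primary_preferred
pref.slave_ok?  # => true
pref.to_mongos
# => { :mode => 'primaryPreferred', :tags => [{ 'dc' => 'nyc' }], :maxStalenessSeconds => 120 }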
# # @since 2.2.0 def inspect "#<#{self.class.name}:0x#{object_id} tag_sets=#{tag_sets.inspect} max_staleness=#{max_staleness.inspect}>" end # Select a server from eligible candidates. # # @example Select a server from the cluster. # selector.select_server(cluster) # # @param [ Mongo::Cluster ] cluster The cluster from which to select an eligible server. # # @return [ Mongo::Server ] A server matching the server preference. # # @since 2.0.0 def select_server(cluster, ping = true) @local_threshold = cluster.options[:local_threshold] || LOCAL_THRESHOLD @server_selection_timeout = cluster.options[:server_selection_timeout] || SERVER_SELECTION_TIMEOUT deadline = Time.now + server_selection_timeout while (deadline - Time.now) > 0 servers = candidates(cluster) if servers && !servers.compact.empty? server = servers.first # There is no point pinging a standalone as the subsequent scan is # not going to change anything about the cluster. if ping && !cluster.single? if server.connectable? server.check_driver_support! return server end else server.check_driver_support! return server end end cluster.scan! end raise Error::NoServerAvailable.new(self) end # Get the timeout for server selection. # # @example Get the server selection timeout, in seconds. # selector.server_selection_timeout # # @return [ Float ] The timeout. # # @since 2.0.0 # # @deprecated This setting is now taken from the cluster options when a server is selected. # Will be removed in 3.0. def server_selection_timeout @server_selection_timeout ||= (options[:server_selection_timeout] || ServerSelector::SERVER_SELECTION_TIMEOUT) end # Get the local threshold boundary for nearest selection in seconds. # # @example Get the local threshold. # selector.local_threshold # # @return [ Float ] The local threshold. # # @since 2.0.0 # # @deprecated This setting is now taken from the cluster options when a server is selected. # Will be removed in 3.0. def local_threshold @local_threshold ||= (options[:local_threshold] || ServerSelector::LOCAL_THRESHOLD) end # Get the potential candidates to select from the cluster. # # @example Get the server candidates. # selectable.candidates(cluster) # # @param [ Cluster ] cluster The cluster. # # @return [ Array ] The candidate servers. # # @since 2.4.0 def candidates(cluster) if cluster.single? cluster.servers.each { |server| validate_max_staleness_support!(server) } elsif cluster.sharded? near_servers(cluster.servers).each { |server| validate_max_staleness_support!(server) } else validate_max_staleness_value!(cluster) unless cluster.unknown? select(cluster.servers) end end private # Select the primary from a list of provided candidates. # # @param [ Array ] candidates List of candidate servers to select the # primary from. # # @return [ Array ] The primary. # # @since 2.0.0 def primary(candidates) candidates.select do |server| server.primary? end end # Select the secondaries from a list of provided candidates. # # @param [ Array ] candidates List of candidate servers to select the # secondaries from. # # @return [ Array ] The secondary servers. # # @since 2.0.0 def secondaries(candidates) matching_servers = candidates.select(&:secondary?) matching_servers = filter_stale_servers(matching_servers, primary(candidates).first) matching_servers = match_tag_sets(matching_servers) unless tag_sets.empty? matching_servers end # Select the near servers from a list of provided candidates, taking the # local threshold into account. # # @param [ Array ] candidates List of candidate servers to select the # near servers from. 
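# A sketch of server selection against a live cluster, assuming a deployment
# reachable at 127.0.0.1:27017; select_server takes the client's cluster and
# keeps scanning until the server selection timeout is exceeded.
require 'mongo'

client   = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')
selector = Mongo::ServerSelector::PrimaryPreferred.new
server   = selector.select_server(client.cluster)
server.address # => the address of the primary, or of a secondary if no primary is available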
# # @return [ Array ] The near servers. # # @since 2.0.0 def near_servers(candidates = []) return candidates if candidates.empty? nearest_server = candidates.min_by(&:average_round_trip_time) threshold = nearest_server.average_round_trip_time + local_threshold candidates.select { |server| server.average_round_trip_time <= threshold }.shuffle! end # Select the servers matching the defined tag sets. # # @param [ Array ] candidates List of candidate servers from which those # matching the defined tag sets should be selected. # # @return [ Array ] The servers matching the defined tag sets. # # @since 2.0.0 def match_tag_sets(candidates) matches = [] tag_sets.find do |tag_set| matches = candidates.select { |server| server.matches_tag_set?(tag_set) } !matches.empty? end matches || [] end def filter_stale_servers(candidates, primary = nil) return candidates unless @max_staleness max_staleness_ms = @max_staleness * 1000 if primary candidates.select do |server| validate_max_staleness_support!(server) staleness = (server.last_scan - server.last_write_date) - (primary.last_scan - primary.last_write_date) + (server.heartbeat_frequency_seconds * 1000) staleness <= max_staleness_ms end else max_write_date = candidates.collect(&:last_write_date).max candidates.select do |server| validate_max_staleness_support!(server) staleness = max_write_date - server.last_write_date + (server.heartbeat_frequency_seconds * 1000) staleness <= max_staleness_ms end end end def validate! if !@tag_sets.all? { |set| set.empty? } && !tags_allowed? raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::NO_TAG_SUPPORT) elsif @max_staleness && !max_staleness_allowed? raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::NO_MAX_STALENESS_SUPPORT) end end def validate_max_staleness_support!(server) if @max_staleness && !server.features.max_staleness_enabled? raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::NO_MAX_STALENESS_WITH_LEGACY_SERVER) end end def validate_max_staleness_value!(cluster) if @max_staleness heartbeat_frequency_seconds = cluster.options[:heartbeat_frequency] || Server::Monitor::HEARTBEAT_FREQUENCY unless @max_staleness >= [ SMALLEST_MAX_STALENESS_SECONDS, (heartbeat_frequency_seconds + Cluster::IDLE_WRITE_PERIOD_SECONDS) ].max raise Error::InvalidServerPreference.new(Error::InvalidServerPreference::INVALID_MAX_STALENESS) end end end end end end mongo-2.5.1/lib/mongo/server_selector/secondary.rb0000644000004100000410000000573313257253113022307 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module ServerSelector # Encapsulates specifications for selecting secondary servers given a list # of candidates. # # @since 2.0.0 class Secondary include Selectable # Name of the this read preference in the server's format. # # @since 2.5.0 SERVER_FORMATTED_NAME = 'secondary'.freeze # Get the name of the server mode type. # # @example Get the name of the server mode for this preference. 
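# Worked numbers for the staleness filter above (values hypothetical; the
# driver computes in milliseconds, shown here in seconds for readability).
# With a primary P, a secondary S is kept when:
#   (S.last_scan - S.last_write_date) - (P.last_scan - P.last_write_date) +
#     heartbeat_frequency <= max_staleness
heartbeat_frequency = 10   # seconds
max_staleness       = 90   # seconds; must be >= max(90, heartbeat frequency + idle write period)
primary_lag         = 2    # P.last_scan - P.last_write_date
secondary_lag       = 65   # S.last_scan - S.last_write_date
staleness = (secondary_lag - primary_lag) + heartbeat_frequency
staleness <= max_staleness # => true, so this secondary remains eligible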
# preference.name # # @return [ Symbol ] :secondary # # @since 2.0.0 def name :secondary end # Whether the slaveOk bit should be set on wire protocol messages. # I.e. whether the operation can be performed on a secondary server. # # @return [ true ] true # # @since 2.0.0 def slave_ok? true end # Whether tag sets are allowed to be defined for this server preference. # # @return [ true ] true # # @since 2.0.0 def tags_allowed? true end # Convert this server preference definition into a format appropriate # for a mongos server. # # @example Convert this server preference definition into a format # for mongos. # preference = Mongo::ServerSelector::Secondary.new # preference.to_mongos # # @return [ Hash ] The server preference formatted for a mongos server. # # @since 2.0.0 def to_mongos @doc ||= (preference = { :mode => SERVER_FORMATTED_NAME } preference.merge!({ :tags => tag_sets }) unless tag_sets.empty? preference.merge!({ maxStalenessSeconds: max_staleness }) if max_staleness preference) end alias :to_doc :to_mongos private # Select the secondary servers taking into account any defined tag sets and # local threshold between the nearest secondary and other secondaries. # # @example Select secondary servers given a list of candidates. # preference = Mongo::ServerSelector::Secondary.new # preference.select([candidate_1, candidate_2]) # # @return [ Array ] The secondary servers from the list of candidates. # # @since 2.0.0 def select(candidates) near_servers(secondaries(candidates)) end def max_staleness_allowed? true end end end end mongo-2.5.1/lib/mongo/server_selector/nearest.rb0000644000004100000410000000612013257253113021750 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module ServerSelector # Encapsulates specifications for selecting near servers given a list # of candidates. # # @since 2.0.0 class Nearest include Selectable # Name of the this read preference in the server's format. # # @since 2.5.0 SERVER_FORMATTED_NAME = 'nearest'.freeze # Get the name of the server mode type. # # @example Get the name of the server mode for this preference. # preference.name # # @return [ Symbol ] :nearest # # @since 2.0.0 def name :nearest end # Whether the slaveOk bit should be set on wire protocol messages. # I.e. whether the operation can be performed on a secondary server. # # @return [ true ] true # # @since 2.0.0 def slave_ok? true end # Whether tag sets are allowed to be defined for this server preference. # # @return [ true ] true # # @since 2.0.0 def tags_allowed? true end # Convert this server preference definition into a format appropriate # for a mongos server. # # @example Convert this server preference definition into a format # for mongos. # preference = Mongo::ServerSelector::Nearest.new # preference.to_mongos # # @return [ Hash ] The server preference formatted for a mongos server. 
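# A sketch contrasting the mongos formatting of the read preference classes
# in this directory: Secondary and Nearest always produce a mode document,
# Primary produces nothing, and SecondaryPreferred produces a document only
# when tag sets or max staleness are present (for backwards compatibility).
require 'mongo'

Mongo::ServerSelector::Secondary.new(tag_sets: [{ 'dc' => 'nyc' }]).to_mongos
# => { :mode => 'secondary', :tags => [{ 'dc' => 'nyc' }] }

Mongo::ServerSelector::Nearest.new.to_mongos
# => { :mode => 'nearest' }

Mongo::ServerSelector::Primary.new.to_mongos
# => nil

Mongo::ServerSelector::SecondaryPreferred.new.to_mongos
# => nil (no tag sets and no max staleness)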
# # @since 2.0.0 def to_mongos @doc ||= (preference = { :mode => SERVER_FORMATTED_NAME } preference.merge!({ :tags => tag_sets }) unless tag_sets.empty? preference.merge!({ maxStalenessSeconds: max_staleness }) if max_staleness preference) end alias :to_doc :to_mongos private # Select the near servers taking into account any defined tag sets and # local threshold between the nearest server and other servers. # # @example Select nearest servers given a list of candidates. # preference = Mongo::ServerSelector::Nearest.new # preference.select_server(cluster) # # @return [ Array ] The nearest servers from the list of candidates. # # @since 2.0.0 def select(candidates) matching_servers = filter_stale_servers(candidates, primary(candidates).first) matching_servers = match_tag_sets(matching_servers) unless tag_sets.empty? near_servers(matching_servers) end def max_staleness_allowed? true end end end end mongo-2.5.1/lib/mongo/server_selector/primary.rb0000644000004100000410000000601713257253113021777 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module ServerSelector # Encapsulates specifications for selecting the primary server given a list # of candidates. # # @since 2.0.0 class Primary include Selectable # Name of the this read preference in the server's format. # # @since 2.5.0 SERVER_FORMATTED_NAME = 'primary'.freeze # Get the name of the server mode type. # # @example Get the name of the server mode for this preference. # preference.name # # @return [ Symbol ] :primary # # @since 2.0.0 def name :primary end # Whether the slaveOk bit should be set on wire protocol messages. # I.e. whether the operation can be performed on a secondary server. # # @return [ false ] false # # @since 2.0.0 def slave_ok? false end # Whether tag sets are allowed to be defined for this server preference. # # @return [ false ] false # # @since 2.0.0 def tags_allowed? false end # Convert this server preference definition into a format appropriate # for a mongos server. # # @example Convert this server preference definition into a format # for mongos. # preference = Mongo::ServerSelector::Primary.new # preference.to_mongos # # @return [ nil ] nil # # @since 2.0.0 def to_mongos nil end # Convert this server preference definition into a format appropriate # for a mongodb server. # # @example Convert this server preference definition into a format # for a server. # preference = Mongo::ServerSelector::Primary.new # preference.to_doc # # @return [ Hash ] The server preference formatted for a mongodb server. # # @since 2.5.0 def to_doc @doc ||= { mode: SERVER_FORMATTED_NAME } end private # Select the primary server from a list of candidates. # # @example Select the primary server given a list of candidates. # preference = Mongo::ServerSelector::Primary.new # preference.select([candidate_1, candidate_2]) # # @return [ Array ] The primary server from the list of candidates. # # @since 2.0.0 def select(candidates) primary(candidates) end def max_staleness_allowed? 
false end end end end mongo-2.5.1/lib/mongo/server_selector/secondary_preferred.rb0000644000004100000410000000732713257253113024346 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo module ServerSelector # Encapsulates specifications for selecting servers, with # secondaries preferred, given a list of candidates. # # @since 2.0.0 class SecondaryPreferred include Selectable # Name of the this read preference in the server's format. # # @since 2.5.0 SERVER_FORMATTED_NAME = 'secondaryPreferred'.freeze # Get the name of the server mode type. # # @example Get the name of the server mode for this preference. # preference.name # # @return [ Symbol ] :secondary_preferred # # @since 2.0.0 def name :secondary_preferred end # Whether the slaveOk bit should be set on wire protocol messages. # I.e. whether the operation can be performed on a secondary server. # # @return [ true ] true # # @since 2.0.0 def slave_ok? true end # Whether tag sets are allowed to be defined for this server preference. # # @return [ true ] true # # @since 2.0.0 def tags_allowed? true end # Convert this server preference definition into a format appropriate # for a mongos server. # Note that the server preference is not sent to mongos as part of the query # selector if there are no tag sets, for maximum backwards compatibility. # # @example Convert this server preference definition into a format # for mongos. # preference = Mongo::ServerSelector::SecondaryPreferred.new # preference.to_mongos # # @return [ Hash ] The server preference formatted for a mongos server. # # @since 2.0.0 def to_mongos return nil if tag_sets.empty? && max_staleness.nil? to_doc end # Convert this server preference definition into a format appropriate # for a server. # # @example Convert this server preference definition into a format # for a server. # preference = Mongo::ServerSelector::SecondaryPreferred.new # preference.to_doc # # @return [ Hash ] The server preference formatted for a server. # # @since 2.5.0 def to_doc @doc ||= (preference = { mode: SERVER_FORMATTED_NAME } preference.merge!({ tags: tag_sets }) unless tag_sets.empty? preference.merge!({ maxStalenessSeconds: max_staleness }) if max_staleness preference) end private # Select servers taking into account any defined tag sets and # local threshold, with secondaries. # # @example Select servers given a list of candidates, # with secondaries preferred. # preference = Mongo::ServerSelector::SecondaryPreferred.new # preference.select([candidate_1, candidate_2]) # # @return [ Array ] A list of servers matching tag sets and acceptable # latency with secondaries preferred. # # @since 2.0.0 def select(candidates) near_servers(secondaries(candidates)) + primary(candidates) end def max_staleness_allowed? 
true end end end end mongo-2.5.1/lib/mongo/bulk_write/0000755000004100000410000000000013257253113016724 5ustar www-datawww-datamongo-2.5.1/lib/mongo/bulk_write/combineable.rb0000644000004100000410000000336013257253113021513 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class BulkWrite # Defines behaviour around combiners # # @api private # # @since 2.1.0 module Combineable # @return [ Array ] requests The provided requests. attr_reader :requests # @return [ true, false ] has_collation Whether one or more operations has a collation defined. attr_reader :has_collation # @return [ true, false ] has_array_filters Whether one or more operations specifies an array # filters option. attr_reader :has_array_filters # Create the ordered combiner. # # @api private # # @example Create the ordered combiner. # OrderedCombiner.new([{ insert_one: { _id: 0 }}]) # # @param [ Array ] requests The bulk requests. # # @since 2.1.0 def initialize(requests) @requests = requests @has_collation = false @has_array_filters = false end private def combine_requests(ops) requests.reduce(ops) do |operations, request| add(operations, request.keys.first, request.values.first) end end end end end mongo-2.5.1/lib/mongo/bulk_write/result.rb0000644000004100000410000001133713257253113020574 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class BulkWrite # Wraps a series of bulk write operations in a result object. # # @since 2.0.6 class Result # Constant for number removed. # # @since 2.1.0 REMOVED_COUNT = 'n_removed'.freeze # Constant for number inserted. # # @since 2.1.0 INSERTED_COUNT = 'n_inserted'.freeze # Constant for inserted ids. # # @since 2.1.0 INSERTED_IDS = 'inserted_ids'.freeze # Constant for number matched. # # @since 2.1.0 MATCHED_COUNT = 'n_matched'.freeze # Constant for number modified. # # @since 2.1.0 MODIFIED_COUNT = 'n_modified'.freeze # Constant for upserted. # # @since 2.1.0 UPSERTED = 'upserted'.freeze # Constant for number upserted. # # @since 2.1.0 UPSERTED_COUNT = 'n_upserted'.freeze # Constant for upserted ids. # # @since 2.1.0 UPSERTED_IDS = 'upserted_ids'.freeze # The fields contained in the result document returned from executing the # operations. # # @since 2.1.0. FIELDS = [ INSERTED_COUNT, REMOVED_COUNT, MODIFIED_COUNT, UPSERTED_COUNT, MATCHED_COUNT, Operation::Result::N ].freeze # Returns the number of documents deleted. # # @example Get the number of deleted documents. 
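# A usage sketch for the request hashes consumed by the combiners above;
# assumes a local deployment and a 'users' collection. Each request is a
# single-key hash mapping the operation name to its operation document.
require 'mongo'

client   = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')
requests = [
  { insert_one: { name: 'Emily' } },
  { update_one: { filter: { name: 'Emily' }, update: { '$set' => { age: 30 } } } },
  { delete_one: { filter: { name: 'Sam' } } }
]
result = client[:users].bulk_write(requests, ordered: true)
result.inserted_count # => 1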
# result.deleted_count # # @return [ Integer ] The number deleted. # # @since 2.1.0 def deleted_count @results[REMOVED_COUNT] end # Create the new result object from the results document. # # @example Create the new result. # Result.new({ 'n_inserted' => 10 }) # # @param [ BSON::Document, Hash ] results The results document. # # @since 2.1.0 def initialize(results) @results = results end # Returns the number of documents inserted. # # @example Get the number of inserted documents. # result.inserted_count # # @return [ Integer ] The number inserted. # # @since 2.1.0 def inserted_count @results[INSERTED_COUNT] end # Get the inserted document ids, if the operation has inserts. # # @example Get the inserted ids. # result.inserted_ids # # @return [ Array ] The inserted ids. # # @since 2.1.0 def inserted_ids @results[INSERTED_IDS] end # Returns the number of documents matched. # # @example Get the number of matched documents. # result.matched_count # # @return [ Integer ] The number matched. # # @since 2.1.0 def matched_count @results[MATCHED_COUNT] end # Returns the number of documents modified. # # @example Get the number of modified documents. # result.modified_count # # @return [ Integer ] The number modified. # # @since 2.1.0 def modified_count @results[MODIFIED_COUNT] end # Returns the number of documents upserted. # # @example Get the number of upserted documents. # result.upserted_count # # @return [ Integer ] The number upserted. # # @since 2.1.0 def upserted_count @results[UPSERTED_COUNT] end # Get the upserted document ids, if the operation has inserts. # # @example Get the upserted ids. # result.upserted_ids # # @return [ Array ] The upserted ids. # # @since 2.1.0 def upserted_ids @results[UPSERTED_IDS] || [] end # Validates the bulk write result. # # @example Validate the result. # result.validate! # # @raise [ Error::BulkWriteError ] If the result contains errors. # # @return [ Result ] The result. # # @since 2.1.0 def validate! if @results[Error::WRITE_ERRORS] || @results[Error::WRITE_CONCERN_ERRORS] raise Error::BulkWriteError.new(@results) else self end end end end end mongo-2.5.1/lib/mongo/bulk_write/unordered_combiner.rb0000644000004100000410000000251313257253113023117 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class BulkWrite # Combines groups of bulk write operations in no order. # # @api private # # @since 2.1.0 class UnorderedCombiner include Transformable include Validatable include Combineable # Combine the requests in order. # # @api private # # @example Combine the requests. # combiner.combine # # @return [ Array ] The combined requests. 
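# A short sketch of the result wrapper above, built from the combined results
# document (string keys, per the constants defined in this class).
require 'mongo'

ids = Array.new(3) { BSON::ObjectId.new }
result = Mongo::BulkWrite::Result.new(
  'n_inserted'   => 3,
  'inserted_ids' => ids,
  'n_matched'    => 2,
  'n_modified'   => 1
)
result.inserted_count # => 3
result.inserted_ids   # => ids
result.upserted_ids   # => [] (defaults to an empty array)
result.validate!      # returns self; raises Error::BulkWriteError when write errors are present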
# # @since 2.1.0 def combine combine_requests({}).map do |name, ops| { name => ops } end end private def add(operations, name, document) (operations[name] ||= []).push(transform(name, document)) operations end end end end mongo-2.5.1/lib/mongo/bulk_write/result_combiner.rb0000644000004100000410000000663613257253113022460 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class BulkWrite # Combines bulk write results together. # # @api private # # @since 2.1.0 class ResultCombiner # @return [ Integer ] count The count of documents in the entire batch. attr_reader :count # @return [ Hash ] results The results hash. attr_reader :results # Create the new result combiner. # # @api private # # @example Create the result combiner. # ResultCombiner.new # # @since 2.1.0 def initialize @results = {} @count = 0 end # Combines a result into the overall results. # # @api private # # @example Combine the result. # combiner.combine!(result, count) # # @param [ Operation::Result ] result The result to combine. # @param [ Integer ] count The count of requests in the batch. # # @since 2.1.0 def combine!(result, count) combine_counts!(result) combine_ids!(result) combine_errors!(result) @count += count end # Get the final result. # # @api private # # @example Get the final result. # combinator.result # # @return [ BulkWrite::Result ] The final result. # # @since 2.1.0 def result BulkWrite::Result.new(results).validate! end private def combine_counts!(result) Result::FIELDS.each do |field| if result.respond_to?(field) && value = result.send(field) results.merge!(field => (results[field] || 0) + value) end end end def combine_ids!(result) if result.respond_to?(Result::INSERTED_IDS) results[Result::INSERTED_IDS] = (results[Result::INSERTED_IDS] || []) + result.inserted_ids end if result.respond_to?(Result::UPSERTED) results[Result::UPSERTED_IDS] = (results[Result::UPSERTED_IDS] || []) + result.upserted.map{ |doc| doc['_id'] } end end def combine_errors!(result) combine_write_errors!(result) combine_write_concern_errors!(result) end def combine_write_errors!(result) if write_errors = result.aggregate_write_errors(count) results.merge!( Error::WRITE_ERRORS => ((results[Error::WRITE_ERRORS] || []) << write_errors).flatten ) else result.validate! end end def combine_write_concern_errors!(result) if write_concern_errors = result.aggregate_write_concern_errors(count) results[Error::WRITE_CONCERN_ERRORS] = (results[Error::WRITE_CONCERN_ERRORS] || []) + write_concern_errors end end end end end mongo-2.5.1/lib/mongo/bulk_write/ordered_combiner.rb0000644000004100000410000000267113257253113022561 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class BulkWrite # Combines groups of bulk write operations in order. # # @api private # # @since 2.1.0 class OrderedCombiner include Transformable include Validatable include Combineable # Combine the requests in order. # # @api private # # @example Combine the requests. # combiner.combine # # @return [ Array ] The combined requests. # # @since 2.1.0 def combine combine_requests([]) end private def add(operations, name, document) operations.push({ name => []}) if next_group?(name, operations) operations[-1][name].push(transform(name, document)) operations end def next_group?(name, operations) !operations[-1] || !operations[-1].key?(name) end end end end mongo-2.5.1/lib/mongo/bulk_write/validatable.rb0000644000004100000410000000370113257253113021522 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class BulkWrite # Defines behaviour around validations. # # @api private # # @since 2.1.0 module Validatable # Validate the document. # # @api private # # @example Validate the document. # validatable.validate(:insert_one, { _id: 0 }) # # @param [ Symbol ] name The operation name. # @param [ Hash, BSON::Document ] document The document. # # @raise [ InvalidBulkOperation ] If not valid. # # @return [ Hash, BSON::Document ] The document. # # @since 2.1.0 def validate(name, document) validate_operation(name) validate_document(name, document) if document.respond_to?(:keys) && (document[:collation] || document[Operation::COLLATION]) @has_collation = true end if document.respond_to?(:keys) && document[:array_filters] @has_array_filters = true end end private def validate_document(name, document) if document.respond_to?(:keys) || document.respond_to?(:data) document else raise Error::InvalidBulkOperation.new(name, document) end end def validate_operation(name) unless Transformable::MAPPERS.key?(name) raise Error::InvalidBulkOperationType.new(name) end end end end end mongo-2.5.1/lib/mongo/bulk_write/transformable.rb0000644000004100000410000000771213257253113022117 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
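# An illustrative sketch of the two combiners (internal API, shown only to
# make the grouping behaviour concrete): ordered combining preserves operation
# boundaries, while unordered combining merges all operations of the same type.
require 'mongo'

requests = [
  { insert_one: { x: 1 } },
  { delete_one: { filter: { x: 1 } } },
  { insert_one: { x: 2 } }
]

Mongo::BulkWrite::OrderedCombiner.new(requests).combine
# => [{ insert_one: [...] }, { delete_one: [...] }, { insert_one: [...] }]

Mongo::BulkWrite::UnorderedCombiner.new(requests).combine
# => [{ insert_one: [...] }, { delete_one: [...] }]   (both insert_one ops merged)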
# See the License for the specific language governing permissions and # limitations under the License. module Mongo class BulkWrite # Defines behaviour around transformations. # # @api private # # @since 2.1.0 module Transformable # The delete many model constant. # # @since 2.1.0 DELETE_MANY = :delete_many.freeze # The delete one model constant. # # @since 2.1.0 DELETE_ONE = :delete_one.freeze # The insert one model constant. # # @since 2.1.0 INSERT_ONE = :insert_one.freeze # The replace one model constant. # # @since 2.1.0 REPLACE_ONE = :replace_one.freeze # The update many model constant. # # @since 2.1.0 UPDATE_MANY = :update_many.freeze # The update one model constant. # # @since 2.1.0 UPDATE_ONE = :update_one.freeze # Proc to transform delete many ops. # # @since 2.1.0 DELETE_MANY_TRANSFORM = ->(doc){ { Operation::Q => doc[:filter], Operation::LIMIT => 0 }.tap do |d| d[Operation::COLLATION] = doc[:collation] if doc[:collation] end } # Proc to transform delete one ops. # # @since 2.1.0 DELETE_ONE_TRANSFORM = ->(doc){ { Operation::Q => doc[:filter], Operation::LIMIT => 1 }.tap do |d| d[Operation::COLLATION] = doc[:collation] if doc[:collation] end } # Proc to transform insert one ops. # # @since 2.1.0 INSERT_ONE_TRANSFORM = ->(doc){ doc } # Proc to transfor replace one ops. # # @since 2.1.0 REPLACE_ONE_TRANSFORM = ->(doc){ { Operation::Q => doc[:filter], Operation::U => doc[:replacement], Operation::MULTI => false, Operation::UPSERT => doc.fetch(:upsert, false) }.tap do |d| d[Operation::COLLATION] = doc[:collation] if doc[:collation] end } # Proc to transform update many ops. # # @since 2.1.0 UPDATE_MANY_TRANSFORM = ->(doc){ { Operation::Q => doc[:filter], Operation::U => doc[:update], Operation::MULTI => true, Operation::UPSERT => doc.fetch(:upsert, false) }.tap do |d| d[Operation::COLLATION] = doc[:collation] if doc[:collation] d[Operation::ARRAY_FILTERS] = doc[:array_filters] if doc[:array_filters] end } # Proc to transform update one ops. # # @since 2.1.0 UPDATE_ONE_TRANSFORM = ->(doc){ { Operation::Q => doc[:filter], Operation::U => doc[:update], Operation::MULTI => false, Operation::UPSERT => doc.fetch(:upsert, false) }.tap do |d| d[Operation::COLLATION] = doc[:collation] if doc[:collation] d[Operation::ARRAY_FILTERS] = doc[:array_filters] if doc[:array_filters] end } # Document mappers from the bulk api input into proper commands. # # @since 2.1.0 MAPPERS = { DELETE_MANY => DELETE_MANY_TRANSFORM, DELETE_ONE => DELETE_ONE_TRANSFORM, INSERT_ONE => INSERT_ONE_TRANSFORM, REPLACE_ONE => REPLACE_ONE_TRANSFORM, UPDATE_MANY => UPDATE_MANY_TRANSFORM, UPDATE_ONE => UPDATE_ONE_TRANSFORM }.freeze private def transform(name, document) validate(name, document) MAPPERS[name].call(document) end end end end mongo-2.5.1/lib/mongo/uri/0000755000004100000410000000000013257253113015354 5ustar www-datawww-datamongo-2.5.1/lib/mongo/uri/srv_protocol.rb0000644000004100000410000001416413257253113020442 0ustar www-datawww-data# Copyright (C) 2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
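# An illustration of the update_one mapping above; the Operation constants
# hold the wire-format field names used by the write commands.
require 'mongo'

doc = {
  filter: { name: 'Emily' },
  update: { '$set' => { age: 30 } },
  upsert: true,
  collation: { locale: 'en_US' }
}
Mongo::BulkWrite::Transformable::UPDATE_ONE_TRANSFORM.call(doc)
# => { Operation::Q         => { name: 'Emily' },
#      Operation::U         => { '$set' => { age: 30 } },
#      Operation::MULTI     => false,
#      Operation::UPSERT    => true,
#      Operation::COLLATION => { locale: 'en_US' } }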
require 'resolv' module Mongo class URI # Parser for a URI using the mongodb+srv protocol, which specifies a DNS to query for SRV records. # The driver will query the DNS server for SRV records on {hostname}.{domainname}, # prefixed with _mongodb._tcp # The SRV records can then be used as the seedlist for a Mongo::Client. # The driver also queries for a TXT record providing default connection string options. # Only one TXT record is allowed, and only a subset of Mongo::Client options is allowed. # # Please refer to the Initial DNS Seedlist Discovery spec for details. # # https://github.com/mongodb/specifications/blob/master/source/initial-dns-seedlist-discovery # # @example Use the uri string to make a client connection. # client = Mongo::Client.new('mongodb+srv://test6.test.build.10gen.cc/') # # @since 2.5.0 class SRVProtocol < URI # Gets the options hash that needs to be passed to a Mongo::Client on instantiation, so we # don't have to merge the txt record options, credentials, and database in at that point - # we only have a single point here. # # @example Get the client options. # uri.client_options # # @return [ Hash ] The options passed to the Mongo::Client # # @since 2.5.0 def client_options opts = @txt_options.merge(ssl: true) opts = opts.merge(uri_options).merge(:database => database) @user ? opts.merge(credentials) : opts end private RECORD_PREFIX = '_mongodb._tcp.'.freeze DOT_PARTITION = '.'.freeze VALID_TXT_OPTIONS = ['replicaset', 'authsource'].freeze INVALID_HOST = "One and only one host is required in a connection string with the " + "'#{MONGODB_SRV_SCHEME}' protocol.".freeze INVALID_PORT = "It is not allowed to specify a port in a connection string with the " + "'#{MONGODB_SRV_SCHEME}' protocol.".freeze INVALID_DOMAIN = "The domain name must consist of at least two parts: the domain name, " + "and a TLD.".freeze NO_SRV_RECORDS = "The DNS query returned no SRV records at hostname (%s)".freeze MORE_THAN_ONE_TXT_RECORD_FOUND = "Only one TXT record is allowed. Querying hostname (%s) " + "returned more than one result.".freeze INVALID_TXT_RECORD_OPTION = "TXT records can only specify the options " + "[#{VALID_TXT_OPTIONS.join(', ')}].".freeze MISMATCHED_DOMAINNAME = "Parent domain name in SRV record result (%s) does not match " + "that of the hostname (%s)".freeze FORMAT = 'mongodb+srv://[username:password@]host[/[database][?options]]'.freeze def scheme MONGODB_SRV_SCHEME end def raise_invalid_error!(details) raise Error::InvalidURI.new(@string, details, FORMAT) end def resolver @resolver ||= Resolv::DNS.new end def parse_creds_hosts!(string) hostname, creds = split_creds_hosts(string) validate_hostname!(hostname) records = get_records(hostname) @txt_options = get_txt_opts(hostname) || {} @servers = parse_servers!(records.join(',')) @user = parse_user!(creds) @password = parse_password!(creds) end def validate_hostname!(hostname) raise_invalid_error!(INVALID_HOST) if hostname.empty? 
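# A usage sketch for the SRV protocol handling above: the driver resolves
# _mongodb._tcp.<hostname> for the seedlist and <hostname> for a single TXT
# record of default options, and enables ssl by default for +srv connections.
require 'mongo'

client = Mongo::Client.new('mongodb+srv://test6.test.build.10gen.cc/')
client.cluster.servers.map(&:address) # => the addresses returned by the SRV lookup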
raise_invalid_error!(INVALID_HOST) if hostname.include?(HOST_DELIM) raise_invalid_error!(INVALID_PORT) if hostname.include?(HOST_PORT_DELIM) _, _, domain = hostname.partition(DOT_PARTITION) raise_invalid_error!(INVALID_DOMAIN) unless domain.include?(DOT_PARTITION) end def get_records(hostname) query_name = RECORD_PREFIX + hostname records = resolver.getresources(query_name, Resolv::DNS::Resource::IN::SRV).collect do |record| record_host = record.target.to_s port = record.port validate_record!(record_host, hostname) "#{record_host}#{HOST_PORT_DELIM}#{port}" end raise Error::NoSRVRecords.new(NO_SRV_RECORDS % hostname) if records.empty? records end def validate_record!(record_host, hostname) domainname = hostname.split(DOT_PARTITION)[1..-1] host_parts = record_host.split(DOT_PARTITION) unless (host_parts.size > domainname.size) && (domainname == host_parts[-domainname.length..-1]) raise Error::MismatchedDomain.new(MISMATCHED_DOMAINNAME % [record_host, domainname]) end end def get_txt_opts(host) records = resolver.getresources(host, Resolv::DNS::Resource::IN::TXT) unless records.empty? if records.size > 1 raise Error::InvalidTXTRecord.new(MORE_THAN_ONE_TXT_RECORD_FOUND % host) end options_string = records[0].strings.join parse_txt_options!(options_string) end end def parse_txt_options!(string) return {} unless string string.split(INDIV_URI_OPTS_DELIM).reduce({}) do |txt_options, opt| raise Error::InvalidTXTRecord.new(INVALID_OPTS_VALUE_DELIM) unless opt.index(URI_OPTS_VALUE_DELIM) key, value = opt.split(URI_OPTS_VALUE_DELIM) raise Error::InvalidTXTRecord.new(INVALID_TXT_RECORD_OPTION) unless VALID_TXT_OPTIONS.include?(key.downcase) strategy = URI_OPTION_MAP[key.downcase] add_uri_option(strategy, value, txt_options) txt_options end end end end end mongo-2.5.1/lib/mongo/address/0000755000004100000410000000000013257253113016202 5ustar www-datawww-datamongo-2.5.1/lib/mongo/address/ipv4.rb0000644000004100000410000000525313257253113017416 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Address # Sets up resolution with IPv4 support if the address is an ip # address. # # @since 2.0.0 class IPv4 # @return [ String ] host The host. attr_reader :host # @return [ String ] host_name The original host name. attr_reader :host_name # @return [ Integer ] port The port. attr_reader :port # The regular expression to use to match an IPv4 ip address. # # @since 2.0.0 MATCH = Regexp.new('/\./').freeze # Split value constant. # # @since 2.1.0 SPLIT = ':'.freeze # Parse an IPv4 address into its host and port. # # @example Parse the address. # IPv4.parse("127.0.0.1:28011") # # @param [ String ] address The address to parse. # # @return [ Array ] The host and port pair. # # @since 2.0.0 def self.parse(address) parts = address.split(SPLIT) host = parts[0] port = (parts[1] || 27017).to_i [ host, port ] end # Initialize the IPv4 resolver. # # @example Initialize the resolver. # IPv4.new("127.0.0.1", 27017, 'localhost') # # @param [ String ] host The host. 
# @param [ Integer ] port The port. # # @since 2.0.0 def initialize(host, port, host_name=nil) @host = host @port = port @host_name = host_name end # Get a socket for the provided address type, given the options. # # @example Get an IPv4 socket. # ipv4.socket(5, :ssl => true) # # @param [ Float ] socket_timeout The socket timeout. # @param [ Hash ] ssl_options SSL options. # # @return [ Pool::Socket::SSL, Pool::Socket::TCP ] The socket. # # @since 2.0.0 def socket(socket_timeout, ssl_options = {}) unless ssl_options.empty? Socket::SSL.new(host, port, host_name, socket_timeout, Socket::PF_INET, ssl_options) else Socket::TCP.new(host, port, socket_timeout, Socket::PF_INET) end end end end end mongo-2.5.1/lib/mongo/address/ipv6.rb0000644000004100000410000000513013257253113017412 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Address # Sets up resolution with IPv6 support if the address is an ip # address. # # @since 2.0.0 class IPv6 # @return [ String ] host The host. attr_reader :host # @return [ String ] host_name The original host name. attr_reader :host_name # @return [ Integer ] port The port. attr_reader :port # The regular expression to use to match an IPv6 ip address. # # @since 2.0.0 MATCH = Regexp.new('::').freeze # Parse an IPv6 address into its host and port. # # @example Parse the address. # IPv4.parse("[::1]:28011") # # @param [ String ] address The address to parse. # # @return [ Array ] The host and port pair. # # @since 2.0.0 def self.parse(address) parts = address.match(/\[(.+)\]:?(.+)?/) host = parts[1] port = (parts[2] || 27017).to_i [ host, port ] end # Initialize the IPv6 resolver. # # @example Initialize the resolver. # IPv6.new("::1", 28011, 'localhost') # # @param [ String ] host The host. # @param [ Integer ] port The port. # # @since 2.0.0 def initialize(host, port, host_name=nil) @host = host @port = port @host_name = host_name end # Get a socket for the provided address type, given the options. # # @example Get an IPv6 socket. # ipv4.socket(5, :ssl => true) # # @param [ Float ] socket_timeout The socket timeout. # @param [ Hash ] ssl_options SSL options. # # @return [ Pool::Socket::SSL, Pool::Socket::TCP ] The socket. # # @since 2.0.0 def socket(socket_timeout, ssl_options = {}) unless ssl_options.empty? Socket::SSL.new(host, port, host_name, socket_timeout, Socket::PF_INET6, ssl_options) else Socket::TCP.new(host, port, socket_timeout, Socket::PF_INET6) end end end end end mongo-2.5.1/lib/mongo/address/unix.rb0000644000004100000410000000410413257253113017511 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. 
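# Quick examples of the address parsers in this directory (the Unix variant
# appears in unix.rb below); the port defaults to 27017 when omitted.
require 'mongo'

Mongo::Address::IPv4.parse('127.0.0.1:28011')      # => ['127.0.0.1', 28011]
Mongo::Address::IPv4.parse('127.0.0.1')            # => ['127.0.0.1', 27017]
Mongo::Address::IPv6.parse('[::1]:28011')          # => ['::1', 28011]
Mongo::Address::Unix.parse('/path/to/socket.sock') # => ['/path/to/socket.sock']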
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Address # Sets up socket addresses. # # @since 2.0.0 class Unix # @return [ String ] host The host. attr_reader :host # @return [ nil ] port Will always be nil. attr_reader :port # The regular expression to use to match a socket path. # # @since 2.0.0 MATCH = Regexp.new('\.sock').freeze # Parse a socket path. # # @example Parse the address. # Unix.parse("/path/to/socket.sock") # # @param [ String ] address The address to parse. # # @return [ Array ] A list with the host (socket path). # # @since 2.0.0 def self.parse(address) [ address ] end # Initialize the socket resolver. # # @example Initialize the resolver. # Unix.new("/path/to/socket.sock", "/path/to/socket.sock") # # @param [ String ] host The host. # # @since 2.0.0 def initialize(host, port=nil, host_name=nil) @host = host end # Get a socket for the provided address type, given the options. # # @example Get a Unix socket. # address.socket(5) # # @param [ Float ] socket_timeout The socket timeout. # @param [ Hash ] ssl_options SSL options - ignored. # # @return [ Pool::Socket::Unix ] The socket. # # @since 2.0.0 def socket(socket_timeout, ssl_options = {}) Socket::Unix.new(host, socket_timeout) end end end end mongo-2.5.1/lib/mongo/socket.rb0000644000004100000410000001373313257253113016401 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'socket' require 'timeout' require 'mongo/socket/ssl' require 'mongo/socket/tcp' require 'mongo/socket/unix' module Mongo # Provides additional data around sockets for the driver's use. # # @since 2.0.0 class Socket include ::Socket::Constants # Error message for SSL related exceptions. # # @since 2.0.0 SSL_ERROR = 'SSL handshake failed. MongoDB may not be configured with SSL support.'.freeze # Error message for timeouts on socket calls. # # @since 2.0.0 TIMEOUT_ERROR = 'Socket request timed out'.freeze # The pack directive for timeouts. # # @since 2.0.0 TIMEOUT_PACK = 'l_2'.freeze # @return [ Integer ] family The type of host family. attr_reader :family # @return [ Socket ] socket The wrapped socket. attr_reader :socket # Is the socket connection alive? # # @example Is the socket alive? # socket.alive? # # @return [ true, false ] If the socket is alive. # # @deprecated Use #connectable? on the connection instead. def alive? sock_arr = [ @socket ] if Kernel::select(sock_arr, nil, sock_arr, 0) eof? else true end end # Close the socket. # # @example Close the socket. # socket.close # # @return [ true ] Always true. # # @since 2.0.0 def close @socket.close rescue true true end # Delegates gets to the underlying socket. 
# # @example Get the next line. # socket.gets(10) # # @param [ Array ] args The arguments to pass through. # # @return [ Object ] The returned bytes. # # @since 2.0.0 def gets(*args) handle_errors { @socket.gets(*args) } end # Create the new socket for the provided family - ipv4, piv6, or unix. # # @example Create a new ipv4 socket. # Socket.new(Socket::PF_INET) # # @param [ Integer ] family The socket domain. # # @since 2.0.0 def initialize(family) @family = family @socket = ::Socket.new(family, SOCK_STREAM, 0) set_socket_options(@socket) end # Will read all data from the socket for the provided number of bytes. # If no data is returned, an exception will be raised. # # @example Read all the requested data from the socket. # socket.read(4096) # # @param [ Integer ] length The number of bytes to read. # # @raise [ Mongo::SocketError ] If not all data is returned. # # @return [ Object ] The data from the socket. # # @since 2.0.0 def read(length) handle_errors do data = read_from_socket(length) raise IOError unless (data.length > 0 || length == 0) while data.length < length chunk = read_from_socket(length - data.length) raise IOError unless (chunk.length > 0 || length == 0) data << chunk end data end end # Read a single byte from the socket. # # @example Read a single byte. # socket.readbyte # # @return [ Object ] The read byte. # # @since 2.0.0 def readbyte handle_errors { @socket.readbyte } end # Writes data to the socket instance. # # @example Write to the socket. # socket.write(data) # # @param [ Array ] args The data to be written. # # @return [ Integer ] The length of bytes written to the socket. # # @since 2.0.0 def write(*args) handle_errors { @socket.write(*args) } end # Tests if this socket has reached EOF. Primarily used for liveness checks. # # @since 2.0.5 def eof? @socket.eof? rescue IOError, SystemCallError => _ true end private def read_from_socket(length) data = String.new deadline = (Time.now + timeout) if timeout begin while (data.length < length) data << @socket.read_nonblock(length - data.length) end rescue IO::WaitReadable select_timeout = (deadline - Time.now) if deadline if (select_timeout && select_timeout <= 0) || !Kernel::select([@socket], nil, [@socket], select_timeout) raise Timeout::Error.new("Took more than #{timeout} seconds to receive data.") end retry end data end def unix_socket?(sock) defined?(UNIXSocket) && sock.is_a?(UNIXSocket) end DEFAULT_TCP_KEEPINTVL = 10 DEFAULT_TCP_KEEPCNT = 9 DEFAULT_TCP_KEEPIDLE = 300 def set_keepalive_opts(sock) sock.setsockopt(SOL_SOCKET, SO_KEEPALIVE, true) set_option(sock, :TCP_KEEPINTVL, DEFAULT_TCP_KEEPINTVL) set_option(sock, :TCP_KEEPCNT, DEFAULT_TCP_KEEPCNT) set_option(sock, :TCP_KEEPIDLE, DEFAULT_TCP_KEEPIDLE) rescue end def set_option(sock, option, default) if Socket.const_defined?(option) system_default = sock.getsockopt(IPPROTO_TCP, option).int if system_default > default sock.setsockopt(IPPROTO_TCP, option, default) end end end def set_socket_options(sock) sock.set_encoding(BSON::BINARY) set_keepalive_opts(sock) end def handle_errors begin yield rescue Errno::ETIMEDOUT raise Error::SocketTimeoutError, TIMEOUT_ERROR rescue IOError, SystemCallError => e raise Error::SocketError, e.message rescue OpenSSL::SSL::SSLError raise Error::SocketError, SSL_ERROR end end end end mongo-2.5.1/lib/mongo/cursor.rb0000644000004100000410000001667613257253113016437 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/cursor/builder' module Mongo # Client-side representation of an iterator over a query result set on # the server. # # A +Cursor+ is not created directly by a user. Rather, +CollectionView+ # creates a +Cursor+ in an Enumerable module method. # # @example Get an array of 5 users named Emily. # users.find({:name => 'Emily'}).limit(5).to_a # # @example Call a block on each user doc. # users.find.each { |doc| puts doc } # # @note The +Cursor+ API is semipublic. # @api semipublic class Cursor extend Forwardable include Enumerable include Retryable def_delegators :@view, :collection def_delegators :collection, :client, :database def_delegators :@server, :cluster # @return [ Collection::View ] view The collection view. attr_reader :view # Creates a +Cursor+ object. # # @example Instantiate the cursor. # Mongo::Cursor.new(view, response, server) # # @param [ CollectionView ] view The +CollectionView+ defining the query. # @param [ Operation::Result ] result The result of the first execution. # @param [ Server ] server The server this cursor is locked to. # @param [ Hash ] options The cursor options. # # @option options [ true, false ] :disable_retry Whether to disable retrying on # error when sending getmores. # # @since 2.0.0 def initialize(view, result, server, options = {}) @view = view @server = server @initial_result = result @remaining = limit if limited? @cursor_id = result.cursor_id @coll_name = nil @options = options @session = @options[:session] register ObjectSpace.define_finalizer(self, self.class.finalize(result.cursor_id, cluster, kill_cursors_op_spec, server, @session)) end # Finalize the cursor for garbage collection. Schedules this cursor to be included # in a killCursors operation executed by the Cluster's CursorReaper. # # @example Finalize the cursor. # Cursor.finalize(id, cluster, op, server) # # @param [ Integer ] cursor_id The cursor's id. # @param [ Mongo::Cluster ] cluster The cluster associated with this cursor and its server. # @param [ Hash ] op_spec The killCursors operation specification. # @param [ Mongo::Server ] server The server to send the killCursors operation to. # # @return [ Proc ] The Finalizer. # # @since 2.3.0 def self.finalize(cursor_id, cluster, op_spec, server, session) proc do cluster.schedule_kill_cursor(cursor_id, op_spec, server) session.end_session if session && session.implicit? end end # Get a human-readable string representation of +Cursor+. # # @example Inspect the cursor. # cursor.inspect # # @return [ String ] A string representation of a +Cursor+ instance. # # @since 2.0.0 def inspect "#" end # Iterate through documents returned from the query. # # @example Iterate over the documents in the cursor. # cursor.each do |doc| # ... # end # # @return [ Enumerator ] The enumerator. # # @since 2.0.0 def each process(@initial_result).each { |doc| yield doc } while more? return kill_cursors if exhausted? get_more.each { |doc| yield doc } end end # Get the batch size. # # @example Get the batch size. 
# cursor.batch_size # # @return [ Integer ] The batch size. # # @since 2.2.0 def batch_size @view.batch_size && @view.batch_size > 0 ? @view.batch_size : limit end # Is the cursor closed? # # @example Is the cursor closed? # cursor.closed? # # @return [ true, false ] If the cursor is closed. # # @since 2.2.0 def closed? !more? end # Get the parsed collection name. # # @example Get the parsed collection name. # cursor.coll_name # # @return [ String ] The collection name. # # @since 2.2.0 def collection_name @coll_name || collection.name end # Get the cursor id. # # @example Get the cursor id. # cursor.id # # @note A cursor id of 0 means the cursor was closed on the server. # # @return [ Integer ] The cursor id. # # @since 2.2.0 def id @cursor_id end # Get the number of documents to return. Used on 3.0 and lower server # versions. # # @example Get the number to return. # cursor.to_return # # @return [ Integer ] The number of documents to return. # # @since 2.2.0 def to_return use_limit? ? @remaining : (batch_size || 0) end private def exhausted? limited? ? @remaining <= 0 : false end def get_more if @options[:disable_retry] process(get_more_operation.execute(@server)) else read_with_retry do process(get_more_operation.execute(@server)) end end end def get_more_operation if @server.features.find_command_enabled? Operation::Commands::GetMore.new(Builder::GetMoreCommand.new(self, @session).specification) else Operation::Read::GetMore.new(Builder::OpGetMore.new(self).specification) end end def kill_cursors unregister read_with_one_retry do kill_cursors_operation.execute(@server) end ensure end_session @cursor_id = 0 end def end_session @session.end_session if @session && @session.implicit? end def kill_cursors_operation if @server.features.find_command_enabled? Operation::Commands::Command.new(kill_cursors_op_spec) else Operation::KillCursors.new(kill_cursors_op_spec) end end def kill_cursors_op_spec if @server.features.find_command_enabled? Builder::KillCursorsCommand.new(self).specification else Builder::OpKillCursors.new(self).specification end end def limited? limit ? limit > 0 : false end def more? @cursor_id != 0 end def process(result) @remaining -= result.returned_count if limited? @coll_name ||= result.namespace.sub("#{database.name}.", '') if result.namespace unregister if result.cursor_id == 0 @cursor_id = result.cursor_id end_session if !more? result.documents end def use_limit? limited? && batch_size >= @remaining end def limit @view.send(:limit) end def register cluster.register_cursor(@cursor_id) end def unregister cluster.unregister_cursor(@cursor_id) end end end mongo-2.5.1/lib/mongo/error.rb0000644000004100000410000000673313257253113016244 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'forwardable' module Mongo # Base error class for all Mongo related errors. # # @since 2.0.0 class Error < StandardError # The error code field. 
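# A usage sketch for the Cursor behaviour above, following the class docs;
# assumes a 'users' collection on a locally running deployment. The cursor is
# created by the view's Enumerable methods, not instantiated directly.
require 'mongo'

client = Mongo::Client.new(['127.0.0.1:27017'], database: 'test')
users  = client[:users]
users.find(name: 'Emily').limit(5).batch_size(2).each do |doc|
  puts doc['_id']
end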
# # @since 2.0.0 CODE = 'code'.freeze # An error field, MongoDB < 2.6 # # @since 2.0.0 ERR = '$err'.freeze # An error field, MongoDB < 2.4 # # @since 2.0.0 ERROR = 'err'.freeze # The standard error message field, MongoDB 3.0+ # # @since 2.0.0 ERRMSG = 'errmsg'.freeze # The constant for the writeErrors array. # # @since 2.0.0 WRITE_ERRORS = 'writeErrors'.freeze # The constant for a write concern error. # # @since 2.0.0 WRITE_CONCERN_ERROR = 'writeConcernError'.freeze # The constant for write concern errors. # # @since 2.1.0 WRITE_CONCERN_ERRORS = 'writeConcernErrors'.freeze # Constant for an unknown error. # # @since 2.0.0 UNKNOWN_ERROR = 8.freeze # Constant for a bad value error. # # @since 2.0.0 BAD_VALUE = 2.freeze # Constant for a Cursor not found error. # # @since 2.2.3 CURSOR_NOT_FOUND = 'Cursor not found.' end end require 'mongo/error/parser' require 'mongo/error/bulk_write_error' require 'mongo/error/closed_stream' require 'mongo/error/extra_file_chunk' require 'mongo/error/file_not_found' require 'mongo/error/operation_failure' require 'mongo/error/invalid_bulk_operation' require 'mongo/error/invalid_bulk_operation_type' require 'mongo/error/invalid_collection_name' require 'mongo/error/invalid_database_name' require 'mongo/error/invalid_document' require 'mongo/error/invalid_file' require 'mongo/error/invalid_file_revision' require 'mongo/error/invalid_min_pool_size' require 'mongo/error/invalid_application_name' require 'mongo/error/invalid_nonce' require 'mongo/error/invalid_replacement_document' require 'mongo/error/invalid_server_preference' require 'mongo/error/invalid_session' require 'mongo/error/invalid_signature' require 'mongo/error/invalid_txt_record' require 'mongo/error/invalid_update_document' require 'mongo/error/invalid_uri' require 'mongo/error/invalid_write_concern' require 'mongo/error/max_bson_size' require 'mongo/error/max_message_size' require 'mongo/error/mismatched_domain' require 'mongo/error/multi_index_drop' require 'mongo/error/need_primary_server' require 'mongo/error/no_server_available' require 'mongo/error/no_srv_records' require 'mongo/error/socket_error' require 'mongo/error/socket_timeout_error' require 'mongo/error/unchangeable_collection_option' require 'mongo/error/unexpected_chunk_length' require 'mongo/error/unexpected_response' require 'mongo/error/missing_file_chunk' require 'mongo/error/missing_resume_token' require 'mongo/error/unsupported_array_filters' require 'mongo/error/unknown_payload_type' require 'mongo/error/unsupported_collation' require 'mongo/error/unsupported_features' require 'mongo/error/unsupported_message_type' mongo-2.5.1/lib/mongo/server.rb0000644000004100000410000002036513257253113016416 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
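# The constants above name the fields in which servers of different vintages
# report errors ('errmsg' on modern servers, '$err' and 'err' on older
# releases). The sketch below shows how a reply document might be probed
# using those names; it is illustrative only -- Mongo::Error::Parser
# (required above) is the driver's real implementation.
ERROR_FIELDS = %w(errmsg $err err).freeze

def extract_error_message(reply_document)
  return nil unless reply_document
  field   = ERROR_FIELDS.find { |key| reply_document[key] }
  message = reply_document[field] if field
  code    = reply_document['code']
  code ? "#{message} (#{code})" : message
end

extract_error_message('errmsg' => 'not master', 'code' => 10107)
# => "not master (10107)"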
require 'mongo/server/connectable' require 'mongo/server/connection' require 'mongo/server/connection_pool' require 'mongo/server/context' require 'mongo/server/description' require 'mongo/server/monitor' module Mongo # Represents a single server on the server side that can be standalone, part of # a replica set, or a mongos. # # @since 2.0.0 class Server extend Forwardable include Monitoring::Publishable # @return [ String ] The configured address for the server. attr_reader :address # @return [ Cluster ] cluster The server cluster. attr_reader :cluster # @return [ Monitor ] monitor The server monitor. attr_reader :monitor # @return [ Hash ] The options hash. attr_reader :options # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # The default time in seconds to timeout a connection attempt. # # @since 2.4.3 CONNECT_TIMEOUT = 10.freeze # Get the description from the monitor and scan on monitor. def_delegators :monitor, :description, :scan!, :heartbeat_frequency, :last_scan, :compressor alias :heartbeat_frequency_seconds :heartbeat_frequency # Delegate convenience methods to the monitor description. def_delegators :description, :arbiter?, :features, :ghost?, :max_wire_version, :max_write_batch_size, :max_bson_object_size, :max_message_size, :tags, :average_round_trip_time, :mongos?, :other?, :primary?, :replica_set_name, :secondary?, :standalone?, :unknown?, :unknown!, :last_write_date, :logical_session_timeout # Get the app metadata from the cluster. def_delegators :cluster, :app_metadata, :cluster_time, :update_cluster_time def_delegators :features, :check_driver_support! # Is this server equal to another? # # @example Is the server equal to the other? # server == other # # @param [ Object ] other The object to compare to. # # @return [ true, false ] If the servers are equal. # # @since 2.0.0 def ==(other) return false unless other.is_a?(Server) address == other.address end # Get a new context for this server in which to send messages. # # @example Get the server context. # server.context # # @return [ Mongo::Server::Context ] context The server context. # # @since 2.0.0 # # @deprecated Will be removed in version 3.0 def context Context.new(self) end # Determine if a connection to the server is able to be established and # messages can be sent to it. # # @example Is the server connectable? # server.connectable? # # @return [ true, false ] If the server is connectable. # # @since 2.1.0 def connectable? with_connection do |connection| connection.connectable? end end # Disconnect the server from the connection. # # @example Disconnect the server. # server.disconnect! # # @return [ true ] Always tru with no exception. # # @since 2.0.0 def disconnect! pool.disconnect! monitor.stop! and true end # When the server is flagged for garbage collection, stop the monitor # thread. # # @example Finalize the object. # Server.finalize(monitor) # # @param [ Server::Monitor ] monitor The server monitor. # # @since 2.2.0 def self.finalize(monitor) proc { monitor.stop! } end # Instantiate a new server object. Will start the background refresh and # subscribe to the appropriate events. # # @api private # # @example Initialize the server. # Mongo::Server.new('127.0.0.1:27017', cluster, monitoring, listeners) # # @note Server must never be directly instantiated outside of a Cluster. # # @param [ Address ] address The host:port address to connect to. # @param [ Cluster ] cluster The cluster the server belongs to. # @param [ Monitoring ] monitoring The monitoring. 
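# A minimal illustration of the Forwardable-based delegation used by Server
# above, where description predicates are forwarded to the monitor's cached
# description. ToyDescription, ToyMonitor and ToyServer are stand-in names,
# not part of the driver.
require 'forwardable'

ToyDescription = Struct.new(:server_type) do
  def primary?;   server_type == :primary;   end
  def secondary?; server_type == :secondary; end
end

class ToyMonitor
  attr_reader :description
  def initialize(description)
    @description = description
  end
end

class ToyServer
  extend Forwardable
  def_delegators :@monitor, :description
  def_delegators :description, :primary?, :secondary?

  def initialize(monitor)
    @monitor = monitor
  end
end

server = ToyServer.new(ToyMonitor.new(ToyDescription.new(:primary)))
server.primary? # => true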
# @param [ Event::Listeners ] event_listeners The event listeners. # @param [ Hash ] options The server options. # # @since 2.0.0 def initialize(address, cluster, monitoring, event_listeners, options = {}) @address = address @cluster = cluster @monitoring = monitoring @options = options.freeze publish_sdam_event( Monitoring::SERVER_OPENING, Monitoring::Event::ServerOpening.new(address, cluster.topology) ) @monitor = Monitor.new(address, event_listeners, options.merge(app_metadata: cluster.app_metadata)) monitor.scan! monitor.run! ObjectSpace.define_finalizer(self, self.class.finalize(monitor)) end # Get a pretty printed server inspection. # # @example Get the server inspection. # server.inspect # # @return [ String ] The nice inspection string. # # @since 2.0.0 def inspect "#<Mongo::Server:0x#{object_id} address=#{address.host}:#{address.port}>" end # Get the connection pool for this server. # # @example Get the connection pool for the server. # server.pool # # @return [ Mongo::Pool ] The connection pool. # # @since 2.0.0 def pool @pool ||= cluster.pool(self) end # Determine if the provided tags are a subset of the server's tags. # # @example Are the provided tags a subset of the server's tags. # server.matches_tag_set?({ 'rack' => 'a', 'dc' => 'nyc' }) # # @param [ Hash ] tag_set The tag set to compare to the server's tags. # # @return [ true, false ] If the provided tags are a subset of the server's tags. # # @since 2.0.0 def matches_tag_set?(tag_set) tag_set.keys.all? do |k| tags[k] && tags[k] == tag_set[k] end end # Restart the server monitor. # # @example Restart the server monitor. # server.reconnect! # # @return [ true ] Always true. # # @since 2.1.0 def reconnect! monitor.restart! and true end # Execute a block of code with a connection, that is checked out of the # server's pool and then checked back in. # # @example Send a message with the connection. # server.with_connection do |connection| # connection.dispatch([ command ]) # end # # @return [ Object ] The result of the block execution. # # @since 2.3.0 def with_connection(&block) pool.with_connection(&block) end # Handle authentication failure. # # @example Handle possible authentication failure. # server.handle_auth_failure! do # Auth.get(user).login(self) # end # # @raise [ Auth::Unauthorized ] If the authentication failed. # # @return [ Object ] The result of the block execution. # # @since 2.3.0 def handle_auth_failure! yield rescue Auth::Unauthorized unknown! raise end # Will writes sent to this server be retried. # # @example Will writes be retried. # server.retry_writes? # # @return [ true, false ] If writes will be retried. # # @note Retryable writes are only available on server versions 3.6+ and with # sharded clusters or replica sets. # # @since 2.5.0 def retry_writes? !!(features.sessions_enabled? && logical_session_timeout && !standalone?) end end end mongo-2.5.1/lib/mongo/monitoring.rb0000644000004100000410000001450713257253113017276 0ustar www-datawww-data# Copyright (C) 2015-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
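# A stand-alone restatement of the rule in Server#matches_tag_set? above:
# every key/value pair in the requested tag set must be present, with the
# same value, in the server's tags. The method and variable names here are
# illustrative.
def matches_tag_set?(server_tags, tag_set)
  tag_set.keys.all? do |k|
    server_tags[k] && server_tags[k] == tag_set[k]
  end
end

server_tags = { 'dc' => 'nyc', 'rack' => 'a', 'ssd' => 'true' }
matches_tag_set?(server_tags, 'dc' => 'nyc')                # => true
matches_tag_set?(server_tags, 'dc' => 'nyc', 'rack' => 'b') # => false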
require 'mongo/monitoring/event' require 'mongo/monitoring/publishable' require 'mongo/monitoring/command_log_subscriber' require 'mongo/monitoring/sdam_log_subscriber' require 'mongo/monitoring/server_description_changed_log_subscriber' require 'mongo/monitoring/server_closed_log_subscriber' require 'mongo/monitoring/server_opening_log_subscriber' require 'mongo/monitoring/topology_changed_log_subscriber' require 'mongo/monitoring/topology_opening_log_subscriber' module Mongo # The class defines behaviour for the performance monitoring API. # # @since 2.1.0 class Monitoring # The command topic. # # @since 2.1.0 COMMAND = 'Command'.freeze # Server closed topic. # # @since 2.4.0 SERVER_CLOSED = 'ServerClosed'.freeze # Server description changed topic. # # @since 2.4.0 SERVER_DESCRIPTION_CHANGED = 'ServerDescriptionChanged'.freeze # Server opening topic. # # @since 2.4.0 SERVER_OPENING = 'ServerOpening'.freeze # Topology changed topic. # # @since 2.4.0 TOPOLOGY_CHANGED = 'TopologyChanged'.freeze # Topology closed topic. # # @since 2.4.0 TOPOLOGY_CLOSED = 'TopologyClosed'.freeze # Topology opening topic. # # @since 2.4.0 TOPOLOGY_OPENING = 'TopologyOpening'.freeze @@operation_id = 0 @@operation_id_lock = Mutex.new # Used for generating unique operation ids to link events together. # # @example Get the next operation id. # Monitoring.next_operation_id # # @return [ Integer ] The next operation id. # # @since 2.1.0 def self.next_operation_id @@operation_id_lock.synchronize do @@operation_id += 1 end end # Provides behaviour around global subscribers. # # @since 2.1.0 module Global extend self # Subscribe a listener to an event topic. # # @example Subscribe to the topic. # Monitoring::Global.subscribe(QUERY, subscriber) # # @param [ String ] topic The event topic. # @param [ Object ] subscriber The subscriber to handle the event. # # @since 2.1.0 def subscribe(topic, subscriber) subscribers_for(topic).push(subscriber) end # Get all the global subscribers. # # @example Get all the global subscribers. # Monitoring::Global.subscribers # # @return [ Hash ] The subscribers. # # @since 2.1.0 def subscribers @subscribers ||= {} end private def subscribers_for(topic) subscribers[topic] ||= [] end end # Initialize the monitoring. # # @api private # # @example Create the new monitoring. # Monitoring.new(:monitoring => true) # # @param [ Hash ] options The options. # # @since 2.1.0 def initialize(options = {}) if options[:monitoring] != false Global.subscribers.each do |topic, subscribers| subscribers.each do |subscriber| subscribe(topic, subscriber) end end subscribe(COMMAND, CommandLogSubscriber.new(options)) subscribe(SERVER_OPENING, ServerOpeningLogSubscriber.new(options)) subscribe(SERVER_CLOSED, ServerClosedLogSubscriber.new(options)) subscribe(SERVER_DESCRIPTION_CHANGED, ServerDescriptionChangedLogSubscriber.new(options)) subscribe(TOPOLOGY_OPENING, TopologyOpeningLogSubscriber.new(options)) subscribe(TOPOLOGY_CHANGED, TopologyChangedLogSubscriber.new(options)) end end # Publish a started event. # # @example Publish a started event. # monitoring.started(COMMAND, event) # # @param [ String ] topic The event topic. # @param [ Event ] event The event to publish. # # @since 2.1.0 def started(topic, event) subscribers_for(topic).each{ |subscriber| subscriber.started(event) } end # Publish a succeeded event. # # @example Publish a succeeded event. # monitoring.succeeded(COMMAND, event) # # @param [ String ] topic The event topic. # @param [ Event ] event The event to publish. 
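# A self-contained sketch of the mutex-guarded counter pattern used by
# Monitoring.next_operation_id above; OperationCounter is an illustrative
# name, not part of the driver.
require 'thread'

class OperationCounter
  def initialize
    @value = 0
    @lock  = Mutex.new
  end

  def next_id
    @lock.synchronize { @value += 1 }
  end
end

counter = OperationCounter.new
threads = 4.times.map { Thread.new { 1000.times { counter.next_id } } }
threads.each(&:join)
counter.next_id # => 4001, since no increments are lost under concurrency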
# # @since 2.1.0 def succeeded(topic, event) subscribers_for(topic).each{ |subscriber| subscriber.succeeded(event) } end # Publish a failed event. # # @example Publish a failed event. # monitoring.failed(COMMAND, event) # # @param [ String ] topic The event topic. # @param [ Event ] event The event to publish. # # @since 2.1.0 def failed(topic, event) subscribers_for(topic).each{ |subscriber| subscriber.failed(event) } end # Subscribe a listener to an event topic. # # @example Subscribe to the topic. # monitoring.subscribe(QUERY, subscriber) # # @param [ String ] topic The event topic. # @param [ Object ] subscriber The subscriber to handle the event. # # @since 2.1.0 def subscribe(topic, subscriber) subscribers_for(topic).push(subscriber) end # Get all the subscribers. # # @example Get all the subscribers. # monitoring.subscribers # # @return [ Hash ] The subscribers. # # @since 2.1.0 def subscribers @subscribers ||= {} end # Determine if there are any subscribers for a particular event. # # @example Are there subscribers? # monitoring.subscribers?(COMMAND) # # @param [ String ] topic The event topic. # # @return [ true, false ] If there are subscribers for the topic. # # @since 2.1.0 def subscribers?(topic) !subscribers_for(topic).empty? end private def initialize_copy(original) @subscribers = original.subscribers.dup end def subscribers_for(topic) subscribers[topic] ||= [] end end end mongo-2.5.1/lib/mongo/database/0000755000004100000410000000000013257253113016321 5ustar www-datawww-datamongo-2.5.1/lib/mongo/database/view.rb0000644000004100000410000000733313257253113017626 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Database # A class representing a view of a database. # # @since 2.0.0 class View extend Forwardable include Enumerable def_delegators :@database, :cluster, :read_preference, :client def_delegators :cluster, :next_primary # @return [ Integer ] batch_size The size of the batch of results # when sending the listCollections command. attr_reader :batch_size # @return [ Integer ] limit The limit when sending a command. attr_reader :limit # @return [ Collection ] collection The command collection. attr_reader :collection # Get all the names of the non system collections in the database. # # @example Get the collection names. # database.collection_names # # @param [ Hash ] options Options for the listCollections command. # # @option options [ Integer ] :batch_size The batch size for results # returned from the listCollections command. # # @return [ Array ] The names of all non-system collections. # # @since 2.0.0 def collection_names(options = {}) @batch_size = options[:batch_size] server = next_primary(false) @limit = -1 if server.features.list_collections_enabled? session = client.send(:get_session, options) collections_info(server, session).collect do |info| if server.features.list_collections_enabled? 
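# Example of attaching a custom command subscriber through the Global module
# defined above. Any object responding to #started, #succeeded and #failed
# can subscribe to the COMMAND topic; as Monitoring#initialize above shows,
# global subscribers are copied onto the monitoring of clients created after
# the subscription. This assumes the mongo gem has been required.
class TimingSubscriber
  def started(event)
    puts "#{event.command_name} started on #{event.address}"
  end

  def succeeded(event)
    puts "#{event.command_name} took #{event.duration}s"
  end

  def failed(event)
    puts "#{event.command_name} failed: #{event.message}"
  end
end

Mongo::Monitoring::Global.subscribe(Mongo::Monitoring::COMMAND, TimingSubscriber.new)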
info[Database::NAME] else (info[Database::NAME] && info[Database::NAME].sub("#{@database.name}.", '')) end end end # Get info on all the collections in the database. # # @example Get info on each collection. # database.list_collections # # @return [ Array ] Info for each collection in the database. # # @since 2.0.5 def list_collections session = client.send(:get_session) collections_info(next_primary(false), session) end # Create the new database view. # # @example Create the new database view. # View::Index.new(database) # # @param [ Database ] database The database. # # @since 2.0.0 def initialize(database) @database = database @batch_size = nil @limit = nil @collection = @database[Database::COMMAND] end private def collections_info(server, session, &block) cursor = Cursor.new(self, send_initial_query(server, session), server, session: session) cursor.each do |doc| yield doc end if block_given? cursor.to_enum end def collections_info_spec(session) { selector: { listCollections: 1, cursor: batch_size ? { batchSize: batch_size } : {} }, db_name: @database.name, session: session } end def initial_query_op(session) Operation::Commands::CollectionsInfo.new(collections_info_spec(session)) end def send_initial_query(server, session) initial_query_op(session).execute(server) end end end end mongo-2.5.1/lib/mongo/monitoring/0000755000004100000410000000000013257253113016742 5ustar www-datawww-datamongo-2.5.1/lib/mongo/monitoring/command_log_subscriber.rb0000644000004100000410000000567613257253113024007 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Subscribes to command events and logs them. # # @since 2.1.0 class CommandLogSubscriber include Loggable # @return [ Hash ] options The options. attr_reader :options # Constant for the max number of characters to print when inspecting # a query field. # # @since 2.1.0 LOG_STRING_LIMIT = 250 # Create the new log subscriber. # # @example Create the log subscriber. # CommandLogSubscriber.new # # @param [ Hash ] options The options. # # @option options [ Logger ] :logger An optional custom logger. # # @since 2.1.0 def initialize(options = {}) @options = options end # Handle the command started event. # # @example Handle the event. # subscriber.started(event) # # @param [ CommandStartedEvent ] event The event. # # @since 2.1.0 def started(event) if logger.debug? log_debug("#{prefix(event)} | STARTED | #{format_command(event.command)}") end end # Handle the command succeeded event. # # @example Handle the event. # subscriber.succeeded(event) # # @param [ CommandSucceededEvent ] event The event. # # @since 2.1.0 def succeeded(event) if logger.debug? log_debug("#{prefix(event)} | SUCCEEDED | #{event.duration}s") end end # Handle the command failed event. # # @example Handle the event. # subscriber.failed(event) # # @param [ CommandFailedEvent ] event The event. # # @since 2.1.0 def failed(event) if logger.debug? 
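# A sketch of the selector shape produced by collections_info_spec above: the
# listCollections command takes an optional cursor batch size. The helper
# name below is illustrative, not driver API.
def build_list_collections_selector(batch_size = nil)
  {
    listCollections: 1,
    cursor: batch_size ? { batchSize: batch_size } : {}
  }
end

build_list_collections_selector      # => { :listCollections => 1, :cursor => {} }
build_list_collections_selector(100) # => { :listCollections => 1, :cursor => { :batchSize => 100 } }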
log_debug("#{prefix(event)} | FAILED | #{event.message} | #{event.duration}s") end end private def format_command(args) begin truncating? ? truncate(args) : args.inspect rescue Exception '' end end def prefix(event) "#{event.address.to_s} | #{event.database_name}.#{event.command_name}" end def truncate(command) ((s = command.inspect).length > LOG_STRING_LIMIT) ? "#{s[0..LOG_STRING_LIMIT]}..." : s end def truncating? @truncating ||= (options[:truncate_logs] != false) end end end end mongo-2.5.1/lib/mongo/monitoring/publishable.rb0000644000004100000410000000705113257253113021564 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Defines behaviour for an object that can publish monitoring events. # # @since 2.1.0 module Publishable # @return [ Monitoring ] monitoring The monitoring. attr_reader :monitoring # Publish a command event to the global monitoring. # # @example Publish a command event. # publish_command do |messages| # # ... # end # # @param [ Array ] messages The messages. # # @return [ Object ] The result of the yield. # # @since 2.1.0 def publish_command(messages, operation_id = Monitoring.next_operation_id) start = Time.now message = messages.first message.set_request_id payload = message.payload send_duration = duration(start) command_started(address, operation_id, payload) receive_start = Time.now begin result = yield(messages) total_duration = duration(receive_start) + send_duration command_completed(result, address, operation_id, payload, total_duration) result rescue Exception => e total_duration = duration(receive_start) + send_duration command_failed(address, operation_id, payload, e.message, total_duration) raise e end end def publish_event(topic, event) monitoring.succeeded(topic, event) end def publish_sdam_event(topic, event) monitoring.succeeded(topic, event) if monitoring? end private def command_started(address, operation_id, payload) monitoring.started( Monitoring::COMMAND, Event::CommandStarted.generate(address, operation_id, payload) ) end def command_completed(result, address, operation_id, payload, duration) document = result ? (result.documents || []).first : nil if error?(document) parser = Error::Parser.new(document) command_failed(address, operation_id, payload, parser.message, duration) else command_succeeded(result, address, operation_id, payload, duration) end end def command_succeeded(result, address, operation_id, payload, duration) monitoring.succeeded( Monitoring::COMMAND, Event::CommandSucceeded.generate( address, operation_id, payload, result ? result.payload : nil, duration ) ) end def command_failed(address, operation_id, payload, message, duration) monitoring.failed( Monitoring::COMMAND, Event::CommandFailed.generate(address, operation_id, payload, message, duration) ) end def duration(start) Time.now - start end def error?(document) document && (document['ok'] == 0 || document.key?('$err')) end def monitoring? 
options[:monitoring] != false end end end end mongo-2.5.1/lib/mongo/monitoring/sdam_log_subscriber.rb0000644000004100000410000000267113257253113023305 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Subscribes to SDAM events and logs them. # # @since 2.4.0 class SDAMLogSubscriber include Loggable # @return [ Hash ] options The options. attr_reader :options # Create the new log subscriber. # # @example Create the log subscriber. # SDAMLogSubscriber.new # # @param [ Hash ] options The options. # # @option options [ Logger ] :logger An optional custom logger. # # @since 2.4.0 def initialize(options = {}) @options = options end # Handle the SDAM succeeded event. # # @example Handle the event. # subscriber.succeeded(event) # # @param [ Event ] event The event. # # @since 2.4.0 def succeeded(event) log_event(event) if logger.debug? end end end end mongo-2.5.1/lib/mongo/monitoring/event.rb0000644000004100000410000000206013257253113020406 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'mongo/monitoring/event/secure' require 'mongo/monitoring/event/command_started' require 'mongo/monitoring/event/command_succeeded' require 'mongo/monitoring/event/command_failed' require 'mongo/monitoring/event/server_closed' require 'mongo/monitoring/event/server_description_changed' require 'mongo/monitoring/event/server_opening' require 'mongo/monitoring/event/topology_changed' require 'mongo/monitoring/event/topology_closed' require 'mongo/monitoring/event/topology_opening' mongo-2.5.1/lib/mongo/monitoring/topology_opening_log_subscriber.rb0000644000004100000410000000162713257253113025754 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Subscribes to Topology Openeing events and logs them. 
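# A self-contained sketch of the template-method pattern used by
# SDAMLogSubscriber above: the base class decides when to log (on a
# succeeded event), while each subclass supplies the message via a private
# log_event hook. The Toy* classes are illustrative stand-ins that print
# instead of using the driver's Loggable machinery.
class ToySDAMSubscriber
  def succeeded(event)
    log_event(event)
  end

  private

  def log_event(event)
    raise NotImplementedError, 'subclasses supply the message'
  end
end

class ToyServerOpeningSubscriber < ToySDAMSubscriber
  private

  def log_event(event)
    puts "Server #{event[:address]} initializing."
  end
end

ToyServerOpeningSubscriber.new.succeeded(address: '127.0.0.1:27017')
# prints: Server 127.0.0.1:27017 initializing.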
# # @since 2.4.0 class TopologyOpeningLogSubscriber < SDAMLogSubscriber private def log_event(event) log_debug("Topology type '#{event.topology.display_name.downcase}' initializing.") end end end end mongo-2.5.1/lib/mongo/monitoring/topology_changed_log_subscriber.rb0000644000004100000410000000236013257253113025701 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Subscribes to Topology Changed events and logs them. # # @since 2.4.0 class TopologyChangedLogSubscriber < SDAMLogSubscriber private def log_event(event) if event.previous_topology != event.new_topology log_debug( "Topology type '#{event.previous_topology.display_name.downcase}' changed to " + "type '#{event.new_topology.display_name.downcase}'." ) else log_debug( "There was a change in the members of the '#{event.new_topology.display_name.downcase}' " + "topology." ) end end end end end mongo-2.5.1/lib/mongo/monitoring/event/0000755000004100000410000000000013257253113020063 5ustar www-datawww-datamongo-2.5.1/lib/mongo/monitoring/event/server_closed.rb0000644000004100000410000000244013257253113023247 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event fired when the server is closed. # # @since 2.4.0 class ServerClosed # @return [ Address ] address The server address. attr_reader :address # @return [ Topology ] topology The topology. attr_reader :topology # Create the event. # # @example Create the event. # ServerClosed.new(address) # # @param [ Address ] address The server address. # @param [ Integer ] topology The topology. # # @since 2.4.0 def initialize(address, topology) @address = address @topology = topology end end end end end mongo-2.5.1/lib/mongo/monitoring/event/topology_changed.rb0000644000004100000410000000266513257253113023746 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
module Mongo class Monitoring module Event # Event fired when the topology changes. # # @since 2.4.0 class TopologyChanged # @return [ Cluster::Topology ] previous_topology The previous topology. attr_reader :previous_topology # @return [ Cluster::Topology ] new_topology The new topology. attr_reader :new_topology # Create the event. # # @example Create the event. # TopologyChanged.new(previous, new) # # @param [ Cluster::Topology ] previous_topology The previous topology. # @param [ Cluster::Topology ] new_topology The new topology. # # @since 2.4.0 def initialize(previous_topology, new_topology) @previous_topology = previous_topology @new_topology = new_topology end end end end end mongo-2.5.1/lib/mongo/monitoring/event/secure.rb0000644000004100000410000000436313257253113021704 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Provides behaviour to redact sensitive information from commands and # replies. # # @since 2.1.0 module Secure # The list of commands that has the data redacted for security. # # @since 2.1.0 REDACTED_COMMANDS = [ 'authenticate', 'saslStart', 'saslContinue', 'getnonce', 'createUser', 'updateUser', 'copydbgetnonce', 'copydbsaslstart', 'copydb' ].freeze # Redact secure information from the document if it's command is in the # list. # # @example Get the redacted document. # secure.redacted(command_name, document) # # @param [ String, Symbol ] command_name The command name. # @param [ BSON::Document ] document The document. # # @return [ BSON::Document ] The redacted document. # # @since 2.1.0 def redacted(command_name, document) REDACTED_COMMANDS.include?(command_name.to_s) ? BSON::Document.new : document end # Is compression allowed for a given command message. # # @example Determine if compression is allowed for a given command. # secure.compression_allowed?(selector) # # @param [ String, Symbol ] command_name The command name. # # @return [ true, false ] Whether compression can be used. # # @since 2.5.0 def compression_allowed?(command_name) @compression_allowed ||= !REDACTED_COMMANDS.include?(command_name.to_s) end end end end end mongo-2.5.1/lib/mongo/monitoring/event/command_failed.rb0000644000004100000410000000633513257253113023341 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event that is fired when a command operation fails. 
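# A stand-alone restatement of the redaction rule in Secure#redacted above:
# commands that may carry credentials are replaced with an empty document
# before being handed to monitoring subscribers. A plain hash stands in for
# BSON::Document.new here.
REDACTED_COMMANDS = %w(
  authenticate saslStart saslContinue getnonce
  createUser updateUser copydbgetnonce copydbsaslstart copydb
).freeze

def redacted(command_name, document)
  REDACTED_COMMANDS.include?(command_name.to_s) ? {} : document
end

redacted(:ping, ping: 1)                                  # => { :ping => 1 }
redacted(:saslStart, saslStart: 1, payload: 'credential') # => {}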
# # @since 2.1.0 class CommandFailed # @return [ Server::Address ] address The server address. attr_reader :address # @return [ String ] command_name The name of the command. attr_reader :command_name # @return [ String ] database_name The name of the database_name. attr_reader :database_name # @return [ Float ] duration The duration of the command in seconds. attr_reader :duration # @return [ String ] message The error message. attr_reader :message # @return [ Integer ] operation_id The operation id. attr_reader :operation_id # @return [ Integer ] request_id The request id. attr_reader :request_id # Create the new event. # # @example Create the event. # # @param [ String ] command_name The name of the command. # @param [ String ] database_name The database_name name. # @param [ Server::Address ] address The server address. # @param [ Integer ] request_id The request id. # @param [ Integer ] operation_id The operation id. # @param [ String ] message The error message. # @param [ Float ] duration The duration the command took in seconds. # # @since 2.1.0 def initialize(command_name, database_name, address, request_id, operation_id, message, duration) @command_name = command_name @database_name = database_name @address = address @request_id = request_id @operation_id = operation_id @message = message @duration = duration end # Create the event from a wire protocol message payload. # # @example Create the event. # CommandFailed.generate(address, 1, payload, duration) # # @param [ Server::Address ] address The server address. # @param [ Integer ] operation_id The operation id. # @param [ Hash ] payload The message payload. # @param [ String ] message The error message. # @param [ Float ] duration The duration of the command in seconds. # # @return [ CommandFailed ] The event. # # @since 2.1.0 def self.generate(address, operation_id, payload, message, duration) new( payload[:command_name], payload[:database_name], address, payload[:request_id], operation_id, message, duration ) end end end end end mongo-2.5.1/lib/mongo/monitoring/event/topology_opening.rb0000644000004100000410000000216313257253113024005 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event fired when the topology is opening. # # @since 2.4.0 class TopologyOpening # @return [ Topology ] topology The topology. attr_reader :topology # Create the event. # # @example Create the event. # TopologyOpening.new(topology) # # @param [ Integer ] topology The topology. # # @since 2.4.0 def initialize(topology) @topology = topology end end end end end mongo-2.5.1/lib/mongo/monitoring/event/server_opening.rb0000644000004100000410000000244313257253113023440 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event fired when the server is opening. # # @since 2.4.0 class ServerOpening # @return [ Address ] address The server address. attr_reader :address # @return [ Topology ] topology The topology. attr_reader :topology # Create the event. # # @example Create the event. # ServerOpening.new(address) # # @param [ Address ] address The server address. # @param [ Integer ] topology The topology. # # @since 2.4.0 def initialize(address, topology) @address = address @topology = topology end end end end end mongo-2.5.1/lib/mongo/monitoring/event/command_started.rb0000644000004100000410000000563113257253113023561 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event that is fired when a command operation starts. # # @since 2.1.0 class CommandStarted include Secure # @return [ Server::Address ] address The server address. attr_reader :address # @return [ BSON::Document ] command The command arguments. attr_reader :command # @return [ String ] command_name The name of the command. attr_reader :command_name # @return [ String ] database_name The name of the database_name. attr_reader :database_name # @return [ Integer ] operation_id The operation id. attr_reader :operation_id # @return [ Integer ] request_id The request id. attr_reader :request_id # Create the new event. # # @example Create the event. # # @param [ String ] command_name The name of the command. # @param [ String ] database_name The database_name name. # @param [ Server::Address ] address The server address. # @param [ Integer ] request_id The request id. # @param [ Integer ] operation_id The operation id. # @param [ BSON::Document ] command The command arguments. # # @since 2.1.0 def initialize(command_name, database_name, address, request_id, operation_id, command) @command_name = command_name @database_name = database_name @address = address @request_id = request_id @operation_id = operation_id @command = redacted(command_name, command) end # Create the event from a wire protocol message payload. # # @example Create the event. # CommandStarted.generate(address, 1, payload) # # @param [ Server::Address ] address The server address. # @param [ Integer ] operation_id The operation id. # @param [ Hash ] payload The message payload. # # @return [ CommandStarted ] The event. 
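# A sketch of the event-from-payload factory pattern shared by the command
# monitoring events in this directory: the wire-protocol payload hash is
# unpacked into an immutable, read-only event object. ToyCommandStarted is an
# illustrative stand-in, not the driver's class.
class ToyCommandStarted
  attr_reader :command_name, :database_name, :address, :request_id, :command

  def initialize(command_name, database_name, address, request_id, command)
    @command_name  = command_name
    @database_name = database_name
    @address       = address
    @request_id    = request_id
    @command       = command
  end

  def self.generate(address, payload)
    new(
      payload[:command_name],
      payload[:database_name],
      address,
      payload[:request_id],
      payload[:command]
    )
  end
end

event = ToyCommandStarted.generate(
  '127.0.0.1:27017',
  command_name: 'find', database_name: 'test', request_id: 1,
  command: { find: 'users', filter: {} }
)
event.command_name # => "find"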
# # @since 2.1.0 def self.generate(address, operation_id, payload) new( payload[:command_name], payload[:database_name], address, payload[:request_id], operation_id, payload[:command] ) end end end end end mongo-2.5.1/lib/mongo/monitoring/event/command_succeeded.rb0000644000004100000410000000777613257253113024053 0ustar www-datawww-data# Copyright (C) 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event that is fired when a command operation succeeds. # # @since 2.1.0 class CommandSucceeded include Secure # @return [ Server::Address ] address The server address. attr_reader :address # @return [ String ] command_name The name of the command. attr_reader :command_name # @return [ BSON::Document ] reply The command reply. attr_reader :reply # @return [ String ] database_name The name of the database. attr_reader :database_name # @return [ Float ] duration The duration of the event. attr_reader :duration # @return [ Integer ] operation_id The operation id. attr_reader :operation_id # @return [ Integer ] request_id The request id. attr_reader :request_id # Create the new event. # # @example Create the event. # # @param [ String ] command_name The name of the command. # @param [ String ] database_name The database name. # @param [ Server::Address ] address The server address. # @param [ Integer ] request_id The request id. # @param [ Integer ] operation_id The operation id. # @param [ BSON::Document ] reply The command reply. # @param [ Float ] duration The duration the command took in seconds. # # @since 2.1.0 def initialize(command_name, database_name, address, request_id, operation_id, reply, duration) @command_name = command_name @database_name = database_name @address = address @request_id = request_id @operation_id = operation_id @reply = redacted(command_name, reply) @duration = duration end # Create the event from a wire protocol message payload. # # @example Create the event. # CommandSucceeded.generate(address, 1, command_payload, reply_payload, 0.5) # # @param [ Server::Address ] address The server address. # @param [ Integer ] operation_id The operation id. # @param [ Hash ] command_payload The command message payload. # @param [ Hash ] reply_payload The reply message payload. # @param [ Float ] duration The duration of the command in seconds. # # @return [ CommandCompleted ] The event. 
# # @since 2.1.0 def self.generate(address, operation_id, command_payload, reply_payload, duration) new( command_payload[:command_name], command_payload[:database_name], address, command_payload[:request_id], operation_id, generate_reply(command_payload, reply_payload), duration ) end private def self.generate_reply(command_payload, reply_payload) if reply_payload reply = reply_payload[:reply] if cursor = reply[:cursor] if !cursor.key?(Collection::NS) cursor.merge!(Collection::NS => namespace(command_payload)) end end reply else BSON::Document.new(Operation::Result::OK => 1) end end def self.namespace(payload) command = payload[:command] "#{payload[:database_name]}.#{command[:collection] || command.values.first}" end end end end end mongo-2.5.1/lib/mongo/monitoring/event/server_description_changed.rb0000644000004100000410000000365313257253113026001 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event fired when a server's description changes. # # @since 2.4.0 class ServerDescriptionChanged # @return [ Address ] address The server address. attr_reader :address # @return [ Topology ] topology The topology. attr_reader :topology # @return [ Server::Description ] previous_description The previous server # description. attr_reader :previous_description # @return [ Server::Description ] new_description The new server # description. attr_reader :new_description # Create the event. # # @example Create the event. # ServerDescriptionChanged.new(address, topology, previous, new) # # @param [ Address ] address The server address. # @param [ Integer ] topology The topology. # @param [ Server::Description ] previous_description The previous description. # @param [ Server::Description ] new_description The new description. # # @since 2.4.0 def initialize(address, topology, previous_description, new_description) @address = address @topology = topology @previous_description = previous_description @new_description = new_description end end end end end mongo-2.5.1/lib/mongo/monitoring/event/topology_closed.rb0000644000004100000410000000215513257253113023620 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring module Event # Event fired when the topology closes. # # @since 2.4.0 class TopologyClosed # @return [ Topology ] topology The topology. attr_reader :topology # Create the event. # # @example Create the event. 
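# A stand-alone sketch of the namespace derivation performed by
# CommandSucceeded.namespace above: the fully qualified "db.collection"
# string is rebuilt from the command payload, falling back to the first
# command value (for example the collection name given to find). The helper
# name is illustrative.
def derive_namespace(payload)
  command = payload[:command]
  "#{payload[:database_name]}.#{command[:collection] || command.values.first}"
end

derive_namespace(database_name: 'app',
                 command: { find: 'users', filter: {} })       # => "app.users"
derive_namespace(database_name: 'app',
                 command: { collection: 'logs', getMore: 42 }) # => "app.logs"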
# TopologyClosed.new(topology) # # @param [ Integer ] topology The topology. # # @since 2.4.0 def initialize(topology) @topology = topology end end end end end mongo-2.5.1/lib/mongo/monitoring/server_description_changed_log_subscriber.rb0000644000004100000410000000202213257253113027731 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Subscribes to Server Description Changed events and logs them. # # @since 2.4.0 class ServerDescriptionChangedLogSubscriber < SDAMLogSubscriber private def log_event(event) log_debug( "Server description for #{event.address} changed from " + "'#{event.previous_description.server_type}' to '#{event.new_description.server_type}'." ) end end end end mongo-2.5.1/lib/mongo/monitoring/server_closed_log_subscriber.rb0000644000004100000410000000156513257253113025221 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Subscribes to Server Closed events and logs them. # # @since 2.4.0 class ServerClosedLogSubscriber < SDAMLogSubscriber private def log_event(event) log_debug("Server #{event.address} connection closed.") end end end end mongo-2.5.1/lib/mongo/monitoring/server_opening_log_subscriber.rb0000644000004100000410000000156213257253113025404 0ustar www-datawww-data# Copyright (C) 2016 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. module Mongo class Monitoring # Subscribes to Server Opening events and logs them. # # @since 2.4.0 class ServerOpeningLogSubscriber < SDAMLogSubscriber private def log_event(event) log_debug("Server #{event.address} initializing.") end end end end mongo-2.5.1/lib/mongo.rb0000644000004100000410000000241113257253113015100 0ustar www-datawww-data# Copyright (C) 2014-2017 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'forwardable' require 'bson' require 'openssl' require 'mongo/bson' require 'mongo/options' require 'mongo/loggable' require 'mongo/monitoring' require 'mongo/logger' require 'mongo/retryable' require 'mongo/operation' require 'mongo/error' require 'mongo/event' require 'mongo/address' require 'mongo/auth' require 'mongo/protocol' require 'mongo/client' require 'mongo/cluster' require 'mongo/cursor' require 'mongo/collection' require 'mongo/database' require 'mongo/dbref' require 'mongo/grid' require 'mongo/index' require 'mongo/server' require 'mongo/server_selector' require 'mongo/session' require 'mongo/socket' require 'mongo/uri' require 'mongo/version' require 'mongo/write_concern' mongo-2.5.1/lib/csasl/0000755000004100000410000000000013257253113014543 5ustar www-datawww-datamongo-2.5.1/lib/csasl/csasl.bundle0000755000004100000410000002352013257253113017050 0ustar www-datawww-data
mongo-2.5.1/checksums.yaml.gz.sig0000444000004100000410000000040013257253113016731 0ustar www-datawww-data
mongo-2.5.1/CONTRIBUTING.md0000644000004100000410000000456113257253113015127 0ustar www-datawww-data## Contributing to the MongoDB Ruby Driver Thank you for your interest in contributing to the MongoDB Ruby driver. We are building this software together and strongly encourage contributions from the community that are within the guidelines set forth below.
Bug Fixes and New Features -------------------------- Before starting to write code, look for existing [tickets] (https://jira.mongodb.org/browse/RUBY) or [create one] (https://jira.mongodb.org/secure/CreateIssue!default.jspa) for your bug, issue, or feature request. This helps the community avoid working on something that might not be of interest or which has already been addressed. Environment ----------- We highly suggest using [RVM](https://rvm.io/) or [rbenv] (https://github.com/sstephenson/rbenv) to set up Ruby development and testing environments. In this way, moving between and testing code for alternate Ruby versions (besides the one possibly included with your system) is simple. This practice is essential for ensuring the quality of the driver. Pull Requests ------------- Pull requests should be made against the master (development) branch and include relevant tests, if applicable. The driver follows the Git-Flow branching model where the traditional master branch is known as release and the master (default) branch is considered under development. Tests should pass under all Ruby interpreters which the MongoDB Ruby driver currently supports (1.8.7, 1.9.3, JRuby 1.6.x and 1.7.x) and will be automatically tested. The results of pull request testing will be appended to the request. If any tests do not pass, or relavant tests are not included the pull request will not be considered. Clusters and Replica Sets ------------------------- If your bug fix or enhancement deals with Cluster or Replica Set code, please run all relevant tests for those code subsets before issuing the request. * `rake test:sharded_cluster` for sharded clusters * `rake test:replica_set` for replica sets Cluster and Replica Set testing is currently **not** automatically performed so it is important they are run in a thorough fashion under all supported interpreters before a pull request is made. Talk To Us ---------- We love to hear from you. If you want to work on something or have questions / complaints please reach out to us by creating a [question] (https://jira.mongodb.org/secure/CreateIssue.jspa?pid=10005&issuetype=6). mongo-2.5.1/LICENSE0000644000004100000410000002501713257253113013702 0ustar www-datawww-data Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright (C) 2009-2017 MongoDB, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
mongo-2.5.1/metadata.gz.sig0000444000004100000410000000040013257253113015563 0ustar www-datawww-data
[binary signature data omitted]
mongo-2.5.1/README.md0000644000004100000410000000662513257253113014156 0ustar www-datawww-data
MongoDB Ruby Driver [![Build Status][travis-img]][travis-url] [![Code Climate][codeclimate-img]][codeclimate-url] [![Gem Version][rubygems-img]][rubygems-url]
-----

The officially supported Ruby driver for [MongoDB](http://www.mongodb.org).

Documentation
-----

Documentation is located [here](http://docs.mongodb.org/ecosystem/drivers/ruby/).

API docs can be found [here](http://api.mongodb.org/ruby/).

Support & Feedback
-----

For issues, questions or feedback related to the Ruby driver, please look into our [support channels](http://www.mongodb.org/about/support). Please do not email any of the Ruby developers directly with issues or questions - you're more likely to get an answer quickly on the [mongodb-user list](http://groups.google.com/group/mongodb-user) on Google Groups.

Bugs & Feature Requests
-----

Do you have a bug to report or a feature request to make?

1. Visit [our issue tracker](https://jira.mongodb.org) and log in (or create an account if necessary).
2. Navigate to the [RUBY](https://jira.mongodb.org/browse/RUBY) project.
3. Click 'Create Issue' and fill out all the applicable form fields.

When reporting an issue, please keep in mind that all information in JIRA for all driver projects (e.g. RUBY, CSHARP, JAVA) and the Core Server (SERVER) project is **PUBLICLY** visible.
**PLEASE DO**

* Provide as much information as possible about the issue.
* Provide detailed steps for reproducing the issue.
* Provide any applicable code snippets, stack traces and log data.
* Specify version information for the driver and MongoDB.

**PLEASE DO NOT**

* Provide any sensitive data or server logs.
* Report potential security issues publicly (see 'Security Issues').

Running Tests
-----

The driver uses RSpec as its primary testing tool. To run all tests, simply run `rspec`. If you need to run the tests without making any external connections, set the environment variable `EXTERNAL_DISABLED` to 'true'.

To run a test at a specific location (where `42` is the line number), use:

    rspec path/to/spec.rb:42

Security Issues
-----

If you’ve identified a potential security-related issue in a driver or any other MongoDB project, please report it by following the [instructions here](http://docs.mongodb.org/manual/tutorial/create-a-vulnerability-report).

Release History
-----

Full release notes and release history are available [here](https://github.com/mongodb/mongo-ruby-driver/releases).

License
-----

Copyright (C) 2009-2017 MongoDB, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

[rubygems-img]: https://badge.fury.io/rb/mongo.svg
[rubygems-url]: http://badge.fury.io/rb/mongo
[travis-img]: https://secure.travis-ci.org/mongodb/mongo-ruby-driver.svg?branch=master
[travis-url]: http://travis-ci.org/mongodb/mongo-ruby-driver?branch=master
[codeclimate-img]: https://codeclimate.com/github/mongodb/mongo-ruby-driver.svg?branch=master
[codeclimate-url]: https://codeclimate.com/github/mongodb/mongo-ruby-driver?branch=master
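For orientation while working on or testing the driver, a minimal usage sketch is shown below. This is illustrative only and is not taken from the official documentation linked above; it assumes a `mongod` listening on `127.0.0.1:27017`, and the `test` database and `artists` collection names are placeholders.

    require 'mongo'

    # Connect to a locally running server and select the `test` database.
    client = Mongo::Client.new('mongodb://127.0.0.1:27017/test')

    # Collection handles are created lazily; the collection name and
    # document fields here are purely illustrative.
    artists = client[:artists]
    artists.insert_one(name: 'Ruby', founded: 1995)

    # Read the document back and print it.
    puts artists.find(name: 'Ruby').first

    client.close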
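The `EXTERNAL_DISABLED` switch described under Running Tests is read from the environment by the spec suite; the suite's actual helper is not shown here, but a guard along these lines (hypothetical file path and `external` tag, shown only as a sketch) is one way such examples can be skipped:

    # spec/support/external_guard.rb (hypothetical path, for illustration only)
    RSpec.configure do |config|
      # Skip examples tagged `external: true` when external connections
      # have been disabled via the environment.
      config.before(:each, external: true) do
        skip 'external connections are disabled' if ENV['EXTERNAL_DISABLED'] == 'true'
      end
    end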