redis-cluster-client-0.10.0/redis-cluster-client.gemspec

#########################################################
# This file has been automatically generated by gem2tgz #
#########################################################
# -*- encoding: utf-8 -*-
# stub: redis-cluster-client 0.10.0 ruby lib

Gem::Specification.new do |s|
  s.name = "redis-cluster-client".freeze
  s.version = "0.10.0"

  s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
  s.metadata = { "allowed_push_host" => "https://rubygems.org", "rubygems_mfa_required" => "true" } if s.respond_to? :metadata=
  s.require_paths = ["lib".freeze]
  s.authors = ["Taishi Kasuga".freeze]
  s.date = "2024-05-13"
  s.email = ["proxy0721@gmail.com".freeze]
  s.files = ["lib/redis-cluster-client.rb".freeze, "lib/redis_client/cluster.rb".freeze, "lib/redis_client/cluster/command.rb".freeze, "lib/redis_client/cluster/concurrent_worker.rb".freeze, "lib/redis_client/cluster/concurrent_worker/none.rb".freeze, "lib/redis_client/cluster/concurrent_worker/on_demand.rb".freeze, "lib/redis_client/cluster/concurrent_worker/pooled.rb".freeze, "lib/redis_client/cluster/error_identification.rb".freeze, "lib/redis_client/cluster/errors.rb".freeze, "lib/redis_client/cluster/key_slot_converter.rb".freeze, "lib/redis_client/cluster/node.rb".freeze, "lib/redis_client/cluster/node/base_topology.rb".freeze, "lib/redis_client/cluster/node/latency_replica.rb".freeze, "lib/redis_client/cluster/node/primary_only.rb".freeze, "lib/redis_client/cluster/node/random_replica.rb".freeze, "lib/redis_client/cluster/node/random_replica_or_primary.rb".freeze, "lib/redis_client/cluster/node_key.rb".freeze, "lib/redis_client/cluster/normalized_cmd_name.rb".freeze, "lib/redis_client/cluster/optimistic_locking.rb".freeze, "lib/redis_client/cluster/pipeline.rb".freeze, "lib/redis_client/cluster/pub_sub.rb".freeze, "lib/redis_client/cluster/router.rb".freeze, "lib/redis_client/cluster/transaction.rb".freeze, "lib/redis_client/cluster_config.rb".freeze, "lib/redis_cluster_client.rb".freeze]
  s.homepage = "https://github.com/redis-rb/redis-cluster-client".freeze
  s.licenses = ["MIT".freeze]
  s.required_ruby_version = Gem::Requirement.new(">= 2.7.0".freeze)
  s.rubygems_version = "3.4.20".freeze
  s.summary = "A Redis cluster client for Ruby".freeze

  s.specification_version = 4

  s.add_runtime_dependency(%q<redis-client>.freeze, ["~> 0.22"])
end

redis-cluster-client-0.10.0/lib/redis_cluster_client.rb

# frozen_string_literal: true

require 'redis_client/cluster_config'

class RedisClient
  class << self
    def cluster(**kwargs)
      ClusterConfig.new(**kwargs)
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster_config.rb

# frozen_string_literal: true

require 'uri'
require 'redis_client'
require 'redis_client/cluster'
require 'redis_client/cluster/node_key'
require 'redis_client/command_builder'

class RedisClient
  class ClusterConfig
    DEFAULT_HOST = '127.0.0.1'
    DEFAULT_PORT = 6379
    DEFAULT_SCHEME = 'redis'
    SECURE_SCHEME = 'rediss'
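    # A minimal usage sketch (added for illustration; not part of the gem source —
    # the node URL and key names are placeholders). This config class is normally
    # reached through the RedisClient.cluster shortcut defined in
    # redis_cluster_client.rb above:
    #
    #   config = RedisClient.cluster(nodes: %w[redis://127.0.0.1:7000], replica: true)
    #   client = config.new_client
    #   client.call('SET', '{user1}name', 'alice')
    #   client.call('GET', '{user1}name') #=> "alice"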
    DEFAULT_NODES = ["#{DEFAULT_SCHEME}://#{DEFAULT_HOST}:#{DEFAULT_PORT}"].freeze
    VALID_SCHEMES = [DEFAULT_SCHEME, SECURE_SCHEME].freeze
    VALID_NODES_KEYS = %i[ssl username password host port db].freeze
    MERGE_CONFIG_KEYS = %i[ssl username password].freeze
    IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
    MAX_WORKERS = Integer(ENV.fetch('REDIS_CLIENT_MAX_THREADS', 5))
    # Used for slow queries that fetch metadata, such as CLUSTER NODES, COMMAND and so on.
    SLOW_COMMAND_TIMEOUT = Float(ENV.fetch('REDIS_CLIENT_SLOW_COMMAND_TIMEOUT', -1))

    InvalidClientConfigError = Class.new(::RedisClient::Error)

    attr_reader :command_builder, :client_config, :replica_affinity, :slow_command_timeout,
                :connect_with_original_config, :startup_nodes

    def initialize(
      nodes: DEFAULT_NODES,
      replica: false,
      replica_affinity: :random,
      fixed_hostname: '',
      concurrency: nil,
      connect_with_original_config: false,
      client_implementation: ::RedisClient::Cluster, # for redis gem
      slow_command_timeout: SLOW_COMMAND_TIMEOUT,
      command_builder: ::RedisClient::CommandBuilder,
      **client_config
    )
      @replica = true & replica
      @replica_affinity = replica_affinity.to_s.to_sym
      @fixed_hostname = fixed_hostname.to_s
      @command_builder = command_builder
      node_configs = build_node_configs(nodes.dup)
      @client_config = merge_generic_config(client_config, node_configs)
      # Keep tabs on the original startup nodes we were constructed with
      @startup_nodes = build_startup_nodes(node_configs)
      @concurrency = merge_concurrency_option(concurrency)
      @connect_with_original_config = connect_with_original_config
      @client_implementation = client_implementation
      @slow_command_timeout = slow_command_timeout
    end

    def inspect
      "#<#{self.class.name} #{startup_nodes.values}>"
    end

    def read_timeout
      @client_config[:read_timeout] || @client_config[:timeout] || ::RedisClient::Config::DEFAULT_TIMEOUT
    end

    def new_pool(size: 5, timeout: 5, **kwargs)
      @client_implementation.new(
        self,
        pool: { size: size, timeout: timeout },
        concurrency: @concurrency,
        **kwargs
      )
    end

    def new_client(**kwargs)
      @client_implementation.new(self, concurrency: @concurrency, **kwargs)
    end

    def use_replica?
      @replica
    end

    def client_config_for_node(node_key)
      config = ::RedisClient::Cluster::NodeKey.hashify(node_key)
      config[:port] = ensure_integer(config[:port])
      augment_client_config(config)
    end

    private

    def merge_concurrency_option(option)
      case option
      when Hash
        option = option.transform_keys(&:to_sym)
        { size: MAX_WORKERS }.merge(option)
      else
        { size: MAX_WORKERS }
      end
    end

    def build_node_configs(addrs)
      configs = Array[addrs].flatten.filter_map { |addr| parse_node_addr(addr) }
      raise InvalidClientConfigError, '`nodes` option is empty' if configs.empty?

      configs
    end

    def parse_node_addr(addr)
      case addr
      when String
        parse_node_url(addr)
      when Hash
        parse_node_option(addr)
      else
        raise InvalidClientConfigError, "`nodes` option includes invalid type values: #{addr}"
      end
    end

    def parse_node_url(addr) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
      return if addr.empty?

      uri = URI(addr)
      scheme = uri.scheme || DEFAULT_SCHEME
      raise InvalidClientConfigError, "`nodes` option includes an invalid URI scheme: #{addr}" unless VALID_SCHEMES.include?(scheme)

      username = uri.user ? URI.decode_www_form_component(uri.user) : nil
      password = uri.password ? URI.decode_www_form_component(uri.password) : nil
      host = uri.host || DEFAULT_HOST
      port = uri.port || DEFAULT_PORT
      db = uri.path.index('/').nil? ? uri.path : uri.path.split('/')[1]
      db = db.nil? || db.empty? ?
        db : ensure_integer(db)

      { ssl: scheme == SECURE_SCHEME, username: username, password: password, host: host, port: port, db: db }
        .reject { |_, v| v.nil? || v == '' || v == false }
    rescue URI::InvalidURIError => e
      raise InvalidClientConfigError, "#{e.message}: #{addr}"
    end

    def parse_node_option(addr)
      return if addr.empty?

      addr = addr.transform_keys(&:to_sym)
      addr[:host] ||= DEFAULT_HOST
      addr[:port] = ensure_integer(addr[:port] || DEFAULT_PORT)
      addr.select { |k, _| VALID_NODES_KEYS.include?(k) }
    end

    def ensure_integer(value)
      Integer(value)
    rescue ArgumentError => e
      raise InvalidClientConfigError, e.message
    end

    def merge_generic_config(client_config, node_configs)
      cfg = node_configs.first || {}
      client_config.reject { |k, _| IGNORE_GENERIC_CONFIG_KEYS.include?(k) }
                   .merge(cfg.slice(*MERGE_CONFIG_KEYS))
    end

    def build_startup_nodes(configs)
      configs.to_h do |config|
        node_key = ::RedisClient::Cluster::NodeKey.build_from_host_port(config[:host], config[:port])
        config = augment_client_config(config)
        [node_key, config]
      end
    end

    def augment_client_config(config)
      config = @client_config.merge(config)
      config = config.merge(host: @fixed_hostname) unless @fixed_hostname.empty?
      config
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/transaction.rb

# frozen_string_literal: true

require 'redis_client'
require 'redis_client/cluster/pipeline'

class RedisClient
  class Cluster
    class Transaction
      ConsistencyError = Class.new(::RedisClient::Error)
      MAX_REDIRECTION = 2
      EMPTY_ARRAY = [].freeze

      def initialize(router, command_builder, node: nil, slot: nil, asking: false)
        @router = router
        @command_builder = command_builder
        @retryable = true
        @pipeline = ::RedisClient::Pipeline.new(@command_builder)
        @pending_commands = []
        @node = node
        prepare_tx unless @node.nil?
        @watching_slot = slot
        @asking = asking
      end

      def call(*command, **kwargs, &block)
        command = @command_builder.generate(command, kwargs)
        if prepare(command)
          @pipeline.call_v(command, &block)
        else
          defer { @pipeline.call_v(command, &block) }
        end
      end

      def call_v(command, &block)
        command = @command_builder.generate(command)
        if prepare(command)
          @pipeline.call_v(command, &block)
        else
          defer { @pipeline.call_v(command, &block) }
        end
      end

      def call_once(*command, **kwargs, &block)
        @retryable = false
        command = @command_builder.generate(command, kwargs)
        if prepare(command)
          @pipeline.call_once_v(command, &block)
        else
          defer { @pipeline.call_once_v(command, &block) }
        end
      end

      def call_once_v(command, &block)
        @retryable = false
        command = @command_builder.generate(command)
        if prepare(command)
          @pipeline.call_once_v(command, &block)
        else
          defer { @pipeline.call_once_v(command, &block) }
        end
      end

      def execute
        @pending_commands.each(&:call)
        return EMPTY_ARRAY if @pipeline._empty?
        raise ConsistencyError, "couldn't determine the node: #{@pipeline._commands}" if @node.nil?

        commit
      end

      private

      def defer(&block)
        @pending_commands << block
        nil
      end

      def prepare(command)
        return true unless @node.nil?

        node_key = @router.find_primary_node_key(command)
        return false if node_key.nil?
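        # The first command whose key resolves to a node pins the whole
        # transaction to that node; the commands deferred above are then
        # replayed inside MULTI by prepare_tx below.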
        @node = @router.find_node(node_key)
        prepare_tx
        true
      end

      def prepare_tx
        @pipeline.call('MULTI')
        @pending_commands.each(&:call)
        @pending_commands.clear
      end

      def commit
        @pipeline.call('EXEC')
        settle
      end

      def cancel
        @pipeline.call('DISCARD')
        settle
      end

      def settle
        # If we needed ASKING on the watch, we need ASKING on the multi as well.
        @node.call('ASKING') if @asking
        # Don't handle redirections at this level if we're in a watch (the watcher handles redirections
        # at the whole-transaction level.)
        send_transaction(@node, redirect: !!@watching_slot ? 0 : MAX_REDIRECTION)
      end

      def send_transaction(client, redirect:)
        case client
        when ::RedisClient then send_pipeline(client, redirect: redirect)
        when ::RedisClient::Pooled then client.with { |c| send_pipeline(c, redirect: redirect) }
        else raise NotImplementedError, "#{client.class.name}#multi for cluster client"
        end
      end

      def send_pipeline(client, redirect:)
        replies = client.ensure_connected_cluster_scoped(retryable: @retryable) do |connection|
          commands = @pipeline._commands
          client.middlewares.call_pipelined(commands, client.config) do
            connection.call_pipelined(commands, nil)
          rescue ::RedisClient::CommandError => e
            ensure_the_same_slot!(commands)
            return handle_command_error!(e, redirect: redirect) unless redirect.zero?

            raise
          end
        end

        return if replies.last.nil?

        coerce_results!(replies.last)
      end

      def coerce_results!(results, offset: 1)
        results.each_with_index do |result, index|
          if result.is_a?(::RedisClient::CommandError)
            result._set_command(@pipeline._commands[index + offset])
            raise result
          end

          next if @pipeline._blocks.nil?

          block = @pipeline._blocks[index + offset]
          next if block.nil?

          results[index] = block.call(result)
        end

        results
      end

      def handle_command_error!(err, redirect:) # rubocop:disable Metrics/AbcSize
        if err.message.start_with?('CROSSSLOT')
          raise ConsistencyError, "#{err.message}: #{err.command}"
        elsif err.message.start_with?('MOVED')
          node = @router.assign_redirection_node(err.message)
          send_transaction(node, redirect: redirect - 1)
        elsif err.message.start_with?('ASK')
          node = @router.assign_asking_node(err.message)
          try_asking(node) ? send_transaction(node, redirect: redirect - 1) : err
        else
          raise err
        end
      end

      def ensure_the_same_slot!(commands)
        slots = commands.map { |command| @router.find_slot(command) }.compact.uniq
        return if slots.size == 1 && @watching_slot.nil?
        return if slots.size == 1 && @watching_slot == slots.first

        raise(ConsistencyError, "the transaction should be executed to a slot in a node: #{commands}")
      end

      def try_asking(node)
        node.call('ASKING') == 'OK'
      rescue StandardError
        false
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/router.rb

# frozen_string_literal: true

require 'redis_client'
require 'redis_client/circuit_breaker'
require 'redis_client/cluster/command'
require 'redis_client/cluster/errors'
require 'redis_client/cluster/key_slot_converter'
require 'redis_client/cluster/node'
require 'redis_client/cluster/node_key'
require 'redis_client/cluster/normalized_cmd_name'
require 'redis_client/cluster/transaction'
require 'redis_client/cluster/optimistic_locking'
require 'redis_client/cluster/pipeline'
require 'redis_client/cluster/error_identification'

class RedisClient
  class Cluster
    class Router
      ZERO_CURSOR_FOR_SCAN = '0'
      TSF = ->(f, x) { f.nil? ?
        x : f.call(x) }.curry

      def initialize(config, concurrent_worker, pool: nil, **kwargs)
        @config = config.dup
        @original_config = config.dup if config.connect_with_original_config
        @connect_with_original_config = config.connect_with_original_config
        @concurrent_worker = concurrent_worker
        @pool = pool
        @client_kwargs = kwargs
        @node = ::RedisClient::Cluster::Node.new(concurrent_worker, config: config, pool: pool, **kwargs)
        update_cluster_info!
        @command = ::RedisClient::Cluster::Command.load(@node.replica_clients.shuffle, slow_command_timeout: config.slow_command_timeout)
        @command_builder = @config.command_builder
      end

      def send_command(method, command, *args, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        cmd = ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
        case cmd
        when 'ping' then @node.send_ping(method, command, args).first.then(&TSF.call(block))
        when 'wait' then send_wait_command(method, command, args, &block)
        when 'keys' then @node.call_replicas(method, command, args).flatten.sort_by(&:to_s).then(&TSF.call(block))
        when 'dbsize' then @node.call_replicas(method, command, args).select { |e| e.is_a?(Integer) }.sum.then(&TSF.call(block))
        when 'scan' then scan(command, seed: 1)
        when 'lastsave' then @node.call_all(method, command, args).sort_by(&:to_i).then(&TSF.call(block))
        when 'role' then @node.call_all(method, command, args, &block)
        when 'config' then send_config_command(method, command, args, &block)
        when 'client' then send_client_command(method, command, args, &block)
        when 'cluster' then send_cluster_command(method, command, args, &block)
        when 'memory' then send_memory_command(method, command, args, &block)
        when 'script' then send_script_command(method, command, args, &block)
        when 'pubsub' then send_pubsub_command(method, command, args, &block)
        when 'watch' then send_watch_command(command, &block)
        when 'mset', 'mget', 'del'
          send_multiple_keys_command(cmd, method, command, args, &block)
        when 'acl', 'auth', 'bgrewriteaof', 'bgsave', 'quit', 'save'
          @node.call_all(method, command, args).first.then(&TSF.call(block))
        when 'flushall', 'flushdb'
          @node.call_primaries(method, command, args).first.then(&TSF.call(block))
        when 'readonly', 'readwrite', 'shutdown'
          raise ::RedisClient::Cluster::OrchestrationCommandNotSupported, cmd
        when 'discard', 'exec', 'multi', 'unwatch'
          raise ::RedisClient::Cluster::AmbiguousNodeError, cmd
        else
          node = assign_node(command)
          try_send(node, method, command, args, &block)
        end
      rescue ::RedisClient::CircuitBreaker::OpenCircuitError
        raise
      rescue ::RedisClient::Cluster::Node::ReloadNeeded
        update_cluster_info!
        raise ::RedisClient::Cluster::NodeMightBeDown
      rescue ::RedisClient::Cluster::ErrorCollection => e
        raise if e.errors.any?(::RedisClient::CircuitBreaker::OpenCircuitError)

        update_cluster_info! if e.errors.values.any? do |err|
          next false if ::RedisClient::Cluster::ErrorIdentification.identifiable?(err) && @node.none? { |c| ::RedisClient::Cluster::ErrorIdentification.client_owns_error?(err, c) }

          err.message.start_with?('CLUSTERDOWN Hash slot not served')
        end

        raise
      end

      # @see https://redis.io/docs/reference/cluster-spec/#redirection-and-resharding Redirection and resharding
      def try_send(node, method, command, args, retry_count: 3, &block)
        handle_redirection(node, retry_count: retry_count) do |on_node|
          if args.empty?
            # prevent memory allocation for variable-length args
            on_node.public_send(method, command, &block)
          else
            on_node.public_send(method, *args, command, &block)
          end
        end
      end

      def try_delegate(node, method, *args, retry_count: 3, **kwargs, &block)
        handle_redirection(node, retry_count: retry_count) do |on_node|
          on_node.public_send(method, *args, **kwargs, &block)
        end
      end

      def handle_redirection(node, retry_count:) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        yield node
      rescue ::RedisClient::CircuitBreaker::OpenCircuitError
        raise
      rescue ::RedisClient::CommandError => e
        raise unless ::RedisClient::Cluster::ErrorIdentification.client_owns_error?(e, node)

        if e.message.start_with?('MOVED')
          node = assign_redirection_node(e.message)
          retry_count -= 1
          retry if retry_count >= 0
        elsif e.message.start_with?('ASK')
          node = assign_asking_node(e.message)
          retry_count -= 1
          if retry_count >= 0
            node.call('ASKING')
            retry
          end
        elsif e.message.start_with?('CLUSTERDOWN Hash slot not served')
          update_cluster_info!
          retry_count -= 1
          retry if retry_count >= 0
        end

        raise
      rescue ::RedisClient::ConnectionError => e
        raise unless ::RedisClient::Cluster::ErrorIdentification.client_owns_error?(e, node)

        update_cluster_info!

        raise if retry_count <= 0

        retry_count -= 1
        retry
      end

      def scan(*command, seed: nil, **kwargs) # rubocop:disable Metrics/AbcSize
        command = @command_builder.generate(command, kwargs)
        command[1] = ZERO_CURSOR_FOR_SCAN if command.size == 1
        input_cursor = Integer(command[1])

        client_index = input_cursor % 256
        raw_cursor = input_cursor >> 8

        clients = @node.clients_for_scanning(seed: seed)
        client = clients[client_index]
        return [ZERO_CURSOR_FOR_SCAN, []] unless client

        command[1] = raw_cursor.to_s

        result_cursor, result_keys = client.call_v(command)
        result_cursor = Integer(result_cursor)

        client_index += 1 if result_cursor == 0

        [((result_cursor << 8) + client_index).to_s, result_keys]
      end

      def assign_node(command)
        node_key = find_node_key(command)
        find_node(node_key)
      end

      def find_node_key_by_key(key, seed: nil, primary: false)
        if key && !key.empty?
          slot = ::RedisClient::Cluster::KeySlotConverter.convert(key)
          primary ? @node.find_node_key_of_primary(slot) : @node.find_node_key_of_replica(slot)
        else
          primary ? @node.any_primary_node_key(seed: seed) : @node.any_replica_node_key(seed: seed)
        end
      end

      def find_primary_node_by_slot(slot)
        node_key = @node.find_node_key_of_primary(slot)
        find_node(node_key)
      end

      def find_node_key(command, seed: nil)
        key = @command.extract_first_key(command)
        find_node_key_by_key(key, seed: seed, primary: @command.should_send_to_primary?(command))
      end

      def find_primary_node_key(command)
        key = @command.extract_first_key(command)
        return nil unless key&.size&.> 0

        find_node_key_by_key(key, primary: true)
      end

      def find_slot(command)
        find_slot_by_key(@command.extract_first_key(command))
      end

      def find_slot_by_key(key)
        return if key.empty?

        ::RedisClient::Cluster::KeySlotConverter.convert(key)
      end

      def find_node(node_key, retry_count: 3)
        @node.find_by(node_key)
      rescue ::RedisClient::Cluster::Node::ReloadNeeded
        raise ::RedisClient::Cluster::NodeMightBeDown if retry_count <= 0

        update_cluster_info!
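        # The topology may have changed (e.g. after a failover), so refresh the
        # cluster state before retrying the lookup.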
        retry_count -= 1
        retry
      end

      def command_exists?(name)
        @command.exists?(name)
      end

      def assign_redirection_node(err_msg)
        _, slot, node_key = err_msg.split
        slot = slot.to_i
        @node.update_slot(slot, node_key)
        find_node(node_key)
      end

      def assign_asking_node(err_msg)
        _, _, node_key = err_msg.split
        find_node(node_key)
      end

      def node_keys
        @node.node_keys
      end

      def close
        @node.each(&:close)
      end

      private

      def send_wait_command(method, command, args, retry_count: 3, &block) # rubocop:disable Metrics/AbcSize
        @node.call_primaries(method, command, args).select { |r| r.is_a?(Integer) }.sum.then(&TSF.call(block))
      rescue ::RedisClient::Cluster::ErrorCollection => e
        raise if e.errors.any?(::RedisClient::CircuitBreaker::OpenCircuitError)
        raise if retry_count <= 0
        raise if e.errors.values.none? do |err|
          err.message.include?('WAIT cannot be used with replica instances')
        end

        update_cluster_info!
        retry_count -= 1
        retry
      end

      def send_config_command(method, command, args, &block)
        case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_subcommand(command)
        when 'resetstat', 'rewrite', 'set'
          @node.call_all(method, command, args).first.then(&TSF.call(block))
        else assign_node(command).public_send(method, *args, command, &block)
        end
      end

      def send_memory_command(method, command, args, &block)
        case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_subcommand(command)
        when 'stats' then @node.call_all(method, command, args, &block)
        when 'purge' then @node.call_all(method, command, args).first.then(&TSF.call(block))
        else assign_node(command).public_send(method, *args, command, &block)
        end
      end

      def send_client_command(method, command, args, &block)
        case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_subcommand(command)
        when 'list' then @node.call_all(method, command, args, &block).flatten
        when 'pause', 'reply', 'setname'
          @node.call_all(method, command, args).first.then(&TSF.call(block))
        else assign_node(command).public_send(method, *args, command, &block)
        end
      end

      def send_cluster_command(method, command, args, &block)
        case subcommand = ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_subcommand(command)
        when 'addslots', 'delslots', 'failover', 'forget', 'meet', 'replicate',
             'reset', 'set-config-epoch', 'setslot'
          raise ::RedisClient::Cluster::OrchestrationCommandNotSupported, ['cluster', subcommand]
        when 'saveconfig' then @node.call_all(method, command, args).first.then(&TSF.call(block))
        when 'getkeysinslot'
          raise ArgumentError, command.join(' ') if command.size != 4

          find_node(@node.find_node_key_of_replica(command[2])).public_send(method, *args, command, &block)
        else assign_node(command).public_send(method, *args, command, &block)
        end
      end

      def send_script_command(method, command, args, &block) # rubocop:disable Metrics/AbcSize
        case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_subcommand(command)
        when 'debug', 'kill'
          @node.call_all(method, command, args).first.then(&TSF.call(block))
        when 'flush', 'load'
          @node.call_primaries(method, command, args).first.then(&TSF.call(block))
        when 'exists'
          @node.call_all(method, command, args).transpose.map { |arr| arr.any?(&:zero?) ?
            0 : 1 }.then(&TSF.call(block))
        else assign_node(command).public_send(method, *args, command, &block)
        end
      end

      def send_pubsub_command(method, command, args, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_subcommand(command)
        when 'channels'
          @node.call_all(method, command, args).flatten.uniq.sort_by(&:to_s).then(&TSF.call(block))
        when 'shardchannels'
          @node.call_replicas(method, command, args).flatten.uniq.sort_by(&:to_s).then(&TSF.call(block))
        when 'numpat'
          @node.call_all(method, command, args).select { |e| e.is_a?(Integer) }.sum.then(&TSF.call(block))
        when 'numsub'
          @node.call_all(method, command, args).reject(&:empty?).map { |e| Hash[*e] }
               .reduce({}) { |a, e| a.merge(e) { |_, v1, v2| v1 + v2 } }.then(&TSF.call(block))
        when 'shardnumsub'
          @node.call_replicas(method, command, args).reject(&:empty?).map { |e| Hash[*e] }
               .reduce({}) { |a, e| a.merge(e) { |_, v1, v2| v1 + v2 } }.then(&TSF.call(block))
        else assign_node(command).public_send(method, *args, command, &block)
        end
      end

      def send_watch_command(command)
        raise ::RedisClient::Cluster::Transaction::ConsistencyError,
              'A block is required, and the block argument must be used as the client for the transaction.' unless block_given?

        ::RedisClient::Cluster::OptimisticLocking.new(self).watch(command[1..]) do |c, slot, asking|
          transaction = ::RedisClient::Cluster::Transaction.new(
            self, @command_builder, node: c, slot: slot, asking: asking
          )
          yield transaction
          transaction.execute
        end
      end

      MULTIPLE_KEYS_COMMAND_TO_SINGLE = {
        'mget' => ['get', 1].freeze,
        'mset' => ['set', 2].freeze,
        'del' => ['del', 1].freeze
      }.freeze

      def send_multiple_keys_command(cmd, method, command, args, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        # This implementation prioritizes performance over readability.
        single_key_cmd, keys_step = MULTIPLE_KEYS_COMMAND_TO_SINGLE.fetch(cmd)

        return try_send(assign_node(command), method, command, args, &block) if command.size <= keys_step + 1 || ::RedisClient::Cluster::KeySlotConverter.hash_tag_included?(command[1])

        seed = @config.use_replica? && @config.replica_affinity == :random ? nil : Random.new_seed
        pipeline = ::RedisClient::Cluster::Pipeline.new(self, @command_builder, @concurrent_worker, exception: true, seed: seed)

        single_command = Array.new(keys_step + 1)
        single_command[0] = single_key_cmd
        if keys_step == 1
          command[1..].each do |key|
            single_command[1] = key
            pipeline.call_v(single_command)
          end
        else
          command[1..].each_slice(keys_step) do |v|
            keys_step.times { |i| single_command[i + 1] = v[i] }
            pipeline.call_v(single_command)
          end
        end

        replies = pipeline.execute
        result = case cmd
                 when 'mset' then replies.first
                 when 'del' then replies.sum
                 else replies
                 end
        block_given? ? yield(result) : result
      end

      def update_cluster_info!
        @node.reload!
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/pub_sub.rb

# frozen_string_literal: true

require 'redis_client'
require 'redis_client/cluster/normalized_cmd_name'

class RedisClient
  class Cluster
    class PubSub
      class State
        def initialize(client, queue)
          @client = client
          @worker = nil
          @queue = queue
        end

        def call(command)
          @client.call_v(command)
        end

        def ensure_worker
          @worker = spawn_worker(@client, @queue) unless @worker&.alive?
        end

        def close
          @worker.exit if @worker&.alive?
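          # Stop the reader thread first so it cannot race with closing the
          # connection it reads from.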
          @client.close
        end

        private

        def spawn_worker(client, queue)
          # The Ruby VM allocates 1 MB of memory as the stack for each thread.
          # The size is fixed, but it can be tuned with environment variables,
          # so memory use is roughly 1 MB multiplied by the number of workers.
          Thread.new(client, queue) do |pubsub, q|
            loop do
              q << pubsub.next_event
            rescue StandardError => e
              q << e
            end
          end
        end
      end

      BUF_SIZE = Integer(ENV.fetch('REDIS_CLIENT_PUBSUB_BUF_SIZE', 1024))

      def initialize(router, command_builder)
        @router = router
        @command_builder = command_builder
        @queue = SizedQueue.new(BUF_SIZE)
        @state_dict = {}
      end

      def call(*args, **kwargs)
        _call(@command_builder.generate(args, kwargs))
        nil
      end

      def call_v(command)
        _call(@command_builder.generate(command))
        nil
      end

      def close
        @state_dict.each_value(&:close)
        @state_dict.clear
        @queue.clear
        @queue.close
        nil
      end

      def next_event(timeout = nil)
        @state_dict.each_value(&:ensure_worker)
        max_duration = calc_max_duration(timeout)
        starting = obtain_current_time

        loop do
          break if max_duration > 0 && obtain_current_time - starting > max_duration

          case event = @queue.pop(true)
          when StandardError then raise event
          when Array then break event
          end
        rescue ThreadError
          sleep 0.005
        end
      end

      private

      def _call(command)
        case ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
        when 'subscribe', 'psubscribe', 'ssubscribe' then call_to_single_state(command)
        when 'unsubscribe', 'punsubscribe' then call_to_all_states(command)
        when 'sunsubscribe' then call_for_sharded_states(command)
        else call_to_single_state(command)
        end
      end

      def call_to_single_state(command)
        node_key = @router.find_node_key(command)
        try_call(node_key, command)
      end

      def call_to_all_states(command)
        @state_dict.each_value { |s| s.call(command) }
      end

      def call_for_sharded_states(command)
        if command.size == 1
          call_to_all_states(command)
        else
          call_to_single_state(command)
        end
      end

      def try_call(node_key, command, retry_count: 1)
        add_state(node_key).call(command)
      rescue ::RedisClient::CommandError => e
        raise if !e.message.start_with?('MOVED') || retry_count <= 0

        # for sharded pub/sub
        node_key = e.message.split[2]
        retry_count -= 1
        retry
      end

      def add_state(node_key)
        return @state_dict[node_key] if @state_dict.key?(node_key)

        state = State.new(@router.find_node(node_key).pubsub, @queue)
        @state_dict[node_key] = state
      end

      def obtain_current_time
        Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
      end

      def calc_max_duration(timeout)
        timeout.nil? || timeout < 0 ? 0 : timeout * 1_000_000
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/pipeline.rb

# frozen_string_literal: true

require 'redis_client'
require 'redis_client/cluster/errors'
require 'redis_client/connection_mixin'
require 'redis_client/middlewares'
require 'redis_client/pooled'

class RedisClient
  class Cluster
    class Pipeline
      class Extended < ::RedisClient::Pipeline
        attr_reader :outer_indices

        def initialize(...)
          super
          @outer_indices = nil
        end

        def add_outer_index(index)
          @outer_indices ||= []
          @outer_indices << index
        end

        def get_inner_index(outer_index)
          @outer_indices&.find_index(outer_index)
        end

        def get_callee_method(inner_index)
          if @timeouts.is_a?(Array) && !@timeouts[inner_index].nil?
            :blocking_call_v
          elsif _retryable?
            :call_once_v
          else
            :call_v
          end
        end

        def get_command(inner_index)
          @commands.is_a?(Array) ? @commands[inner_index] : nil
        end

        def get_timeout(inner_index)
          @timeouts.is_a?(Array) ? @timeouts[inner_index] : nil
        end

        def get_block(inner_index)
          @blocks.is_a?(Array) ?
            @blocks[inner_index] : nil
        end
      end

      ::RedisClient::ConnectionMixin.module_eval do
        def call_pipelined_aware_of_redirection(commands, timeouts, exception:) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
          size = commands.size
          results = Array.new(commands.size)
          @pending_reads += size
          write_multi(commands)

          redirection_indices = nil
          first_exception = nil
          size.times do |index|
            timeout = timeouts && timeouts[index]
            result = read(timeout)
            @pending_reads -= 1
            if result.is_a?(::RedisClient::Error)
              result._set_command(commands[index])
              if result.is_a?(::RedisClient::CommandError) && result.message.start_with?('MOVED', 'ASK')
                redirection_indices ||= []
                redirection_indices << index
              elsif exception
                first_exception ||= result
              end
            end

            results[index] = result
          end

          raise first_exception if exception && first_exception
          return results if redirection_indices.nil?

          err = ::RedisClient::Cluster::Pipeline::RedirectionNeeded.new
          err.replies = results
          err.indices = redirection_indices
          raise err
        end
      end

      ::RedisClient.class_eval do
        attr_reader :middlewares

        def ensure_connected_cluster_scoped(retryable: true, &block)
          ensure_connected(retryable: retryable, &block)
        end
      end

      ReplySizeError = Class.new(::RedisClient::Error)

      class RedirectionNeeded < ::RedisClient::Error
        attr_accessor :replies, :indices
      end

      def initialize(router, command_builder, concurrent_worker, exception:, seed: Random.new_seed)
        @router = router
        @command_builder = command_builder
        @concurrent_worker = concurrent_worker
        @exception = exception
        @seed = seed
        @pipelines = nil
        @size = 0
      end

      def call(*args, **kwargs, &block)
        command = @command_builder.generate(args, kwargs)
        node_key = @router.find_node_key(command, seed: @seed)
        append_pipeline(node_key).call_v(command, &block)
      end

      def call_v(args, &block)
        command = @command_builder.generate(args)
        node_key = @router.find_node_key(command, seed: @seed)
        append_pipeline(node_key).call_v(command, &block)
      end

      def call_once(*args, **kwargs, &block)
        command = @command_builder.generate(args, kwargs)
        node_key = @router.find_node_key(command, seed: @seed)
        append_pipeline(node_key).call_once_v(command, &block)
      end

      def call_once_v(args, &block)
        command = @command_builder.generate(args)
        node_key = @router.find_node_key(command, seed: @seed)
        append_pipeline(node_key).call_once_v(command, &block)
      end

      def blocking_call(timeout, *args, **kwargs, &block)
        command = @command_builder.generate(args, kwargs)
        node_key = @router.find_node_key(command, seed: @seed)
        append_pipeline(node_key).blocking_call_v(timeout, command, &block)
      end

      def blocking_call_v(timeout, args, &block)
        command = @command_builder.generate(args)
        node_key = @router.find_node_key(command, seed: @seed)
        append_pipeline(node_key).blocking_call_v(timeout, command, &block)
      end

      def empty?
        @size.zero?
      end

      def execute # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        return if @pipelines.nil? || @pipelines.empty?
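        # Fan out each per-node sub-pipeline to its own worker, then stitch the
        # replies back into the caller's original call order using the outer
        # indices recorded by append_pipeline.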
        work_group = @concurrent_worker.new_group(size: @pipelines.size)

        @pipelines.each do |node_key, pipeline|
          work_group.push(node_key, @router.find_node(node_key), pipeline) do |cli, pl|
            replies = do_pipelining(cli, pl)
            raise ReplySizeError, "commands: #{pl._size}, replies: #{replies.size}" if pl._size != replies.size

            replies
          end
        end

        all_replies = errors = required_redirections = nil

        work_group.each do |node_key, v|
          case v
          when ::RedisClient::Cluster::Pipeline::RedirectionNeeded
            required_redirections ||= {}
            required_redirections[node_key] = v
          when StandardError
            errors ||= {}
            errors[node_key] = v
          else
            all_replies ||= Array.new(@size)
            @pipelines[node_key].outer_indices.each_with_index { |outer, inner| all_replies[outer] = v[inner] }
          end
        end

        work_group.close

        raise ::RedisClient::Cluster::ErrorCollection, errors unless errors.nil?

        required_redirections&.each do |node_key, v|
          all_replies ||= Array.new(@size)
          pipeline = @pipelines[node_key]
          v.indices.each { |i| v.replies[i] = handle_redirection(v.replies[i], pipeline, i) }
          pipeline.outer_indices.each_with_index { |outer, inner| all_replies[outer] = v.replies[inner] }
        end

        all_replies
      end

      private

      def append_pipeline(node_key)
        @pipelines ||= {}
        @pipelines[node_key] ||= ::RedisClient::Cluster::Pipeline::Extended.new(@command_builder)
        @pipelines[node_key].add_outer_index(@size)
        @size += 1
        @pipelines[node_key]
      end

      def do_pipelining(client, pipeline)
        case client
        when ::RedisClient then send_pipeline(client, pipeline)
        when ::RedisClient::Pooled then client.with { |cli| send_pipeline(cli, pipeline) }
        else raise NotImplementedError, "#{client.class.name}#pipelined for cluster client"
        end
      end

      def send_pipeline(client, pipeline)
        results = client.ensure_connected_cluster_scoped(retryable: pipeline._retryable?) do |connection|
          commands = pipeline._commands
          client.middlewares.call_pipelined(commands, client.config) do
            connection.call_pipelined_aware_of_redirection(commands, pipeline._timeouts, exception: @exception)
          end
        end

        pipeline._coerce!(results)
      end

      def handle_redirection(err, pipeline, inner_index)
        return err unless err.is_a?(::RedisClient::CommandError)

        if err.message.start_with?('MOVED')
          node = @router.assign_redirection_node(err.message)
          try_redirection(node, pipeline, inner_index)
        elsif err.message.start_with?('ASK')
          node = @router.assign_asking_node(err.message)
          try_asking(node) ? try_redirection(node, pipeline, inner_index) : err
        else
          err
        end
      end

      def try_redirection(node, pipeline, inner_index)
        redirect_command(node, pipeline, inner_index)
      rescue StandardError => e
        @exception ? raise : e
      end

      def redirect_command(node, pipeline, inner_index)
        method = pipeline.get_callee_method(inner_index)
        command = pipeline.get_command(inner_index)
        timeout = pipeline.get_timeout(inner_index)
        block = pipeline.get_block(inner_index)
        args = timeout.nil? ? [] : [timeout]

        if block.nil?
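          # No result-coercion block was attached to this command, so replay it as-is.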
          @router.try_send(node, method, command, args)
        else
          @router.try_send(node, method, command, args, &block)
        end
      end

      def try_asking(node)
        node.call('ASKING') == 'OK'
      rescue StandardError
        false
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/optimistic_locking.rb

# frozen_string_literal: true

require 'redis_client'
require 'redis_client/cluster/transaction'

class RedisClient
  class Cluster
    class OptimisticLocking
      def initialize(router)
        @router = router
        @asking = false
      end

      def watch(keys)
        slot = find_slot(keys)
        raise ::RedisClient::Cluster::Transaction::ConsistencyError, "unsafe watch: #{keys.join(' ')}" if slot.nil?

        # We have not yet selected a node for this transaction, initially, which means we can handle
        # redirections freely initially (i.e. for the first WATCH call)
        node = @router.find_primary_node_by_slot(slot)
        handle_redirection(node, retry_count: 1) do |nd|
          nd.with do |c|
            c.ensure_connected_cluster_scoped(retryable: false) do
              c.call('ASKING') if @asking
              c.call('WATCH', *keys)
              begin
                yield(c, slot, @asking)
              rescue ::RedisClient::ConnectionError
                # No need to unwatch on a connection error.
                raise
              rescue StandardError
                c.call('UNWATCH')
                raise
              end
            end
          end
        end
      end

      private

      def handle_redirection(node, retry_count: 1, &blk)
        @router.handle_redirection(node, retry_count: retry_count) do |nd|
          handle_asking_once(nd, &blk)
        end
      end

      def handle_asking_once(node)
        yield node
      rescue ::RedisClient::CommandError => e
        raise unless ErrorIdentification.client_owns_error?(e, node)
        raise unless e.message.start_with?('ASK')

        node = @router.assign_asking_node(e.message)
        @asking = true
        yield node
      ensure
        @asking = false
      end

      def find_slot(keys)
        return if keys.empty?
        return if keys.any? { |k| k.nil? || k.empty? }

        slots = keys.map { |k| @router.find_slot_by_key(k) }
        return if slots.uniq.size != 1

        slots.first
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/normalized_cmd_name.rb

# frozen_string_literal: true

require 'singleton'

class RedisClient
  class Cluster
    class NormalizedCmdName
      include Singleton

      EMPTY_STRING = ''

      def initialize
        @cache = {}
        @mutex = Mutex.new
      end

      def get_by_command(command)
        get(command, index: 0)
      end

      def get_by_subcommand(command)
        get(command, index: 1)
      end

      def get_by_name(name)
        get(name, index: 0)
      end

      def clear
        @mutex.synchronize { @cache.clear }
        true
      end

      private

      def get(command, index:)
        name = extract_name(command, index: index)
        return EMPTY_STRING if name.nil? || name.empty?

        normalize(name)
      end

      def extract_name(command, index:)
        case command
        when String, Symbol then index.zero? ? command : nil
        when Array then extract_name_from_array(command, index: index)
        end
      end

      def extract_name_from_array(command, index:)
        return if command.size - 1 < index

        case e = command[index]
        when String, Symbol then e
        when Array then e[index]
        end
      end

      def normalize(name)
        return @cache[name] || name.to_s.downcase if @cache.key?(name)
        return name.to_s.downcase if @mutex.locked?

        str = name.to_s.downcase
        @mutex.synchronize { @cache[name] = str }
        str
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/node_key.rb

# frozen_string_literal: true

class RedisClient
  class Cluster
    # Node key's format is `<ip>:<port>`.
    # It is different from the node id.
    # A node id is an internal identifying code in Redis Cluster.
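    #
    # For example, using the helpers defined below:
    #
    #   NodeKey.build_from_host_port('127.0.0.1', 6379) #=> "127.0.0.1:6379"
    #   NodeKey.split('127.0.0.1:6379')                 #=> ["127.0.0.1", "6379"]
    #   NodeKey.hashify('127.0.0.1:6379')               #=> { host: '127.0.0.1', port: '6379' }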
    module NodeKey
      DELIMITER = ':'

      module_function

      def hashify(node_key)
        host, port = split(node_key)
        { host: host, port: port }
      end

      def split(node_key)
        pos = node_key&.rindex(DELIMITER, -1)
        return [node_key, nil] if pos.nil?

        [node_key[0, pos], node_key[(pos + 1)..]]
      end

      def build_from_uri(uri)
        return '' if uri.nil?

        "#{uri.host}#{DELIMITER}#{uri.port}"
      end

      def build_from_host_port(host, port)
        "#{host}#{DELIMITER}#{port}"
      end

      def build_from_client(client)
        "#{client.config.host}#{DELIMITER}#{client.config.port}"
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/node/random_replica_or_primary.rb

# frozen_string_literal: true

require 'redis_client/cluster/node/base_topology'

class RedisClient
  class Cluster
    class Node
      class RandomReplicaOrPrimary < BaseTopology
        def replica_clients
          keys = @replications.values.filter_map(&:sample)
          @clients.select { |k, _| keys.include?(k) }
        end

        def clients_for_scanning(seed: nil)
          random = seed.nil? ? Random : Random.new(seed)
          keys = @replications.map do |primary_node_key, replica_node_keys|
            decide_use_primary?(random, replica_node_keys.size) ? primary_node_key : replica_node_keys.sample(random: random)
          end

          clients.select { |k, _| keys.include?(k) }
        end

        def find_node_key_of_replica(primary_node_key, seed: nil)
          random = seed.nil? ? Random : Random.new(seed)

          replica_node_keys = @replications.fetch(primary_node_key, EMPTY_ARRAY)
          if decide_use_primary?(random, replica_node_keys.size)
            primary_node_key
          else
            replica_node_keys.sample(random: random) || primary_node_key
          end
        end

        def any_replica_node_key(seed: nil)
          random = seed.nil? ? Random : Random.new(seed)
          @replica_node_keys.sample(random: random) || any_primary_node_key(seed: seed)
        end

        private

        # Chooses the node to read from uniformly at random among the primary and all of its replicas.
        # e.g. 1 primary + 1 replica = 50% probability to read from the primary
        # e.g. 1 primary + 2 replicas = 33% probability to read from the primary
        # e.g. 1 primary + 0 replicas = 100% probability to read from the primary
        def decide_use_primary?(random, replica_nodes)
          primary_nodes = 1.0
          total = primary_nodes + replica_nodes
          random.rand < primary_nodes / total
        end
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/node/random_replica.rb

# frozen_string_literal: true

require 'redis_client/cluster/node/base_topology'

class RedisClient
  class Cluster
    class Node
      class RandomReplica < BaseTopology
        def replica_clients
          keys = @replications.values.filter_map(&:sample)
          @clients.select { |k, _| keys.include?(k) }
        end

        def clients_for_scanning(seed: nil)
          random = seed.nil? ? Random : Random.new(seed)
          keys = @replications.map do |primary_node_key, replica_node_keys|
            replica_node_keys.empty? ? primary_node_key : replica_node_keys.sample(random: random)
          end

          clients.select { |k, _| keys.include?(k) }
        end

        def find_node_key_of_replica(primary_node_key, seed: nil)
          random = seed.nil? ? Random : Random.new(seed)
          @replications.fetch(primary_node_key, EMPTY_ARRAY).sample(random: random) || primary_node_key
        end

        def any_replica_node_key(seed: nil)
          random = seed.nil? ?
            Random : Random.new(seed)
          @replica_node_keys.sample(random: random) || any_primary_node_key(seed: seed)
        end
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/node/primary_only.rb

# frozen_string_literal: true

require 'redis_client/cluster/node/base_topology'

class RedisClient
  class Cluster
    class Node
      class PrimaryOnly < BaseTopology
        alias primary_clients clients
        alias replica_clients clients

        def clients_for_scanning(seed: nil) # rubocop:disable Lint/UnusedMethodArgument
          @clients
        end

        def find_node_key_of_replica(primary_node_key, seed: nil) # rubocop:disable Lint/UnusedMethodArgument
          primary_node_key
        end

        def any_primary_node_key(seed: nil)
          random = seed.nil? ? Random : Random.new(seed)
          @primary_node_keys.sample(random: random)
        end

        alias any_replica_node_key any_primary_node_key

        def process_topology_update!(replications, options)
          # Remove non-primary nodes from options (provided that we actually have any primaries at all)
          options = options.select { |node_key, _| replications.key?(node_key) } if replications.keys.any?
          super(replications, options)
        end
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/node/latency_replica.rb

# frozen_string_literal: true

require 'redis_client/cluster/node/base_topology'

class RedisClient
  class Cluster
    class Node
      class LatencyReplica < BaseTopology
        DUMMY_LATENCY_MSEC = 100 * 1000 * 1000
        MEASURE_ATTEMPT_COUNT = 10

        def clients_for_scanning(seed: nil) # rubocop:disable Lint/UnusedMethodArgument
          @clients_for_scanning
        end

        def find_node_key_of_replica(primary_node_key, seed: nil) # rubocop:disable Lint/UnusedMethodArgument
          @replications.fetch(primary_node_key, EMPTY_ARRAY).first || primary_node_key
        end

        def any_replica_node_key(seed: nil)
          random = seed.nil? ? Random : Random.new(seed)
          @existed_replicas.sample(random: random)&.first || any_primary_node_key(seed: seed)
        end

        def process_topology_update!(replications, options)
          super

          all_replica_clients = @clients.select { |k, _| @replica_node_keys.include?(k) }
          latencies = measure_latencies(all_replica_clients, @concurrent_worker)
          @replications.each_value { |keys| keys.sort_by! { |k| latencies.fetch(k) } }
          @replica_clients = select_replica_clients(@replications, @clients)
          @clients_for_scanning = select_clients_for_scanning(@replications, @clients)
          @existed_replicas = @replications.values.reject(&:empty?)
        end

        private

        def measure_latencies(clients, concurrent_worker) # rubocop:disable Metrics/AbcSize
          return {} if clients.empty?

          work_group = concurrent_worker.new_group(size: clients.size)

          clients.each do |node_key, client|
            work_group.push(node_key, client) do |cli|
              min = DUMMY_LATENCY_MSEC
              MEASURE_ATTEMPT_COUNT.times do
                starting = obtain_current_time
                cli.call_once('PING')
                duration = obtain_current_time - starting
                min = duration if duration < min
              end

              min
            rescue StandardError
              DUMMY_LATENCY_MSEC
            end
          end

          latencies = {}
          work_group.each { |node_key, v| latencies[node_key] = v }
          work_group.close
          latencies
        end

        def obtain_current_time
          Process.clock_gettime(Process::CLOCK_MONOTONIC, :microsecond)
        end

        def select_replica_clients(replications, clients)
          keys = replications.values.filter_map(&:first)
          clients.select { |k, _| keys.include?(k) }
        end

        def select_clients_for_scanning(replications, clients)
          keys = replications.map do |primary_node_key, replica_node_keys|
            replica_node_keys.empty? ?
              primary_node_key : replica_node_keys.first
          end

          clients.select { |k, _| keys.include?(k) }
        end
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/node/base_topology.rb

# frozen_string_literal: true

class RedisClient
  class Cluster
    class Node
      class BaseTopology
        IGNORE_GENERIC_CONFIG_KEYS = %i[url host port path].freeze
        EMPTY_HASH = {}.freeze
        EMPTY_ARRAY = [].freeze

        attr_reader :clients, :primary_clients, :replica_clients

        def initialize(pool, concurrent_worker, **kwargs)
          @pool = pool
          @clients = {}
          @client_options = kwargs.reject { |k, _| IGNORE_GENERIC_CONFIG_KEYS.include?(k) }
          @concurrent_worker = concurrent_worker
          @replications = EMPTY_HASH
          @primary_node_keys = EMPTY_ARRAY
          @replica_node_keys = EMPTY_ARRAY
          @primary_clients = EMPTY_ARRAY
          @replica_clients = EMPTY_ARRAY
        end

        def any_primary_node_key(seed: nil)
          random = seed.nil? ? Random : Random.new(seed)
          @primary_node_keys.sample(random: random)
        end

        def process_topology_update!(replications, options) # rubocop:disable Metrics/AbcSize
          @replications = replications.freeze
          @primary_node_keys = @replications.keys.sort.select { |k| options.key?(k) }.freeze
          @replica_node_keys = @replications.values.flatten.sort.select { |k| options.key?(k) }.freeze

          # Disconnect from nodes that we no longer want, and connect to nodes we're not connected to yet
          disconnect_from_unwanted_nodes(options)
          connect_to_new_nodes(options)

          @primary_clients, @replica_clients = @clients.partition { |k, _| @primary_node_keys.include?(k) }.map(&:to_h)
          @primary_clients.freeze
          @replica_clients.freeze
        end

        private

        def disconnect_from_unwanted_nodes(options)
          (@clients.keys - options.keys).each do |node_key|
            @clients.delete(node_key).close
          end
        end

        def connect_to_new_nodes(options)
          (options.keys - @clients.keys).each do |node_key|
            option = options[node_key].merge(@client_options)
            config = ::RedisClient::Cluster::Node::Config.new(scale_read: !@primary_node_keys.include?(node_key), **option)
            client = @pool.nil? ? config.new_client : config.new_pool(**@pool)
            @clients[node_key] = client
          end
        end
      end
    end
  end
end

redis-cluster-client-0.10.0/lib/redis_client/cluster/node.rb

# frozen_string_literal: true

require 'redis_client'
require 'redis_client/config'
require 'redis_client/cluster/errors'
require 'redis_client/cluster/node/primary_only'
require 'redis_client/cluster/node/random_replica'
require 'redis_client/cluster/node/random_replica_or_primary'
require 'redis_client/cluster/node/latency_replica'

class RedisClient
  class Cluster
    class Node
      include Enumerable

      # It strikes a balance between load and stability during initialization or topology changes.
      MAX_STARTUP_SAMPLE = Integer(ENV.fetch('REDIS_CLIENT_MAX_STARTUP_SAMPLE', 3))

      # less memory consumption, but slow
      USE_CHAR_ARRAY_SLOT = Integer(ENV.fetch('REDIS_CLIENT_USE_CHAR_ARRAY_SLOT', 1)) == 1

      SLOT_SIZE = 16_384
      MIN_SLOT = 0
      MAX_SLOT = SLOT_SIZE - 1
      DEAD_FLAGS = %w[fail? fail handshake noaddr noflags].freeze
      ROLE_FLAGS = %w[master slave].freeze
      EMPTY_ARRAY = [].freeze
      EMPTY_HASH = {}.freeze

      ReloadNeeded = Class.new(::RedisClient::Error)

      Info = Struct.new(
        'RedisClusterNode',
        :id, :node_key, :role, :primary_id, :ping_sent,
        :pong_recv, :config_epoch, :link_state, :slots,
        keyword_init: true
      ) do
        def primary?
          role == 'master'
        end

        def replica?
          role == 'slave'
        end
      end

      class CharArray
        BASE = ''
        PADDING = '0'

        def initialize(size, elements)
          @elements = elements
          @string = String.new(BASE, encoding: Encoding::BINARY, capacity: size)
          size.times { @string << PADDING }
        end

        def [](index)
          raise IndexError if index < 0
          return if index >= @string.bytesize

          @elements[@string.getbyte(index)]
        end

        def []=(index, element)
          raise IndexError if index < 0
          return if index >= @string.bytesize

          pos = @elements.find_index(element) # O(N)
          if pos.nil?
            raise(RangeError, 'full of elements') if @elements.size >= 256

            pos = @elements.size
            @elements << element
          end

          @string.setbyte(index, pos)
        end
      end

      class Config < ::RedisClient::Config
        def initialize(scale_read: false, **kwargs)
          @scale_read = scale_read
          super(**kwargs)
        end

        private

        def build_connection_prelude
          prelude = super.dup
          prelude << ['READONLY'] if @scale_read
          prelude.freeze
        end
      end

      def initialize(concurrent_worker, config:, pool: nil, **kwargs)
        @concurrent_worker = concurrent_worker
        @slots = build_slot_node_mappings(EMPTY_ARRAY)
        @replications = build_replication_mappings(EMPTY_ARRAY)
        klass = make_topology_class(config.use_replica?, config.replica_affinity)
        @topology = klass.new(pool, @concurrent_worker, **kwargs)
        @config = config
        @mutex = Mutex.new
        @last_reloaded_at = nil
      end

      def inspect
        "#<#{self.class.name} #{node_keys.join(', ')}>"
      end

      def each(&block)
        @topology.clients.each_value(&block)
      end

      def sample
        @topology.clients.values.sample
      end

      def node_keys
        @topology.clients.keys.sort
      end

      def find_by(node_key)
        raise ReloadNeeded if node_key.nil? || !@topology.clients.key?(node_key)

        @topology.clients.fetch(node_key)
      end

      def call_all(method, command, args, &block)
        call_multiple_nodes!(@topology.clients, method, command, args, &block)
      end

      def call_primaries(method, command, args, &block)
        call_multiple_nodes!(@topology.primary_clients, method, command, args, &block)
      end

      def call_replicas(method, command, args, &block)
        call_multiple_nodes!(@topology.replica_clients, method, command, args, &block)
      end

      def send_ping(method, command, args, &block)
        result_values, errors = call_multiple_nodes(@topology.clients, method, command, args, &block)
        return result_values if errors.nil? || errors.empty?

        raise ReloadNeeded if errors.values.any?(::RedisClient::ConnectionError)

        raise ::RedisClient::Cluster::ErrorCollection, errors
      end

      def clients_for_scanning(seed: nil)
        @topology.clients_for_scanning(seed: seed).values.sort_by { |c| "#{c.config.host}-#{c.config.port}" }
      end

      def clients
        @topology.clients.values
      end

      def primary_clients
        @topology.primary_clients.values
      end

      def replica_clients
        @topology.replica_clients.values
      end

      def find_node_key_of_primary(slot)
        return if slot.nil?

        slot = Integer(slot)
        return if slot < MIN_SLOT || slot > MAX_SLOT

        @slots[slot]
      end

      def find_node_key_of_replica(slot, seed: nil)
        primary_node_key = find_node_key_of_primary(slot)
        @topology.find_node_key_of_replica(primary_node_key, seed: seed)
      end

      def any_primary_node_key(seed: nil)
        @topology.any_primary_node_key(seed: seed)
      end

      def any_replica_node_key(seed: nil)
        @topology.any_replica_node_key(seed: seed)
      end

      def update_slot(slot, node_key)
        return if @mutex.locked?

        @mutex.synchronize do
          @slots[slot] = node_key
        rescue RangeError
          @slots = Array.new(SLOT_SIZE) { |i| @slots[i] }
          @slots[slot] = node_key
        end
      end

      def reload!
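        # Refetches CLUSTER NODES from a sample of startup nodes, then rebuilds
        # the slot-to-node and primary-to-replica mappings under the reload lock.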
        with_reload_lock do
          with_startup_clients(MAX_STARTUP_SAMPLE) do |startup_clients|
            @node_info = refetch_node_info_list(startup_clients)
            @node_configs = @node_info.to_h do |node_info|
              [node_info.node_key, @config.client_config_for_node(node_info.node_key)]
            end
            @slots = build_slot_node_mappings(@node_info)
            @replications = build_replication_mappings(@node_info)
            @topology.process_topology_update!(@replications, @node_configs)
          end
        end
      end

      private

      def make_topology_class(with_replica, replica_affinity)
        if with_replica && replica_affinity == :random
          ::RedisClient::Cluster::Node::RandomReplica
        elsif with_replica && replica_affinity == :random_with_primary
          ::RedisClient::Cluster::Node::RandomReplicaOrPrimary
        elsif with_replica && replica_affinity == :latency
          ::RedisClient::Cluster::Node::LatencyReplica
        else
          ::RedisClient::Cluster::Node::PrimaryOnly
        end
      end

      def build_slot_node_mappings(node_info_list)
        slots = make_array_for_slot_node_mappings(node_info_list)
        node_info_list.each do |info|
          next if info.slots.nil? || info.slots.empty?

          info.slots.each { |start, last| (start..last).each { |i| slots[i] = info.node_key } }
        end

        slots
      end

      def make_array_for_slot_node_mappings(node_info_list)
        return Array.new(SLOT_SIZE) if !USE_CHAR_ARRAY_SLOT || node_info_list.count(&:primary?) > 256

        primary_node_keys = node_info_list.select(&:primary?).map(&:node_key)
        ::RedisClient::Cluster::Node::CharArray.new(SLOT_SIZE, primary_node_keys)
      end

      def build_replication_mappings(node_info_list) # rubocop:disable Metrics/AbcSize
        dict = node_info_list.to_h { |info| [info.id, info] }
        node_info_list.each_with_object(Hash.new { |h, k| h[k] = [] }) do |info, acc|
          primary_info = dict[info.primary_id]
          acc[primary_info.node_key] << info.node_key unless primary_info.nil?
          acc[info.node_key] if info.primary? # for a primary that has no replicas
        end
      end

      def call_multiple_nodes(clients, method, command, args, &block)
        results, errors = try_map(clients) do |_, client|
          client.public_send(method, *args, command, &block)
        end

        [results&.values, errors]
      end

      def call_multiple_nodes!(clients, method, command, args, &block)
        result_values, errors = call_multiple_nodes(clients, method, command, args, &block)
        return result_values if errors.nil? || errors.empty?

        raise ::RedisClient::Cluster::ErrorCollection, errors
      end

      def try_map(clients, &block) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity
        return [{}, {}] if clients.empty?

        work_group = @concurrent_worker.new_group(size: clients.size)

        clients.each do |node_key, client|
          work_group.push(node_key, node_key, client, block) do |nk, cli, blk|
            blk.call(nk, cli)
          rescue StandardError => e
            e
          end
        end

        results = errors = nil

        work_group.each do |node_key, v|
          case v
          when StandardError
            errors ||= {}
            errors[node_key] = v
          else
            results ||= {}
            results[node_key] = v
          end
        end

        work_group.close

        [results, errors]
      end

      def refetch_node_info_list(startup_clients) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        startup_size = startup_clients.size
        work_group = @concurrent_worker.new_group(size: startup_size)

        startup_clients.each_with_index do |raw_client, i|
          work_group.push(i, raw_client) do |client|
            regular_timeout = client.read_timeout
            client.read_timeout = @config.slow_command_timeout > 0.0 ?
              @config.slow_command_timeout : regular_timeout
            reply = client.call('CLUSTER', 'NODES')
            client.read_timeout = regular_timeout
            parse_cluster_node_reply(reply)
          rescue StandardError => e
            e
          ensure
            client&.close
          end
        end

        node_info_list = errors = nil

        work_group.each do |i, v|
          case v
          when StandardError
            errors ||= Array.new(startup_size)
            errors[i] = v
          else
            node_info_list ||= Array.new(startup_size)
            node_info_list[i] = v
          end
        end

        work_group.close

        raise ::RedisClient::Cluster::InitialSetupError, errors if node_info_list.nil?

        grouped = node_info_list.compact.group_by do |info_list|
          info_list.sort_by!(&:id)
          info_list.each_with_object(String.new(capacity: 128 * info_list.size)) do |e, a|
            a << e.id << e.node_key << e.role << e.primary_id << e.config_epoch
          end
        end

        grouped.max_by { |_, v| v.size }[1].first
      end

      def parse_cluster_node_reply(reply) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        reply.each_line("\n", chomp: true).filter_map do |line|
          fields = line.split
          flags = fields[2].split(',')
          next unless fields[7] == 'connected' && (flags & DEAD_FLAGS).empty?

          slots = if fields[8].nil?
                    EMPTY_ARRAY
                  else
                    fields[8..].reject { |str| str.start_with?('[') }
                               .map { |str| str.split('-').map { |s| Integer(s) } }
                               .map { |a| a.size == 1 ? a << a.first : a }
                               .map(&:sort)
                  end

          ::RedisClient::Cluster::Node::Info.new(
            id: fields[0],
            node_key: parse_node_key(fields[1]),
            role: (flags & ROLE_FLAGS).first,
            primary_id: fields[3],
            ping_sent: fields[4],
            pong_recv: fields[5],
            config_epoch: fields[6],
            link_state: fields[7],
            slots: slots
          )
        end
      end

      # As the redirection node_key depends on the `cluster-preferred-endpoint-type` config,
      # the node_key should use the hostname if one is present in the CLUSTER NODES output.
      #
      # See https://redis.io/commands/cluster-nodes/ for details on the output format.
      # node_address matches the format: <ip:port@cport[,hostname[,auxiliary_field=value]*]>
      def parse_node_key(node_address)
        ip_chunk, hostname, _auxiliaries = node_address.split(',')
        ip_port_string = ip_chunk.split('@').first
        return ip_port_string if hostname.nil? || hostname.empty?

        port = ip_port_string.split(':')[1]
        "#{hostname}:#{port}"
      end

      def with_startup_clients(count) # rubocop:disable Metrics/AbcSize
        if @config.connect_with_original_config
          # If connect_with_original_config is set, that means we need to build actual client objects
          # and close them, so that we e.g. re-resolve a DNS entry with the cluster nodes in it.
          begin
            # Memoize the startup clients, so we maintain RedisClient's internal circuit breaker configuration
            # if it's set.
            @startup_clients ||= @config.startup_nodes.values.sample(count).map do |node_config|
              ::RedisClient::Cluster::Node::Config.new(**node_config).new_client
            end
            yield @startup_clients
          ensure
            # Close the startup clients when we're done, so we don't maintain pointless open connections to
            # the cluster though
            @startup_clients&.each(&:close)
          end
        else
          # (re-)connect using nodes we already know about.
          # If this is the first time we're connecting to the cluster, we need to seed the topology with the
          # startup clients though.
          @topology.process_topology_update!({}, @config.startup_nodes) if @topology.clients.empty?
          yield @topology.clients.values.sample(count)
        end
      end

      def with_reload_lock
        # What should happen with concurrent calls to #reload? This is a realistic possibility if the cluster goes into
        # a CLUSTERDOWN state, and we're using a pooled backend. Every thread will independently discover this, and
        # call reload!.
        # For now, if a reload is in progress, wait for that to complete, and consider that the same as us having
        # performed the reload.
      # Probably in the future we should add a circuit breaker to #reload itself, and stop trying
      # if the cluster is obviously not working.
      wait_start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      @mutex.synchronize do
        return if @last_reloaded_at && @last_reloaded_at > wait_start

        r = yield
        @last_reloaded_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
        r
      end
    end
  end
end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/key_slot_converter.rb0000644000175100017510000000661214625140707027166 0ustar vivekdebvivekdeb# frozen_string_literal: true

class RedisClient
  class Cluster
    module KeySlotConverter
      EMPTY_STRING = ''
      LEFT_BRACKET = '{'
      RIGHT_BRACKET = '}'
      XMODEM_CRC16_LOOKUP = [
        0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
        0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
        0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
        0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
        0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
        0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
        0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
        0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
        0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
        0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
        0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
        0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
        0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
        0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
        0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
        0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
        0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
        0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
        0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
        0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
        0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
        0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
        0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
        0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
        0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
        0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
        0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
        0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
        0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
        0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
        0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
        0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
      ].freeze

      HASH_SLOTS = 16_384

      module_function

      def convert(key)
        return nil if key.nil?

        hash_tag = extract_hash_tag(key)
        key = hash_tag unless hash_tag.empty?

        crc = 0
        key.each_byte do |b|
          crc = ((crc << 8) & 0xffff) ^ XMODEM_CRC16_LOOKUP[((crc >> 8) ^ b) & 0xff]
        end

        crc % HASH_SLOTS
      end

      # @see https://redis.io/topics/cluster-spec#keys-hash-tags Keys hash tags
      def extract_hash_tag(key)
        key = key.to_s
        s = key.index(LEFT_BRACKET)
        return EMPTY_STRING if s.nil?

        e = key.index(RIGHT_BRACKET, s + 1)
        return EMPTY_STRING if e.nil?

        key[s + 1..e - 1]
      end

      def hash_tag_included?(key)
        key = key.to_s
        s = key.index(LEFT_BRACKET)
        return false if s.nil?

        e = key.index(RIGHT_BRACKET, s + 1)
        return false if e.nil?
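        # A hash tag only takes effect when at least one character sits between the braces.
        # For example (per the cluster spec): '{user1000}.following' has a tag, while
        # 'foo{}{bar}' does not, because its first candidate tag '{}' is empty.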
        s + 1 < e
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/errors.rb0000644000175100017510000000355414625140707024564 0ustar vivekdebvivekdeb# frozen_string_literal: true

require 'redis_client'

class RedisClient
  class Cluster
    ERR_ARG_NORMALIZATION = ->(arg) { Array[arg].flatten.reject { |e| e.nil? || (e.respond_to?(:empty?) && e.empty?) } }

    class InitialSetupError < ::RedisClient::Error
      def initialize(errors)
        msg = ERR_ARG_NORMALIZATION.call(errors).map(&:message).uniq.join(',')
        super("Redis client could not fetch cluster information: #{msg}")
      end
    end

    class OrchestrationCommandNotSupported < ::RedisClient::Error
      def initialize(command)
        str = ERR_ARG_NORMALIZATION.call(command).map(&:to_s).join(' ').upcase
        msg = "#{str} command should be used with care " \
              'only by applications orchestrating Redis Cluster, like redis-cli, ' \
              'and the command if used out of the right context can leave the cluster ' \
              'in a wrong state or cause data loss.'
        super(msg)
      end
    end

    class ErrorCollection < ::RedisClient::Error
      attr_reader :errors

      def initialize(errors)
        @errors = {}
        if !errors.is_a?(Hash) || errors.empty?
          super('')
          return
        end

        @errors = errors
        messages = @errors.map { |node_key, error| "#{node_key}: #{error.message}" }
        super("Errors occurred on any node: #{messages.join(', ')}")
      end
    end

    class AmbiguousNodeError < ::RedisClient::Error
      def initialize(command)
        super("Cluster client doesn't know which node the #{command} command should be sent to.")
      end
    end

    class NodeMightBeDown < ::RedisClient::Error
      def initialize(_ = '')
        super(
          'The client is trying to fetch the latest cluster state ' \
          'because a subset of nodes might be down. ' \
          'It might continue to raise errors for a while.'
        )
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/error_identification.rb0000644000175100017510000000217014625140707027443 0ustar vivekdebvivekdeb# frozen_string_literal: true

class RedisClient
  class Cluster
    module ErrorIdentification
      def self.client_owns_error?(err, client)
        return true unless identifiable?(err)

        err.from?(client)
      end

      def self.identifiable?(err)
        err.is_a?(TaggedError)
      end

      module TaggedError
        attr_accessor :config_instance

        def from?(client)
          client.config.equal?(config_instance)
        end
      end

      module Middleware
        def connect(config)
          super
        rescue RedisClient::Error => e
          identify_error(e, config)
          raise
        end

        def call(_command, config)
          super
        rescue RedisClient::Error => e
          identify_error(e, config)
          raise
        end

        def call_pipelined(_command, config)
          super
        rescue RedisClient::Error => e
          identify_error(e, config)
          raise
        end

        private

        def identify_error(err, config)
          err.singleton_class.include(TaggedError)
          err.config_instance = config
        end
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/concurrent_worker/0000775000175100017510000000000014625140707026471 5ustar vivekdebvivekdebredis-cluster-client-0.10.0/lib/redis_client/cluster/concurrent_worker/pooled.rb0000644000175100017510000000316714625140707030305 0ustar vivekdebvivekdeb# frozen_string_literal: true

require 'redis_client/pid_cache'

class RedisClient
  class Cluster
    module ConcurrentWorker
      # This class is just an experimental implementation.
      # Ruby VM allocates 1 MB memory as a stack for a thread.
      # It is a fixed size but we can modify the size with some environment variables.
      # So it consumes memory of 1 MB multiplied by the number of workers.
      class Pooled
        def initialize(size:)
          @size = size
          setup
        end

        def new_group(size:)
          reset if @pid != ::RedisClient::PIDCache.pid
          ensure_workers if @workers.first.nil?
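          # The pool is rebuilt after a fork (PIDCache reports a new pid), and worker threads
          # are spawned lazily on first use, so an idle pool holds no dead or unused threads.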
          ::RedisClient::Cluster::ConcurrentWorker::Group.new(
            worker: self,
            queue: SizedQueue.new(size),
            size: size
          )
        end

        def push(task)
          @q << task
        end

        def close
          @q.clear
          @workers.each { |t| t&.exit }
          @workers.clear
          @q.close
          @pid = nil
          nil
        end

        def inspect
          "#<#{self.class.name} tasks: #{@q.size}, workers: #{@size}>"
        end

        private

        def setup
          @q = Queue.new
          @workers = Array.new(@size)
          @pid = ::RedisClient::PIDCache.pid
        end

        def reset
          close
          setup
        end

        def ensure_workers
          @size.times do |i|
            @workers[i] = spawn_worker unless @workers[i]&.alive?
          end
        end

        def spawn_worker
          Thread.new(@q) do |q|
            loop { q.pop.exec }
          end
        end
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/concurrent_worker/on_demand.rb0000644000175100017510000000152214625140707030740 0ustar vivekdebvivekdeb# frozen_string_literal: true

class RedisClient
  class Cluster
    module ConcurrentWorker
      class OnDemand
        def initialize(size:)
          @q = SizedQueue.new(size)
        end

        def new_group(size:)
          ::RedisClient::Cluster::ConcurrentWorker::Group.new(
            worker: self,
            queue: SizedQueue.new(size),
            size: size
          )
        end

        def push(task)
          @q << spawn_worker(task, @q)
        end

        def close
          @q.clear
          @q.close
          nil
        end

        def inspect
          "#<#{self.class.name} active: #{@q.size}, max: #{@q.max}>"
        end

        private

        def spawn_worker(task, queue)
          Thread.new(task, queue) do |t, q|
            t.exec
            q.pop
          end
        end
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/concurrent_worker/none.rb0000644000175100017510000000075214625140707027757 0ustar vivekdebvivekdeb# frozen_string_literal: true

class RedisClient
  class Cluster
    module ConcurrentWorker
      class None
        def new_group(size:)
          ::RedisClient::Cluster::ConcurrentWorker::Group.new(
            worker: self,
            queue: [],
            size: size
          )
        end

        def push(task)
          task.exec
        end

        def close; end

        def inspect
          "#<#{self.class.name} main thread only>"
        end
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/concurrent_worker.rb0000644000175100017510000000444014625140707027016 0ustar vivekdebvivekdeb# frozen_string_literal: true

require 'redis_client/cluster/concurrent_worker/on_demand'
require 'redis_client/cluster/concurrent_worker/pooled'
require 'redis_client/cluster/concurrent_worker/none'

class RedisClient
  class Cluster
    module ConcurrentWorker
      InvalidNumberOfTasks = Class.new(StandardError)

      class Group
        Task = Struct.new(
          'RedisClusterClientConcurrentWorkerTask',
          :id, :queue, :args, :kwargs, :block, :result,
          keyword_init: true
        ) do
          def exec
            self[:result] = block&.call(*args, **kwargs)
          rescue StandardError => e
            self[:result] = e
          ensure
            done
          end

          def done
            queue&.push(self)
          rescue ClosedQueueError
            # something was wrong
          end
        end

        def initialize(worker:, queue:, size:)
          @worker = worker
          @queue = queue
          @size = size
          @count = 0
        end

        def push(id, *args, **kwargs, &block)
          raise InvalidNumberOfTasks, "max size reached: #{@count}" if @count == @size

          task = Task.new(id: id, queue: @queue, args: args, kwargs: kwargs, block: block)
          @worker.push(task)
          @count += 1
          nil
        end

        def each
          raise InvalidNumberOfTasks, "expected: #{@size}, actual: #{@count}" if @count != @size

          @size.times do
            task = @queue.pop
            yield(task.id, task.result)
          end

          nil
        end

        def close
          @queue.clear
          @queue.close if @queue.respond_to?(:close)
          @count = 0
          nil
        end

        def inspect
          "#<#{self.class.name} size: #{@count}, max: #{@size}, worker: #{@worker.class.name}>"
        end
      end

      module_function

      def create(model: :on_demand, size: 5)
        size = size.positive? ? size : 5
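        # A hedged usage sketch: create(model: :pooled, size: 10) would build a worker backed
        # by ten long-lived threads, while the default :on_demand model spawns one short-lived
        # thread per task and :none runs tasks inline on the calling thread.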
        case model
        when :on_demand, nil then ::RedisClient::Cluster::ConcurrentWorker::OnDemand.new(size: size)
        when :pooled then ::RedisClient::Cluster::ConcurrentWorker::Pooled.new(size: size)
        when :none then ::RedisClient::Cluster::ConcurrentWorker::None.new
        else raise ArgumentError, "Unknown model: #{model}"
        end
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster/command.rb0000644000175100017510000001332714625140707024665 0ustar vivekdebvivekdeb# frozen_string_literal: true

require 'redis_client'
require 'redis_client/cluster/errors'
require 'redis_client/cluster/key_slot_converter'
require 'redis_client/cluster/normalized_cmd_name'

class RedisClient
  class Cluster
    class Command
      EMPTY_STRING = ''
      EMPTY_HASH = {}.freeze
      EMPTY_ARRAY = [].freeze

      Detail = Struct.new(
        'RedisCommand',
        :first_key_position,
        :last_key_position,
        :key_step,
        :write?,
        :readonly?,
        keyword_init: true
      )

      class << self
        def load(nodes, slow_command_timeout: -1)
          cmd = errors = nil

          nodes&.each do |node|
            regular_timeout = node.read_timeout
            node.read_timeout = slow_command_timeout > 0.0 ? slow_command_timeout : regular_timeout
            reply = node.call('COMMAND')
            node.read_timeout = regular_timeout
            commands = parse_command_reply(reply)
            cmd = ::RedisClient::Cluster::Command.new(commands)
            break
          rescue ::RedisClient::Error => e
            errors ||= []
            errors << e
          end

          return cmd unless cmd.nil?

          raise ::RedisClient::Cluster::InitialSetupError, errors
        end

        private

        def parse_command_reply(rows)
          rows&.each_with_object({}) do |row, acc|
            next if row[0].nil?

            acc[row[0].downcase] = ::RedisClient::Cluster::Command::Detail.new(
              first_key_position: row[3],
              last_key_position: row[4],
              key_step: row[5],
              write?: row[2].include?('write'),
              readonly?: row[2].include?('readonly')
            )
          end.freeze || EMPTY_HASH
        end
      end

      def initialize(commands)
        @commands = commands || EMPTY_HASH
      end

      def extract_first_key(command)
        i = determine_first_key_position(command)
        return EMPTY_STRING if i == 0

        (command[i].is_a?(Array) ? command[i].flatten.first : command[i]).to_s
      end

      def extract_all_keys(command)
        keys_start = determine_first_key_position(command)
        keys_end = determine_last_key_position(command, keys_start)
        keys_step = determine_key_step(command)
        return EMPTY_ARRAY if [keys_start, keys_end, keys_step].any?(&:zero?)

        keys_end = [keys_end, command.size - 1].min
        # use .. inclusive range because keys_end is a valid index.
        (keys_start..keys_end).step(keys_step).map { |i| command[i] }
      end

      def should_send_to_primary?(command)
        name = ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
        @commands[name]&.write?
      end

      def should_send_to_replica?(command)
        name = ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
        @commands[name]&.readonly?
      end

      def exists?(name)
        @commands.key?(::RedisClient::Cluster::NormalizedCmdName.instance.get_by_name(name))
      end

      private

      def determine_first_key_position(command) # rubocop:disable Metrics/CyclomaticComplexity
        case name = ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
        when 'eval', 'evalsha', 'zinterstore', 'zunionstore' then 3
        when 'object' then 2
        when 'memory'
          command[1].to_s.casecmp('usage').zero? ? 2 : 0
        when 'migrate'
          command[3].empty? ? determine_optional_key_position(command, 'keys') : 3
        when 'xread', 'xreadgroup'
          determine_optional_key_position(command, 'streams')
        else
          @commands[name]&.first_key_position.to_i
        end
      end

      # IMPORTANT: this determines the last key position INCLUSIVE of the last key -
      # i.e. command[determine_last_key_position(command)] is a key.
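      # For example, for ['RPOPLPUSH', 'src', 'dst'] COMMAND reports first key 1 and last
      # key 2, so command[2] ('dst') is itself a key rather than one-past-the-end.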
      # This is in line with what Redis returns from COMMANDS.
      def determine_last_key_position(command, keys_start) # rubocop:disable Metrics/AbcSize
        case name = ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
        when 'eval', 'evalsha', 'zinterstore', 'zunionstore'
          # EVALSHA sha1 numkeys [key [key ...]] [arg [arg ...]]
          # ZINTERSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE <SUM|MIN|MAX>]
          command[2].to_i + 2
        when 'object', 'memory'
          # OBJECT [ENCODING | FREQ | IDLETIME | REFCOUNT] key
          # MEMORY USAGE key [SAMPLES count]
          keys_start
        when 'migrate'
          # MIGRATE host port destination-db timeout [COPY] [REPLACE] [AUTH password | AUTH2 username password] [KEYS key [key ...]]
          command[3].empty? ? (command.length - 1) : 3
        when 'xread', 'xreadgroup'
          # XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] id [id ...]
          keys_start + ((command.length - keys_start) / 2) - 1
        else
          # If there is a fixed, non-variable number of keys, don't iterate past that.
          if @commands[name].last_key_position >= 0
            @commands[name].last_key_position
          else
            command.length + @commands[name].last_key_position
          end
        end
      end

      def determine_optional_key_position(command, option_name) # rubocop:disable Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
        idx = command&.flatten&.map(&:to_s)&.map(&:downcase)&.index(option_name&.downcase)
        idx.nil? ? 0 : idx + 1
      end

      def determine_key_step(command)
        name = ::RedisClient::Cluster::NormalizedCmdName.instance.get_by_command(command)
        # Some commands like EVALSHA have zero as the step in COMMANDS somehow.
        @commands[name].key_step == 0 ? 1 : @commands[name].key_step
      end
    end
  end
end
redis-cluster-client-0.10.0/lib/redis_client/cluster.rb0000644000175100017510000001070314625140707023242 0ustar vivekdebvivekdeb# frozen_string_literal: true

require 'redis_client/cluster/concurrent_worker'
require 'redis_client/cluster/pipeline'
require 'redis_client/cluster/pub_sub'
require 'redis_client/cluster/router'
require 'redis_client/cluster/transaction'
require 'redis_client/cluster/optimistic_locking'

class RedisClient
  class Cluster
    ZERO_CURSOR_FOR_SCAN = '0'

    attr_reader :config

    def initialize(config, pool: nil, concurrency: nil, **kwargs)
      @config = config
      @concurrent_worker = ::RedisClient::Cluster::ConcurrentWorker.create(**(concurrency || {}))
      @command_builder = config.command_builder
      @pool = pool
      @kwargs = kwargs
      @router = nil
      @mutex = Mutex.new
    end

    def inspect
      node_keys = @router.nil? ? @config.startup_nodes.keys : router.node_keys
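      # Fall back to the configured startup nodes when no command has been routed yet,
      # so inspecting a client doesn't force it to connect and build the router.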
      "#<#{self.class.name} #{node_keys.join(', ')}>"
    end

    def call(*args, **kwargs, &block)
      command = @command_builder.generate(args, kwargs)
      router.send_command(:call_v, command, &block)
    end

    def call_v(command, &block)
      command = @command_builder.generate(command)
      router.send_command(:call_v, command, &block)
    end

    def call_once(*args, **kwargs, &block)
      command = @command_builder.generate(args, kwargs)
      router.send_command(:call_once_v, command, &block)
    end

    def call_once_v(command, &block)
      command = @command_builder.generate(command)
      router.send_command(:call_once_v, command, &block)
    end

    def blocking_call(timeout, *args, **kwargs, &block)
      command = @command_builder.generate(args, kwargs)
      router.send_command(:blocking_call_v, command, timeout, &block)
    end

    def blocking_call_v(timeout, command, &block)
      command = @command_builder.generate(command)
      router.send_command(:blocking_call_v, command, timeout, &block)
    end

    def scan(*args, **kwargs, &block)
      raise ArgumentError, 'block required' unless block

      seed = Random.new_seed
      cursor = ZERO_CURSOR_FOR_SCAN
      loop do
        cursor, keys = router.scan('SCAN', cursor, *args, seed: seed, **kwargs)
        keys.each(&block)
        break if cursor == ZERO_CURSOR_FOR_SCAN
      end
    end

    def sscan(key, *args, **kwargs, &block)
      node = router.assign_node(['SSCAN', key])
      router.try_delegate(node, :sscan, key, *args, **kwargs, &block)
    end

    def hscan(key, *args, **kwargs, &block)
      node = router.assign_node(['HSCAN', key])
      router.try_delegate(node, :hscan, key, *args, **kwargs, &block)
    end

    def zscan(key, *args, **kwargs, &block)
      node = router.assign_node(['ZSCAN', key])
      router.try_delegate(node, :zscan, key, *args, **kwargs, &block)
    end

    def pipelined(exception: true)
      seed = @config.use_replica? && @config.replica_affinity == :random ? nil : Random.new_seed
      pipeline = ::RedisClient::Cluster::Pipeline.new(
        router,
        @command_builder,
        @concurrent_worker,
        exception: exception,
        seed: seed
      )

      yield pipeline
      return [] if pipeline.empty?

      pipeline.execute
    end

    def multi(watch: nil)
      if watch.nil? || watch.empty?
        transaction = ::RedisClient::Cluster::Transaction.new(router, @command_builder)
        yield transaction
        return transaction.execute
      end

      ::RedisClient::Cluster::OptimisticLocking.new(router).watch(watch) do |c, slot, asking|
        transaction = ::RedisClient::Cluster::Transaction.new(
          router, @command_builder, node: c, slot: slot, asking: asking
        )
        yield transaction
        transaction.execute
      end
    end

    def pubsub
      ::RedisClient::Cluster::PubSub.new(router, @command_builder)
    end

    def with(...)
      raise NotImplementedError, 'No way to use'
    end

    def close
      @router&.close
      @concurrent_worker.close
      nil
    end

    private

    def router
      return @router unless @router.nil?

      @mutex.synchronize do
        @router ||= ::RedisClient::Cluster::Router.new(@config, @concurrent_worker, pool: @pool, **@kwargs)
      end
    end

    def method_missing(name, *args, **kwargs, &block)
      if router.command_exists?(name)
        args.unshift(name)
        command = @command_builder.generate(args, kwargs)
        return router.send_command(:call_v, command, &block)
      end

      super
    end

    def respond_to_missing?(name, include_private = false)
      return true if router.command_exists?(name)

      super
    end
  end
end
redis-cluster-client-0.10.0/lib/redis-cluster-client.rb0000644000175100017510000000007614625140707023160 0ustar vivekdebvivekdeb# frozen_string_literal: true

require 'redis_cluster_client'
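
# A minimal usage sketch (editor's illustration, not shipped with the gem); the endpoint
# below and the sample keys are assumptions, not part of the library:
#
#   config = RedisClient.cluster(nodes: %w[redis://127.0.0.1:6379], replica: true)
#   cli = config.new_client
#   cli.call('SET', 'name', 'Alice')
#   cli.call('GET', 'name') # => "Alice"
#   cli.pipelined do |pi|
#     pi.call('SET', '{tag}counter', '1')
#     pi.call('INCR', '{tag}counter')
#   end
#   cli.close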